hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fae0f85d85777eb6b303e7ce07c37387dcff7f | 8,597 | py | Python | packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/title/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | 1 | 2020-04-06T20:57:36.000Z | 2020-04-06T20:57:36.000Z | packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/title/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/choroplethmapbox/colorbar/title/__init__.py | potpath/plotly.py | 46cd47f441d8bda9b14b4ba66a33f02731faf8f0 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """
    Font settings for the choroplethmapbox color bar title.

    Auto-generated plotly graph-object wrapper exposing the 'color',
    'family' and 'size' sub-properties of
    ``choroplethmapbox.colorbar.title.font``.
    """

    # color
    # -----
    @property
    def color(self):
        """
        Color of the color bar's title font.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - Any named CSS color (e.g. 'aliceblue', 'red', 'steelblue',
            'yellowgreen', ...)

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The plotly service (at https://plot.ly or on-
        premise) generates images on a server, where only a select
        number of fonts are installed and supported. These include
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
        Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
        Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        Size of the color bar's title font.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of this node's parent within the figure hierarchy.
        return "choroplethmapbox.colorbar.title"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object.

        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or an
            instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font`
            to copy.
        color
            Title font color (see the ``color`` property).
        family
            HTML font family (see the ``family`` property).
        size
            Title font size (see the ``size`` property).

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so the pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choroplethmapbox.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.choroplethmapbox.colorbar.title import font as v_font

        # Initialize validators
        # ---------------------
        self._validators["color"] = v_font.ColorValidator()
        self._validators["family"] = v_font.FamilyValidator()
        self._validators["size"] = v_font.SizeValidator()

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("family", None)
        self["family"] = family if family is not None else _v
        _v = arg.pop("size", None)
        self["size"] = size if size is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
__all__ = ["Font"]
| 37.21645 | 84 | 0.569966 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """
    Font styling for the choroplethmapbox color bar title.

    Exposes the 'color', 'family' and 'size' sub-properties of
    ``choroplethmapbox.colorbar.title.font``.
    """

    @property
    def color(self):
        """
        Title font color: a hex string, an rgb/rgba, hsl/hsla or
        hsv/hsva string, or a named CSS color.

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family applied by the web browser; a comma-separated
        list expresses fallback preference. Must be a non-empty string.

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def size(self):
        """
        Title font size: an int or float in the interval [1, inf].

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def _parent_path_str(self):
        # Dotted location of this node's parent within the figure tree.
        return "choroplethmapbox.colorbar.title"

    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or an
            existing Font instance to copy.
        color
            Title font color (see the ``color`` property).
        family
            HTML font family (see the ``family`` property).
        size
            Title font size (see the ``size`` property).

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Normalize ``arg`` into a throwaway dict we can pop entries from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choroplethmapbox.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.colorbar.title.Font`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)

        from plotly.validators.choroplethmapbox.colorbar.title import font as v_font

        self._validators["color"] = v_font.ColorValidator()
        self._validators["family"] = v_font.FamilyValidator()
        self._validators["size"] = v_font.SizeValidator()

        # Explicit constructor arguments take precedence over ``arg`` entries.
        for prop_name, explicit_val in (("color", color),
                                        ("family", family),
                                        ("size", size)):
            popped_val = arg.pop(prop_name, None)
            self[prop_name] = explicit_val if explicit_val is not None else popped_val

        # Remaining entries are handled (or rejected) by the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
__all__ = ["Font"]
| true | true |
f7fae2385af890f33740936bbe812f72574fa5c1 | 1,071 | py | Python | stations/tests/test_new_list.py | sharksmhi/stations | e0b893d4f14239452ced4b1d0b4fd48f2a444c5f | [
"MIT"
] | null | null | null | stations/tests/test_new_list.py | sharksmhi/stations | e0b893d4f14239452ced4b1d0b4fd48f2a444c5f | [
"MIT"
] | null | null | null | stations/tests/test_new_list.py | sharksmhi/stations | e0b893d4f14239452ced4b1d0b4fd48f2a444c5f | [
"MIT"
] | null | null | null | # Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-10-02 12:18
@author: a002028
"""
from stations.main import App
if __name__ == '__main__':
    # Ad-hoc script: register two new monitoring stations with the app.
    app = App()

    new_stations = {'statn': ['Hästholmen Syd', 'Svartskär Ost'],
                    'lat_sweref99tm': ['6360582', '6363345'],
                    'lon_sweref99tm': ['317200', '310970'],
                    'lat_dd': [],
                    'lon_dd': []}

    from stations.utils import transform_ref_system

    # Derive decimal-degree coordinates from the SWEREF 99 TM pairs,
    # rounded to 8 decimals.
    sweref_pairs = zip(new_stations['lat_sweref99tm'],
                       new_stations['lon_sweref99tm'])
    for northing, easting in sweref_pairs:
        dd_pair = transform_ref_system(lat=northing, lon=easting)
        new_stations['lat_dd'].append(round(dd_pair[0], 8))
        new_stations['lon_dd'].append(round(dd_pair[1], 8))

    app.lists.append_new_list(
        name='new_stations',
        data=new_stations,
        attributes={key: key for key in new_stations}
    )

    # app.write_list(writer='map', list_names=['new_stations'])
| 32.454545 | 86 | 0.623716 |
from stations.main import App
if __name__ == '__main__':
    # Ad-hoc script: register two new monitoring stations with the app.
    app = App()
    # Station coordinates given in SWEREF 99 TM (as strings);
    # decimal-degree columns are filled in below.
    new_stations = {'statn': ['Hästholmen Syd', 'Svartskär Ost'],
                    'lat_sweref99tm': ['6360582', '6363345'],
                    'lon_sweref99tm': ['317200', '310970'],
                    'lat_dd': [],
                    'lon_dd': []}
    from stations.utils import transform_ref_system
    # Convert each SWEREF 99 TM pair to decimal degrees, rounded to 8 decimals.
    for la, lo in zip(new_stations['lat_sweref99tm'], new_stations['lon_sweref99tm']):
        lat_dd, lon_dd = transform_ref_system(lat=la, lon=lo)
        new_stations['lat_dd'].append(round(lat_dd, 8))
        new_stations['lon_dd'].append(round(lon_dd, 8))
    # Register the stations under the list name 'new_stations';
    # attributes maps each column name to itself.
    app.lists.append_new_list(
        name='new_stations',
        data=new_stations,
        attributes={k: k for k in list(new_stations)}
    )
| true | true |
f7fae249993c94a2f582fc95aaefcc26a86f7bf2 | 119 | py | Python | practice_3/generator.py | VelcroFly/Python.-Algorithms | acd4711e670b6ad7d5b7f20a14984319e6775a3d | [
"MIT"
] | null | null | null | practice_3/generator.py | VelcroFly/Python.-Algorithms | acd4711e670b6ad7d5b7f20a14984319e6775a3d | [
"MIT"
] | null | null | null | practice_3/generator.py | VelcroFly/Python.-Algorithms | acd4711e670b6ad7d5b7f20a14984319e6775a3d | [
"MIT"
] | null | null | null | import random
# Parameters for the randomly generated test array.
SIZE = 10       # number of elements
MIN_ITEM = 0    # smallest possible value (inclusive)
MAX_ITEM = 100  # largest possible value (inclusive)

# Uniformly random integers drawn from [MIN_ITEM, MAX_ITEM].
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
| 17 | 65 | 0.731092 | import random
# Parameters for the randomly generated test array.
SIZE = 10
MIN_ITEM = 0
MAX_ITEM = 100

# Build the array with an explicit loop; same draws as the comprehension
# form (one randint call per element, in the same order).
array = []
for _ in range(SIZE):
    array.append(random.randint(MIN_ITEM, MAX_ITEM))
| true | true |
f7fae24c6c21df916e3345b6bc704fbb3c3986a6 | 597 | py | Python | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/metrics/cluster/unsupervised.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/metrics/cluster/unsupervised.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 37 | 2020-10-20T08:30:53.000Z | 2020-12-22T13:15:45.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/sklearn/metrics/cluster/unsupervised.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 13 | 2020-09-07T07:24:35.000Z | 2022-02-24T04:56:16.000Z |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _unsupervised # type: ignore
from ...externals._pep562 import Pep562
from ...utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.metrics.cluster.unsupervised'
correct_import_path = 'sklearn.metrics.cluster'

# Emit a deprecation warning for this module path (suppressed under pytest).
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward attribute lookups to the
    # private implementation module so existing imports keep working.
    return getattr(_unsupervised, name)


if not sys.version_info >= (3, 7):
    # PEP 562 module __getattr__ is only honoured natively on 3.7+;
    # older interpreters get the Pep562 backport wrapper instead.
    Pep562(__name__)
| 31.421053 | 70 | 0.802345 |
import sys
from . import _unsupervised
from ...externals._pep562 import Pep562
from ...utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.metrics.cluster.unsupervised'
correct_import_path = 'sklearn.metrics.cluster'

# Warn that this module path is deprecated (suppressed under pytest).
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)


def __getattr__(name):
    # PEP 562 hook: delegate unknown attribute lookups to the
    # private implementation module.
    attr = getattr(_unsupervised, name)
    return attr


if sys.version_info < (3, 7):
    # Pre-3.7 interpreters don't honour module __getattr__ natively;
    # install the backport wrapper.
    Pep562(__name__)
| true | true |
f7fae2d1bd94eb43bb2ecd9b1b3b04cd44fff7dc | 14,973 | py | Python | whatwhy/data_analysis/whatwhy_predictor.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | 2 | 2020-07-29T07:26:47.000Z | 2020-07-29T07:26:55.000Z | whatwhy/data_analysis/whatwhy_predictor.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | null | null | null | whatwhy/data_analysis/whatwhy_predictor.py | stevengt/whatwhy | bf6f87fd20eebfc89240835c3121ec3079632133 | [
"MIT"
] | null | null | null | import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from .seq2seq_model import Seq2SeqModel
from .vectorizer import TokenVectorizer
class WhatWhyPredictor():
    """
    Predicts a sequence of text which answers the question 'why?' given some input 'what'.

    The prediction model is trained by vectorizing lists of token sequences and passing
    the results to a Seq2SeqModel and calling its fit() method. After training, the
    predict() methods can be used to predict 'why' text from 'what' text.

    The Seq2SeqModel, vectorizers, and vectorized data sets can be specified manually
    or saved and loaded from files using the save/load methods.
    """

    def __init__( self, word2vec_model=None,
                        max_num_tokens_per_sample=10,
                        vocab_index=None ):
        """
        Creates a WhatWhyPredictor instance using the specified parameters.
        If no parameters are specified, then they should be loaded from
        a file using the load() methods.

        Params:
            word2vec_model            : [Optional] A pre-trained gensim Word2Vec model.
            max_num_tokens_per_sample : [Optional] Maximum number of tokens to include in a sample sequence.
                                        Any extra tokens will be truncated.
            vocab_index               : [Optional] A pre-built VocabularyIndex of the data set. This can
                                        help reduce the size of one-hot encoded words in the
                                        vocabulary, compared to that of pre-trained word2vec models.
        """
        self.word2vec_model = word2vec_model
        self.max_num_tokens_per_sample = max_num_tokens_per_sample
        self.vocab_index = vocab_index

        # Vectorizers and train/test splits are created lazily on first use
        # (or loaded from pickle files).
        self.what_token_vectorizer = None
        self.why_token_vectorizer = None
        self.X_train = None
        self.X_test = None
        self.Y_train = None
        self.Y_test = None
        self.indeces_train = None
        self.indeces_test = None

        # If word2vec_model is None, then the decoder should be loaded from a pickle file instead.
        if word2vec_model is not None:
            self.decoder = TokenVectorizer( word2vec_model=word2vec_model,
                                            num_tokens_per_sample=self.max_num_tokens_per_sample,
                                            vocab_index=self.vocab_index )

    @staticmethod
    def load_from_pickle_file(dir_name):
        """
        Loads a WhatWhyPredictor instance from a pickle
        file 'whatwhy_predictor.p' in the specified directory.
        """
        with open( os.path.join(dir_name, "whatwhy_predictor.p") , "rb" ) as in_file:
            return pickle.load(in_file)

    def fit_tokens( self, lists_of_what_tokens=None,
                          lists_of_why_tokens=None,
                          epochs=1,
                          batch_size=None ):
        """Trains a Seq2SeqModel on lists that contain sequences (lists) of 'what' and 'why' tokens."""
        X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
                                                                                                      lists_of_why_tokens=lists_of_why_tokens )
        self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test)
        self.seq2seq_model.fit(epochs=epochs, batch_size=batch_size)

    def predict(self, list_of_what_tokens):
        """
        Predicts a string of 'why' text from an input sequence of 'what' tokens.

        The following instance fields should be initialized or loaded before calling this method.
            word2vec_model
            max_num_tokens_per_sample
            seq2seq_model
            decoder
        """
        # Delegate to the batch method with a single-element batch.
        lists_of_what_tokens = [list_of_what_tokens]
        return self.predict_all(lists_of_what_tokens)[0]

    def predict_all(self, lists_of_what_tokens):
        """
        Predicts strings of 'why' text from input sequences of 'what' tokens.

        The following instance fields should be initialized or loaded before calling this method.
            word2vec_model
            max_num_tokens_per_sample
            seq2seq_model
            decoder
        """
        embedded_what_tokens = TokenVectorizer( word2vec_model=self.word2vec_model,
                                                tokens_lists=lists_of_what_tokens,
                                                num_tokens_per_sample=self.max_num_tokens_per_sample,
                                                vocab_index=self.vocab_index ).get_embeddings()
        one_hot_predictions = self.seq2seq_model.predict_all(embedded_what_tokens)
        predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
        return predictions

    def compare_predictions_to_actual(self, input_tokens, predictions, actual_vals):
        """Prints each 'what' input alongside its actual and predicted 'why' text."""
        for i, prediction in enumerate(predictions):
            print(f"'What' Input : { ' '.join(input_tokens[i]) }")
            print(f"'Why' Actual : { actual_vals[i] }")
            print(f"'Why' Predicted : { prediction }")
            print("---------------------------------------------")

    def compare_test_set_to_predictions(self, max_num_examples=None):
        """Prints model predictions for up to max_num_examples items from the test set."""
        if max_num_examples is None:
            max_num_examples = self.X_test.shape[0]
        X_test = self.X_test[:max_num_examples,:,:]
        Y_test = self.Y_test[:max_num_examples,:,:]
        indeces_test = self.indeces_test[:max_num_examples]
        # Recover the original (untruncated) input token lists via the stored indices.
        input_tokens_test = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_test ]
        actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_test)
        one_hot_predictions = self.seq2seq_model.predict_all(X_test)
        predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
        self.compare_predictions_to_actual(input_tokens_test, predictions, actual_vals)

    def compare_train_set_to_predictions(self, max_num_examples=None):
        """Prints model predictions for up to max_num_examples items from the training set."""
        if max_num_examples is None:
            max_num_examples = self.X_train.shape[0]
        X_train = self.X_train[:max_num_examples,:,:]
        Y_train = self.Y_train[:max_num_examples,:,:]
        indeces_train = self.indeces_train[:max_num_examples]
        # Recover the original (untruncated) input token lists via the stored indices.
        input_tokens_train = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_train ]
        actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_train)
        one_hot_predictions = self.seq2seq_model.predict_all(X_train)
        predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
        self.compare_predictions_to_actual(input_tokens_train, predictions, actual_vals)

    def get_what_and_why_token_vectorizers(self, lists_of_what_tokens=None, lists_of_why_tokens=None):
        """
        Returns TokenVectorizers for the lists of what/why token sequences.

        The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
        optionally 'vocab_index' should be initialized before calling this method.
        """
        # Lazily build the vectorizers on first access.
        if self.what_token_vectorizer is None or self.why_token_vectorizer is None:
            self.set_what_and_why_token_vectorizers_from_lists(lists_of_what_tokens, lists_of_why_tokens)
        return self.what_token_vectorizer, self.why_token_vectorizer

    def set_what_and_why_token_vectorizers_from_lists(self, lists_of_what_tokens, lists_of_why_tokens):
        """
        Initializes TokenVectorizers for the lists of what/why token sequences.

        The instance fields 'word2vec_model', 'max_num_tokens_per_sample', and
        optionally 'vocab_index' should be initialized before calling this method.
        """
        self.what_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
                                                      tokens_lists=lists_of_what_tokens,
                                                      num_tokens_per_sample=self.max_num_tokens_per_sample,
                                                      vocab_index=self.vocab_index )
        self.why_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
                                                     tokens_lists=lists_of_why_tokens,
                                                     num_tokens_per_sample=self.max_num_tokens_per_sample,
                                                     vocab_index=self.vocab_index )

    def get_train_and_test_data( self, lists_of_what_tokens=None,
                                       lists_of_why_tokens=None,
                                       test_size=0.20,
                                       random_state=42 ):
        """
        Splits a data set of what/why tokens into test and train sets
        if they have not already been separated.
        """
        if self.X_train is None or self.X_test is None or self.Y_train is None or self.Y_test is None:
            what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
            # Inputs are dense word embeddings; targets are one-hot encoded
            # so the model can be trained with a categorical loss.
            embedded_what_tokens = what_token_vectorizer.get_embeddings()
            one_hot_why_tokens = why_token_vectorizer.get_one_hot_encodings()
            # Track sample indices through the split so predictions can be
            # mapped back to the original token lists later.
            indeces = np.arange( len(what_token_vectorizer.tokens_lists) )
            self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test = train_test_split( embedded_what_tokens,
                                                                                                                            one_hot_why_tokens,
                                                                                                                            indeces,
                                                                                                                            test_size=test_size,
                                                                                                                            random_state=random_state )
        return self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test

    def save_to_pickle_file(self, dir_name):
        """
        Saves the WhatWhyPredictor instance to a pickle
        file 'whatwhy_predictor.p' in the specified directory.
        """
        # NOTE(review): os.mkdir fails if the parent directory is missing —
        # presumably callers always pass a one-level path; confirm, or
        # consider os.makedirs(dir_name, exist_ok=True).
        if not os.path.isdir(dir_name):
            os.mkdir(dir_name)
        with open( os.path.join(dir_name, "whatwhy_predictor.p") , "wb" ) as out_file:
            pickle.dump(self, out_file, protocol=4)

    def save_seq2seq_model(self, model_dir):
        """
        Saves the underlying tensorflow.keras model's weights to
        a file 'model.h5' in the specified directory.
        """
        self.seq2seq_model.save_model(model_dir)

    def load_seq2seq_model_from_saved_tf_model(self, model_dir):
        """
        Intializes the Seq2SeqModel by loading weights from
        a file 'model.h5' in the specified directory.
        """
        # The train/test data must be available (or loadable) since the
        # Seq2SeqModel is constructed around it before loading weights.
        X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data()
        self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test).load_from_saved_tf_model(model_dir)

    def save_train_and_test_data_to_pickle_files(self, dir_name, lists_of_what_tokens=None, lists_of_why_tokens=None):
        """
        Splits a data set of what/why tokens into test and train sets
        if they have not already been separated and saves them in pickle files.
        """
        X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
                                                                                                      lists_of_why_tokens=lists_of_why_tokens )
        if not os.path.isdir(dir_name):
            os.mkdir(dir_name)
        # Protocol 4 supports large objects (> 4 GB) and is readable on Python 3.4+.
        with open( os.path.join(dir_name, "X_train.p") , "wb" ) as out_file:
            pickle.dump(X_train, out_file, protocol=4)
        with open( os.path.join(dir_name, "X_test.p") , "wb" ) as out_file:
            pickle.dump(X_test, out_file, protocol=4)
        with open( os.path.join(dir_name, "Y_train.p") , "wb" ) as out_file:
            pickle.dump(Y_train, out_file, protocol=4)
        with open( os.path.join(dir_name, "Y_test.p") , "wb" ) as out_file:
            pickle.dump(Y_test, out_file, protocol=4)
        with open( os.path.join(dir_name, "indeces_train.p") , "wb" ) as out_file:
            pickle.dump(indeces_train, out_file, protocol=4)
        with open( os.path.join(dir_name, "indeces_test.p") , "wb" ) as out_file:
            pickle.dump(indeces_test, out_file, protocol=4)

    def load_train_and_test_data_from_pickle_files(self, dir_name):
        """Loads the train/test splits previously saved by save_train_and_test_data_to_pickle_files()."""
        with open( os.path.join(dir_name, "X_train.p") , "rb" ) as in_file:
            self.X_train = pickle.load(in_file)
        with open( os.path.join(dir_name, "X_test.p") , "rb" ) as in_file:
            self.X_test = pickle.load(in_file)
        with open( os.path.join(dir_name, "Y_train.p") , "rb" ) as in_file:
            self.Y_train = pickle.load(in_file)
        with open( os.path.join(dir_name, "Y_test.p") , "rb" ) as in_file:
            self.Y_test = pickle.load(in_file)
        with open( os.path.join(dir_name, "indeces_train.p") , "rb" ) as in_file:
            self.indeces_train = pickle.load(in_file)
        with open( os.path.join(dir_name, "indeces_test.p") , "rb" ) as in_file:
            self.indeces_test = pickle.load(in_file)

    def save_token_vectorizers_to_pickle_files(self, target_dir, lists_of_what_tokens=None, lists_of_why_tokens=None):
        """Saves the what/why TokenVectorizers and the decoder as pickle files in target_dir."""
        if not os.path.isdir(target_dir):
            os.mkdir(target_dir)
        what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
        # Force the (lazily computed) encodings to be built before pickling.
        what_token_vectorizer.get_embeddings()
        why_token_vectorizer.get_one_hot_encodings()
        what_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "what_tokenizer.p") )
        why_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "why_tokenizer.p") )
        self.decoder.save_to_pickle_file( os.path.join(target_dir, "decoder.p") )

    def load_token_vectorizers_from_pickle_files(self, dir_name):
        """Loads the what/why TokenVectorizers and decoder, and restores dependent fields from the decoder."""
        self.what_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "what_tokenizer.p") )
        self.why_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "why_tokenizer.p") )
        self.decoder = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "decoder.p") )
        # The decoder carries the model/vocab configuration, so mirror it here.
        self.word2vec_model = self.decoder.word2vec_model
        self.vocab_index = self.decoder.vocab_index
        self.max_num_tokens_per_sample = self.decoder.num_tokens_per_sample
| 55.66171 | 151 | 0.629199 | import os
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from .seq2seq_model import Seq2SeqModel
from .vectorizer import TokenVectorizer
class WhatWhyPredictor():
def __init__( self, word2vec_model=None,
max_num_tokens_per_sample=10,
vocab_index=None ):
self.word2vec_model = word2vec_model
self.max_num_tokens_per_sample = max_num_tokens_per_sample
self.vocab_index = vocab_index
self.what_token_vectorizer = None
self.why_token_vectorizer = None
self.X_train = None
self.X_test = None
self.Y_train = None
self.Y_test = None
self.indeces_train = None
self.indeces_test = None
if word2vec_model is not None:
self.decoder = TokenVectorizer( word2vec_model=word2vec_model,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
@staticmethod
def load_from_pickle_file(dir_name):
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "rb" ) as in_file:
return pickle.load(in_file)
def fit_tokens( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
epochs=1,
batch_size=None ):
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test)
self.seq2seq_model.fit(epochs=epochs, batch_size=batch_size)
def predict(self, list_of_what_tokens):
lists_of_what_tokens = [list_of_what_tokens]
return self.predict_all(lists_of_what_tokens)[0]
def predict_all(self, lists_of_what_tokens):
embedded_what_tokens = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index ).get_embeddings()
one_hot_predictions = self.seq2seq_model.predict_all(embedded_what_tokens)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
return predictions
def compare_predictions_to_actual(self, input_tokens, predictions, actual_vals):
for i, prediction in enumerate(predictions):
print(f"'What' Input : { ' '.join(input_tokens[i]) }")
print(f"'Why' Actual : { actual_vals[i] }")
print(f"'Why' Predicted : { prediction }")
print("---------------------------------------------")
def compare_test_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_test.shape[0]
X_test = self.X_test[:max_num_examples,:,:]
Y_test = self.Y_test[:max_num_examples,:,:]
indeces_test = self.indeces_test[:max_num_examples]
input_tokens_test = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_test ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_test)
one_hot_predictions = self.seq2seq_model.predict_all(X_test)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_test, predictions, actual_vals)
def compare_train_set_to_predictions(self, max_num_examples=None):
if max_num_examples is None:
max_num_examples = self.X_train.shape[0]
X_train = self.X_train[:max_num_examples,:,:]
Y_train = self.Y_train[:max_num_examples,:,:]
indeces_train = self.indeces_train[:max_num_examples]
input_tokens_train = [ self.what_token_vectorizer.tokens_lists[index] for index in indeces_train ]
actual_vals = self.decoder.decode_multiple_one_hot_samples(Y_train)
one_hot_predictions = self.seq2seq_model.predict_all(X_train)
predictions = self.decoder.decode_multiple_one_hot_samples(one_hot_predictions)
self.compare_predictions_to_actual(input_tokens_train, predictions, actual_vals)
def get_what_and_why_token_vectorizers(self, lists_of_what_tokens=None, lists_of_why_tokens=None):
if self.what_token_vectorizer is None or self.why_token_vectorizer is None:
self.set_what_and_why_token_vectorizers_from_lists(lists_of_what_tokens, lists_of_why_tokens)
return self.what_token_vectorizer, self.why_token_vectorizer
def set_what_and_why_token_vectorizers_from_lists(self, lists_of_what_tokens, lists_of_why_tokens):
self.what_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_what_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
self.why_token_vectorizer = TokenVectorizer( word2vec_model=self.word2vec_model,
tokens_lists=lists_of_why_tokens,
num_tokens_per_sample=self.max_num_tokens_per_sample,
vocab_index=self.vocab_index )
def get_train_and_test_data( self, lists_of_what_tokens=None,
lists_of_why_tokens=None,
test_size=0.20,
random_state=42 ):
if self.X_train is None or self.X_test is None or self.Y_train is None or self.Y_test is None:
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
embedded_what_tokens = what_token_vectorizer.get_embeddings()
one_hot_why_tokens = why_token_vectorizer.get_one_hot_encodings()
indeces = np.arange( len(what_token_vectorizer.tokens_lists) )
self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test = train_test_split( embedded_what_tokens,
one_hot_why_tokens,
indeces,
test_size=test_size,
random_state=random_state )
return self.X_train, self.X_test, self.Y_train, self.Y_test, self.indeces_train, self.indeces_test
def save_to_pickle_file(self, dir_name):
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "whatwhy_predictor.p") , "wb" ) as out_file:
pickle.dump(self, out_file, protocol=4)
    def save_seq2seq_model(self, model_dir):
        """Persist the underlying seq2seq model to `model_dir` (delegates to the model)."""
        self.seq2seq_model.save_model(model_dir)
def load_seq2seq_model_from_saved_tf_model(self, model_dir):
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data()
self.seq2seq_model = Seq2SeqModel(X_train, X_test, Y_train, Y_test).load_from_saved_tf_model(model_dir)
def save_train_and_test_data_to_pickle_files(self, dir_name, lists_of_what_tokens=None, lists_of_why_tokens=None):
X_train, X_test, Y_train, Y_test, indeces_train, indeces_test = self.get_train_and_test_data( lists_of_what_tokens=lists_of_what_tokens,
lists_of_why_tokens=lists_of_why_tokens )
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
with open( os.path.join(dir_name, "X_train.p") , "wb" ) as out_file:
pickle.dump(X_train, out_file, protocol=4)
with open( os.path.join(dir_name, "X_test.p") , "wb" ) as out_file:
pickle.dump(X_test, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_train.p") , "wb" ) as out_file:
pickle.dump(Y_train, out_file, protocol=4)
with open( os.path.join(dir_name, "Y_test.p") , "wb" ) as out_file:
pickle.dump(Y_test, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_train.p") , "wb" ) as out_file:
pickle.dump(indeces_train, out_file, protocol=4)
with open( os.path.join(dir_name, "indeces_test.p") , "wb" ) as out_file:
pickle.dump(indeces_test, out_file, protocol=4)
def load_train_and_test_data_from_pickle_files(self, dir_name):
with open( os.path.join(dir_name, "X_train.p") , "rb" ) as in_file:
self.X_train = pickle.load(in_file)
with open( os.path.join(dir_name, "X_test.p") , "rb" ) as in_file:
self.X_test = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_train.p") , "rb" ) as in_file:
self.Y_train = pickle.load(in_file)
with open( os.path.join(dir_name, "Y_test.p") , "rb" ) as in_file:
self.Y_test = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_train.p") , "rb" ) as in_file:
self.indeces_train = pickle.load(in_file)
with open( os.path.join(dir_name, "indeces_test.p") , "rb" ) as in_file:
self.indeces_test = pickle.load(in_file)
def save_token_vectorizers_to_pickle_files(self, target_dir, lists_of_what_tokens=None, lists_of_why_tokens=None):
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
what_token_vectorizer, why_token_vectorizer = self.get_what_and_why_token_vectorizers(lists_of_what_tokens, lists_of_why_tokens)
what_token_vectorizer.get_embeddings()
why_token_vectorizer.get_one_hot_encodings()
what_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "what_tokenizer.p") )
why_token_vectorizer.save_to_pickle_file( os.path.join(target_dir, "why_tokenizer.p") )
self.decoder.save_to_pickle_file( os.path.join(target_dir, "decoder.p") )
def load_token_vectorizers_from_pickle_files(self, dir_name):
self.what_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "what_tokenizer.p") )
self.why_token_vectorizer = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "why_tokenizer.p") )
self.decoder = TokenVectorizer.load_from_pickle_file( os.path.join(dir_name, "decoder.p") )
self.word2vec_model = self.decoder.word2vec_model
self.vocab_index = self.decoder.vocab_index
self.max_num_tokens_per_sample = self.decoder.num_tokens_per_sample
| true | true |
f7fae3098295008416850842b04bffa80d9dbca2 | 972 | py | Python | thenounproject/user.py | yakupadakli/python-thenounproject | 937e89e37921c9224713ee64d3b54061f11d3333 | [
"MIT"
] | 4 | 2017-12-29T19:00:34.000Z | 2021-11-06T04:58:05.000Z | thenounproject/user.py | yakupadakli/python-thenounproject | 937e89e37921c9224713ee64d3b54061f11d3333 | [
"MIT"
] | null | null | null | thenounproject/user.py | yakupadakli/python-thenounproject | 937e89e37921c9224713ee64d3b54061f11d3333 | [
"MIT"
] | null | null | null | # coding=utf-8
from thenounproject.client import Client
from thenounproject.models import Collection as CollectionModel
from thenounproject.models import Icon as IconModel
class User(Client):
    """API endpoints scoped to a single Noun Project user."""

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Base path segment shared by every user endpoint.
        self.url = "user"

    def collections(self, user_id):
        """Return all collections owned by `user_id`."""
        response = self._get("/%s/%s/collections" % (self.url, user_id))
        return CollectionModel.parse_list(response.get("collections"))

    def collection_by_slug(self, user_id, collection_slug):
        """Return a single collection of `user_id`, looked up by its slug."""
        endpoint = "/%s/%s/collections/%s" % (self.url, user_id, collection_slug)
        response = self._get(endpoint)
        return CollectionModel.parse(response.get("collection"))

    def uploads(self, user_slug, limit=None, offset=None, page=None):
        """Return icons uploaded by `user_slug`, optionally paginated."""
        pagination = {"limit": limit, "offset": offset, "page": page}
        response = self._get("/%s/%s/uploads" % (self.url, user_slug), params=pagination)
        return IconModel.parse_list(response.get("uploads"))
| 38.88 | 90 | 0.685185 |
from thenounproject.client import Client
from thenounproject.models import Collection as CollectionModel
from thenounproject.models import Icon as IconModel
class User(Client):
    """Client for user-scoped Noun Project API endpoints."""

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Base path segment shared by every user endpoint.
        self.url = "user"

    def collections(self, user_id):
        """Return all collections belonging to the given user id."""
        result = self._get("/%s/%s/collections" % (self.url, user_id))
        return CollectionModel.parse_list(result.get("collections"))

    def collection_by_slug(self, user_id, collection_slug):
        """Return one of the user's collections, identified by its slug."""
        result = self._get("/%s/%s/collections/%s" % (self.url, user_id, collection_slug))
        return CollectionModel.parse(result.get("collection"))

    def uploads(self, user_slug, limit=None, offset=None, page=None):
        """Return icons uploaded by the user; limit/offset/page control paging."""
        params = {"limit": limit, "offset": offset, "page": page}
        result = self._get("/%s/%s/uploads" % (self.url, user_slug), params=params)
        return IconModel.parse_list(result.get("uploads"))
| true | true |
f7fae3c57b300b76afd8e5a485439aee9fefdbbe | 4,373 | py | Python | pfe/player-feature-extractor/torchreid/utils/loggers.py | dimahwang88/py-mcftracker | b7e845efa3c0f560fe59f2d1c8765087774e78e5 | [
"MIT"
] | 3,465 | 2018-03-12T23:07:01.000Z | 2022-03-31T10:12:55.000Z | pfe/player-feature-extractor/torchreid/utils/loggers.py | dimahwang88/py-mcftracker | b7e845efa3c0f560fe59f2d1c8765087774e78e5 | [
"MIT"
] | 477 | 2018-03-27T14:37:55.000Z | 2022-03-28T07:21:53.000Z | pfe/player-feature-extractor/torchreid/utils/loggers.py | dimahwang88/py-mcftracker | b7e845efa3c0f560fe59f2d1c8765087774e78e5 | [
"MIT"
] | 990 | 2018-03-15T00:37:16.000Z | 2022-03-27T14:30:00.000Z | from __future__ import absolute_import
import os
import sys
import os.path as osp
from .tools import mkdir_if_missing
__all__ = ['Logger', 'RankLogger']
class Logger(object):
"""Writes console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
Args:
fpath (str): directory to save logging file.
Examples::
>>> import sys
>>> import os
>>> import os.path as osp
>>> from torchreid.utils import Logger
>>> save_dir = 'log/resnet50-softmax-market1501'
>>> log_name = 'train.log'
>>> sys.stdout = Logger(osp.join(args.save_dir, log_name))
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
class RankLogger(object):
    """Tracks rank-1 matching accuracy per test dataset across evaluation steps.

    Call :meth:`write` after each evaluation and :meth:`show_summary` at the
    end to print every recorded (epoch, rank1) pair, grouped by dataset and
    annotated with whether the dataset is a source or a target.

    Args:
        sources (str or list): source dataset name(s).
        targets (str or list): target dataset name(s).

    Examples::
        >>> from torchreid.utils import RankLogger
        >>> ranklogger = RankLogger('market1501', 'market1501')
        >>> ranklogger.write('market1501', 10, 0.5)
        >>> ranklogger.show_summary()
    """

    def __init__(self, sources, targets):
        # Normalize single dataset names to one-element lists.
        self.sources = [sources] if isinstance(sources, str) else sources
        self.targets = [targets] if isinstance(targets, str) else targets
        # One (epoch, rank1) history per target dataset.
        self.logger = {name: {'epoch': [], 'rank1': []} for name in self.targets}

    def write(self, name, epoch, rank1):
        """Record `rank1` accuracy measured on dataset `name` at `epoch`.

        Args:
            name (str): dataset name.
            epoch (int): current epoch.
            rank1 (float): rank1 result.
        """
        history = self.logger[name]
        history['epoch'].append(epoch)
        history['rank1'].append(rank1)

    def show_summary(self):
        """Print all recorded results, one section per target dataset."""
        print('=> Show performance summary')
        for name in self.targets:
            role = 'source' if name in self.sources else 'target'
            print('{} ({})'.format(name, role))
            history = self.logger[name]
            for epoch, rank1 in zip(history['epoch'], history['rank1']):
                print('- epoch {}\t rank1 {:.1%}'.format(epoch, rank1))
| 29.748299 | 90 | 0.536931 | from __future__ import absolute_import
import os
import sys
import os.path as osp
from .tools import mkdir_if_missing
__all__ = ['Logger', 'RankLogger']
class Logger(object):
    """Tee for console output: everything written goes to stdout and,
    optionally, to a log file at `fpath`."""

    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            mkdir_if_missing(osp.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # NOTE(review): returns None, so `with Logger(...) as l:` binds None —
        # confirm whether callers rely on the bound name.
        pass

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        """Write `msg` to the console and to the log file if one is open."""
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        """Flush both sinks; fsync forces the log file onto disk immediately."""
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            os.fsync(self.file.fileno())

    def close(self):
        """Close the console stream and the log file (if any)."""
        self.console.close()
        if self.file is not None:
            self.file.close()
class RankLogger(object):
    """Records rank-1 accuracy per target dataset at each evaluation step and
    prints a grouped summary via :meth:`show_summary`."""

    def __init__(self, sources, targets):
        self.sources = sources
        self.targets = targets

        # Normalize single dataset names to one-element lists.
        if isinstance(self.sources, str):
            self.sources = [self.sources]

        if isinstance(self.targets, str):
            self.targets = [self.targets]

        # One (epoch, rank1) history per target dataset.
        self.logger = {
            name: {
                'epoch': [],
                'rank1': []
            }
            for name in self.targets
        }

    def write(self, name, epoch, rank1):
        """Record `rank1` accuracy measured on dataset `name` at `epoch`."""
        self.logger[name]['epoch'].append(epoch)
        self.logger[name]['rank1'].append(rank1)

    def show_summary(self):
        """Print all recorded results, one section per target dataset."""
        print('=> Show performance summary')
        for name in self.targets:
            from_where = 'source' if name in self.sources else 'target'
            print('{} ({})'.format(name, from_where))
            for epoch, rank1 in zip(
                self.logger[name]['epoch'], self.logger[name]['rank1']
            ):
                print('- epoch {}\t rank1 {:.1%}'.format(epoch, rank1))
| true | true |
f7fae43b47e0e86cb363499010b040240a6b1465 | 705 | py | Python | master/bopytest-code/code/tasks_proj/src/tasks/config.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/bopytest-code/code/tasks_proj/src/tasks/config.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/bopytest-code/code/tasks_proj/src/tasks/config.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | from collections import namedtuple
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import os
# Resolved configuration: where the task database lives and which backend to use.
TasksConfig = namedtuple('TasksConfig', ['db_path', 'db_type',])


def get_config():
    """Read ~/.tasks.config and return a TasksConfig.

    Falls back to `~/tasks_db/` with the 'tiny' backend when no config file
    exists. The database path is always user-expanded.
    """
    config_file = os.path.expanduser('~/.tasks.config')
    if os.path.exists(config_file):
        parser = ConfigParser()
        parser.read(config_file)
        db_path = parser.get('TASKS', 'tasks_db_path')
        db_type = parser.get('TASKS', 'tasks_db_type')
    else:
        db_path = '~/tasks_db/'
        db_type = 'tiny'
    return TasksConfig(os.path.expanduser(db_path), db_type)
| 30.652174 | 64 | 0.706383 | from collections import namedtuple
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import os
# Resolved configuration: database location and backend type.
TasksConfig = namedtuple('TasksConfig', ['db_path', 'db_type',])


def get_config():
    """Load task settings from ~/.tasks.config, defaulting to a 'tiny'
    backend at ~/tasks_db/ when the file is absent."""
    parser = ConfigParser()
    config_file = os.path.expanduser('~/.tasks.config')
    if not os.path.exists(config_file):
        # Defaults used when no config file is present.
        tasks_db_path = '~/tasks_db/'
        tasks_db_type = 'tiny'
    else:
        parser.read(config_file)
        tasks_db_path = parser.get('TASKS', 'tasks_db_path')
        tasks_db_type = parser.get('TASKS', 'tasks_db_type')
    # Expand '~' regardless of where the path came from.
    tasks_db_path = os.path.expanduser(tasks_db_path)
    return TasksConfig(tasks_db_path, tasks_db_type)
| true | true |
f7fae46156e3aa1f08dcb127807cd3a96393759c | 56,049 | py | Python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/operations/_domains_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/operations/_domains_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/operations/_domains_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
"""DomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send requests, plus the msrest
        # (de)serializers and configuration shared by every operation
        # in this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def check_availability(
        self,
        identifier: "_models.NameIdentifier",
        **kwargs
    ) -> "_models.DomainAvailabilityCheckResult":
        """Check if a domain is available for registration.

        Description for Check if a domain is available for registration.

        :param identifier: Name of the domain.
        :type identifier: ~azure.mgmt.web.v2020_09_01.models.NameIdentifier
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainAvailabilityCheckResult, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_09_01.models.DomainAvailabilityCheckResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainAvailabilityCheckResult"]
        # Statuses listed here raise specific azure-core exception types;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.check_availability.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the NameIdentifier into the POST body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(identifier, 'NameIdentifier')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DomainAvailabilityCheckResult', pipeline_response)

        if cls:
            # Caller-supplied hook receives the raw response alongside the model.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'}  # type: ignore
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.DomainCollection"]:
        """Get all domains in a subscription.

        Description for Get all domains in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation metadata.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service returns an absolute next_link
                # that already carries any required query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DomainCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily pages through the collection as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'}  # type: ignore
    async def get_control_center_sso_request(
        self,
        **kwargs
    ) -> "_models.DomainControlCenterSsoRequest":
        """Generate a single sign-on request for the domain management portal.

        Description for Generate a single sign-on request for the domain management portal.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DomainControlCenterSsoRequest, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_09_01.models.DomainControlCenterSsoRequest
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainControlCenterSsoRequest"]
        # Statuses listed here raise specific azure-core exception types;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_control_center_sso_request.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # POST with no body: the service derives the SSO request from the
        # authenticated subscription context.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'}  # type: ignore
    def list_recommendations(
        self,
        parameters: "_models.DomainRecommendationSearchParameters",
        **kwargs
    ) -> AsyncIterable["_models.NameIdentifierCollection"]:
        """Get domain name recommendations based on keywords.

        Description for Get domain name recommendations based on keywords.

        :param parameters: Search parameters for domain name recommendations.
        :type parameters: ~azure.mgmt.web.v2020_09_01.models.DomainRecommendationSearchParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NameIdentifierCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.NameIdentifierCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NameIdentifierCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = "application/json"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: POST the search parameters to the list endpoint.
                # Construct URL
                url = self.list_recommendations.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                # NOTE(review): generated code re-sends the search body with a
                # GET on next_link pages — AutoRest paging quirk; confirm the
                # service accepts it before changing.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                body_content_kwargs['content'] = body_content
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NameIdentifierCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily pages through the recommendations as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.DomainCollection"]:
        """Get all domains in a resource group.

        Description for Get all domains in a resource group.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation metadata.
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    # Pattern/length constraints mirror the ARM resource-group naming rules.
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: next_link is absolute and self-contained.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DomainCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'}  # type: ignore
async def get(
self,
resource_group_name: str,
domain_name: str,
**kwargs
) -> "_models.Domain":
"""Get a domain.
Description for Get a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.Domain",
**kwargs
) -> "_models.Domain":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(domain, 'Domain')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.Domain",
        **kwargs
    ) -> AsyncLROPoller["_models.Domain"]:
        """Creates or updates a domain.
        Description for Creates or updates a domain.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of the domain.
        :type domain_name: str
        :param domain: Domain registration information.
        :type domain: ~azure.mgmt.web.v2020_09_01.models.Domain
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Domain or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2020_09_01.models.Domain]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Polling strategy: True -> default async ARM polling, False -> no
        # polling, anything else -> caller-supplied AsyncPollingMethod.
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Domain"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Issue the initial PUT only when not resuming from a saved poller
        # state; cls is overridden so the raw PipelineResponse reaches the poller.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                domain=domain,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful to the initial request; drop them
        # so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final-state deserializer invoked by the poller once the LRO completes.
            deserialized = self._deserialize('Domain', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Path arguments are re-supplied so the polling method can resolve
        # relative polling URLs returned by the service.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token;
            # no initial request was made in this case.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}  # type: ignore
async def delete(
self,
resource_group_name: str,
domain_name: str,
force_hard_delete_domain: Optional[bool] = None,
**kwargs
) -> None:
"""Delete a domain.
Description for Delete a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param force_hard_delete_domain: Specify :code:`<code>true</code>` to delete the domain
immediately. The default is :code:`<code>false</code>` which deletes the domain after 24 hours.
:type force_hard_delete_domain: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if force_hard_delete_domain is not None:
query_parameters['forceHardDeleteDomain'] = self._serialize.query("force_hard_delete_domain", force_hard_delete_domain, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
async def update(
self,
resource_group_name: str,
domain_name: str,
domain: "_models.DomainPatchResource",
**kwargs
) -> "_models.Domain":
"""Creates or updates a domain.
Description for Creates or updates a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain: Domain registration information.
:type domain: ~azure.mgmt.web.v2020_09_01.models.DomainPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Domain, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.Domain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Domain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(domain, 'DomainPatchResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Domain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Domain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'} # type: ignore
    def list_ownership_identifiers(
        self,
        resource_group_name: str,
        domain_name: str,
        **kwargs
    ) -> AsyncIterable["_models.DomainOwnershipIdentifierCollection"]:
        """Lists domain ownership identifiers.
        Description for Lists domain ownership identifiers.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param domain_name: Name of domain.
        :type domain_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DomainOwnershipIdentifierCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifierCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DomainOwnershipIdentifierCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the HTTP request for one page: the templated URL for the
            # first page, or the service-provided next_link verbatim afterwards.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_ownership_identifiers.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'domainName': self._serialize.url("domain_name", domain_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to the pager.
            deserialized = self._deserialize('DomainOwnershipIdentifierCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and raise on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'}  # type: ignore
async def get_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs
) -> "_models.DomainOwnershipIdentifier":
"""Get ownership identifier for domain.
Description for Get ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_ownership_identifier.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
async def create_or_update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing identifer.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifer.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_ownership_identifier.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
async def delete_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
**kwargs
) -> None:
"""Delete ownership identifier for domain.
Description for Delete ownership identifier for domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.delete_ownership_identifier.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
async def update_ownership_identifier(
self,
resource_group_name: str,
domain_name: str,
name: str,
domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
**kwargs
) -> "_models.DomainOwnershipIdentifier":
"""Creates an ownership identifier for a domain or updates identifier details for an existing identifer.
Description for Creates an ownership identifier for a domain or updates identifier details for
an existing identifer.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of domain.
:type domain_name: str
:param name: Name of identifier.
:type name: str
:param domain_ownership_identifier: A JSON representation of the domain ownership properties.
:type domain_ownership_identifier: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainOwnershipIdentifier, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_09_01.models.DomainOwnershipIdentifier
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainOwnershipIdentifier"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_ownership_identifier.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'} # type: ignore
async def renew(
self,
resource_group_name: str,
domain_name: str,
**kwargs
) -> None:
"""Renew a domain.
Description for Renew a domain.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.renew.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'} # type: ignore
| 50.04375 | 240 | 0.667684 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')  # generic return type of the optional per-call 'cls' callback
# Signature of the optional 'cls' kwarg accepted by every operation: it receives
# the raw pipeline response, the deserialized body, and a headers dict, and its
# return value replaces the operation's normal return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
    """Async operations for Azure App Service domain registration
    (``Microsoft.DomainRegistration`` resource provider, API version 2020-09-01).
    Instances are created by the service client; do not instantiate directly.
    """
    models = _models  # convenience alias so callers can reach the models module
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client  # pipeline client used to build and send requests
        self._serialize = serializer  # serializes bodies, URL parts, query/header values
        self._deserialize = deserializer  # deserializes response bodies into model objects
        self._config = config  # holds subscription_id, polling_interval, credentials, etc.
    async def check_availability(
        self,
        identifier: "_models.NameIdentifier",
        **kwargs
    ) -> "_models.DomainAvailabilityCheckResult":
        """Check if a domain name is available for registration.

        :param identifier: Name of the domain to check.
        :return: DomainAvailabilityCheckResult, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct the URL from the operation's metadata template.
        url = self.check_availability.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct query parameters.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers.
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and POST through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(identifier, 'NameIdentifier')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # map_error raises a mapped exception for known status codes; otherwise
            # fall through to a generic HttpResponseError carrying the error body.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainAvailabilityCheckResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    check_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability'}
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.DomainCollection"]:
        """List all domains in the subscription.

        :return: An async iterator of domains (paged via DomainCollection).
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback applied to each page's element list
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the templated URL with api-version.
            # Subsequent pages: follow the service-provided next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation link or None, items).
            deserialized = self._deserialize('DomainCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and raise on error before extraction.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains'}
async def get_control_center_sso_request(
self,
**kwargs
) -> "_models.DomainControlCenterSsoRequest":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
url = self.get_control_center_sso_request.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainControlCenterSsoRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_control_center_sso_request.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest'}
    def list_recommendations(
        self,
        parameters: "_models.DomainRecommendationSearchParameters",
        **kwargs
    ) -> AsyncIterable["_models.NameIdentifierCollection"]:
        """Get domain name recommendations based on keywords.

        :param parameters: Search parameters for the recommendations.
        :return: An async iterator of name identifiers (paged via NameIdentifierCollection).
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback applied to each page's element list
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = "application/json"
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: POST the search parameters to the templated URL.
                url = self.list_recommendations.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                body_content_kwargs = {}
                body_content = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                # NOTE(review): generated code re-serializes and attaches the body to the
                # next_link GET as well — quirk of the code generator; confirm before changing.
                url = next_link
                query_parameters = {}
                body_content_kwargs = {}
                body_content = self._serialize.body(parameters, 'DomainRecommendationSearchParameters')
                body_content_kwargs['content'] = body_content
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation link or None, items).
            deserialized = self._deserialize('NameIdentifierCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_recommendations.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations'}
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.DomainCollection"]:
        """List all domains in a resource group.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :return: An async iterator of domains (paged via DomainCollection).
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback applied to each page's element list
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page builds the templated URL; later pages follow next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation link or None, items).
            deserialized = self._deserialize('DomainCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains'}
    async def get(
        self,
        resource_group_name: str,
        domain_name: str,
        **kwargs
    ) -> "_models.Domain":
        """Get a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :return: Domain, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Domain', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.Domain",
        **kwargs
    ) -> "_models.Domain":
        """Initial PUT request of the create-or-update long-running operation.

        Called by :meth:`begin_create_or_update`; a 202 means the operation is
        still in progress and will be tracked by the LRO poller.

        :raises ~azure.core.exceptions.HttpResponseError: on responses other than 200/202.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the Domain body and PUT through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(domain, 'Domain')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 and 202 carry a Domain body (generated code keeps the branches separate).
        if response.status_code == 200:
            deserialized = self._deserialize('Domain', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Domain', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.Domain",
        **kwargs
    ) -> AsyncLROPoller["_models.Domain"]:
        """Start a long-running operation that creates or updates a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param domain: Domain registration information.
        :keyword polling: True (default ARM polling), False (no polling), or a custom
         AsyncPollingMethod instance.
        :keyword continuation_token: Token to rebuild a poller from a previous call.
        :return: AsyncLROPoller resolving to Domain.
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # optional callback to customize the final value
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No continuation token: issue the initial PUT. The cls lambda keeps the
            # raw pipeline response so the poller can track the operation.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                domain_name=domain_name,
                domain=domain,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only apply to the initial request, not to polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once the LRO completes.
            deserialized = self._deserialize('Domain', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}
    async def delete(
        self,
        resource_group_name: str,
        domain_name: str,
        force_hard_delete_domain: Optional[bool] = None,
        **kwargs
    ) -> None:
        """Delete a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param force_hard_delete_domain: If True, delete immediately instead of
         holding the domain until the end of the registration period; omitted from
         the request when None.
        :raises ~azure.core.exceptions.HttpResponseError: on responses other than 200/204.
        """
        cls = kwargs.pop('cls', None)  # optional callback invoked with the raw response
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        if force_hard_delete_domain is not None:
            query_parameters['forceHardDeleteDomain'] = self._serialize.query("force_hard_delete_domain", force_hard_delete_domain, 'bool')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}
    async def update(
        self,
        resource_group_name: str,
        domain_name: str,
        domain: "_models.DomainPatchResource",
        **kwargs
    ) -> "_models.Domain":
        """Update (PATCH) a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param domain: Domain registration information (patch resource).
        :return: Domain, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on responses other than 200/202.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str', pattern=r'[a-zA-Z0-9][a-zA-Z0-9\.-]+'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the patch body and PATCH through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(domain, 'DomainPatchResource')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 and 202 carry a Domain body (generated code keeps the branches separate).
        if response.status_code == 200:
            deserialized = self._deserialize('Domain', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Domain', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}'}
    def list_ownership_identifiers(
        self,
        resource_group_name: str,
        domain_name: str,
        **kwargs
    ) -> AsyncIterable["_models.DomainOwnershipIdentifierCollection"]:
        """List ownership identifiers for a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :return: An async iterator of ownership identifiers (paged).
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback applied to each page's element list
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page builds the templated URL; later pages follow next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_ownership_identifiers.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'domainName': self._serialize.url("domain_name", domain_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation link or None, items).
            deserialized = self._deserialize('DomainOwnershipIdentifierCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_ownership_identifiers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers'}
    async def get_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        **kwargs
    ) -> "_models.DomainOwnershipIdentifier":
        """Get an ownership identifier for a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param name: Name of the ownership identifier.
        :return: DomainOwnershipIdentifier, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.get_ownership_identifier.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}
    async def create_or_update_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
        **kwargs
    ) -> "_models.DomainOwnershipIdentifier":
        """Create or update (PUT) an ownership identifier for a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param name: Name of the ownership identifier.
        :param domain_ownership_identifier: The ownership identifier to create or update.
        :return: DomainOwnershipIdentifier, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.create_or_update_ownership_identifier.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the body and PUT through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}
    async def delete_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        **kwargs
    ) -> None:
        """Delete an ownership identifier for a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param name: Name of the ownership identifier.
        :raises ~azure.core.exceptions.HttpResponseError: on responses other than 200/204.
        """
        cls = kwargs.pop('cls', None)  # optional callback invoked with the raw response
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.delete_ownership_identifier.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}
    async def update_ownership_identifier(
        self,
        resource_group_name: str,
        domain_name: str,
        name: str,
        domain_ownership_identifier: "_models.DomainOwnershipIdentifier",
        **kwargs
    ) -> "_models.DomainOwnershipIdentifier":
        """Update (PATCH) an ownership identifier for a domain.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :param domain_name: Name of the domain.
        :param name: Name of the ownership identifier.
        :param domain_ownership_identifier: The ownership identifier patch.
        :return: DomainOwnershipIdentifier, or the result of the optional ``cls`` callback.
        :raises ~azure.core.exceptions.HttpResponseError: on non-200 responses.
        """
        cls = kwargs.pop('cls', None)  # optional callback to customize the returned value
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL, query parameters, and headers.
        url = self.update_ownership_identifier.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the body and PATCH through the pipeline.
        body_content_kwargs = {}
        body_content = self._serialize.body(domain_ownership_identifier, 'DomainOwnershipIdentifier')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DomainOwnershipIdentifier', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_ownership_identifier.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/domainOwnershipIdentifiers/{name}'}
    async def renew(
        self,
        resource_group_name: str,
        domain_name: str,
        **kwargs
    ) -> None:
        """Renew a purchased domain registration.

        :param resource_group_name: Name of the resource group the domain belongs to.
        :param domain_name: Name of the domain to renew.
        :keyword cls: Optional callable applied to the pipeline response before returning.
        :raises HttpResponseError: On any response status other than 200, 202 or 204.
        """
        # Optional custom deserialization/post-processing hook.
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend or override the status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Build the request URL from this operation's URL template.
        url = self.renew.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'domainName': self._serialize.url("domain_name", domain_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # POST with no body; the renewal is identified entirely by the URL.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are the accepted success codes for this operation.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/renew'}
| true | true |
f7fae4bee5f3cae66ef8186f4fc0f04234af4809 | 8,916 | py | Python | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/test_repository_load.py | johannkm/dagster-okteto | 7ad30528a4a92945967d68e59e27727a1e839c2b | [
"Apache-2.0"
] | 1 | 2020-08-10T23:03:37.000Z | 2020-08-10T23:03:37.000Z | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/test_repository_load.py | johannkm/dagster-okteto | 7ad30528a4a92945967d68e59e27727a1e839c2b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/cli_tests/workspace_tests/test_repository_load.py | johannkm/dagster-okteto | 7ad30528a4a92945967d68e59e27727a1e839c2b | [
"Apache-2.0"
] | 1 | 2020-08-20T14:20:31.000Z | 2020-08-20T14:20:31.000Z | import os
import re
from contextlib import contextmanager
import click
import pytest
from click.testing import CliRunner
from dagster.cli.workspace.cli_target import (
get_external_repository_from_kwargs,
repository_target_argument,
)
from dagster.core.host_representation import ExternalRepository
from dagster.core.instance import DagsterInstance
from dagster.utils import file_relative_path
def load_repository_via_cli_runner(cli_args):
    """Invoke a throwaway click command with ``cli_args`` and capture the
    external repository it resolves.

    Returns a ``(click_result, external_repository)`` tuple; the repository
    is ``None`` when the command failed before the capture ran.
    """
    captured = []

    @click.command(name='test_repository_command')
    @repository_target_argument
    def command(**kwargs):
        captured.append(
            get_external_repository_from_kwargs(kwargs, DagsterInstance.ephemeral())
        )

    invocation = CliRunner().invoke(command, cli_args)
    return invocation, (captured[0] if captured else None)
def successfully_load_repository_via_cli(cli_args):
    """Run the CLI with ``cli_args`` and assert a repository loaded cleanly."""
    outcome, repo = load_repository_via_cli_runner(cli_args)
    assert outcome.exit_code == 0
    assert isinstance(repo, ExternalRepository)
    return repo
PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE = file_relative_path(
__file__, 'hello_world_in_file/python_file_with_named_location_workspace.yaml'
)
LEGACY_REPOSITORY = file_relative_path(__file__, 'hello_world_in_file/legacy_repository.yaml')
@pytest.mark.parametrize(
'cli_args',
(
# auto infer location and repo
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE],
# auto infer location
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-r', 'hello_world_repository'],
# auto infer repository
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-l', 'hello_world_location'],
[
'-w',
PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE,
'-l',
'hello_world_location',
'-r',
'hello_world_repository',
],
# legacy repository
['-w', LEGACY_REPOSITORY],
# legacy repository with specified name
['-w', LEGACY_REPOSITORY, '-r', 'hello_world_repository'],
),
)
def test_valid_repository_target_combos_with_single_repo_single_location(cli_args):
if cli_args[1] == LEGACY_REPOSITORY:
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
external_repository = successfully_load_repository_via_cli(cli_args)
else:
external_repository = successfully_load_repository_via_cli(cli_args)
assert isinstance(external_repository, ExternalRepository)
assert external_repository.name == 'hello_world_repository'
def test_repository_target_argument_one_repo_and_specified_wrong():
    # Requesting a repository name that does not exist in the (single-repo)
    # location must exit with a usage error (code 2) that names the
    # repositories actually available.
    result, _ = load_repository_via_cli_runner(
        ['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-r', 'not_present']
    )
    assert result.exit_code == 2
    assert (
        '''Repository "not_present" not found in location "hello_world_location". '''
        '''Found ['hello_world_repository'] instead.''' in result.stdout
    )
def test_repository_target_argument_one_location_and_specified_wrong():
result, _ = load_repository_via_cli_runner(
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-l', 'location_not_present']
)
assert result.exit_code == 2
assert (
'''Location "location_not_present" not found in workspace. '''
'''Found ['hello_world_location'] instead.'''
) in result.stdout
MULTI_LOCATION_WORKSPACE = file_relative_path(__file__, 'multi_location/multi_location.yaml')
def test_valid_multi_location_from_file():
external_repository = successfully_load_repository_via_cli(
['-w', MULTI_LOCATION_WORKSPACE, '-l', 'loaded_from_file']
)
assert external_repository.name == 'hello_world_repository'
assert external_repository.handle.repository_location_handle.location_name == 'loaded_from_file'
def test_valid_multi_location_from_module():
external_repository = successfully_load_repository_via_cli(
['-w', MULTI_LOCATION_WORKSPACE, '-l', 'loaded_from_module']
)
assert external_repository.name == 'hello_world_repository'
assert (
external_repository.handle.repository_location_handle.location_name == 'loaded_from_module'
)
def test_missing_location_name_multi_location():
result, _ = load_repository_via_cli_runner(['-w', MULTI_LOCATION_WORKSPACE])
assert result.exit_code == 2
assert (
'''Must provide --location as there are more than one locations available. '''
'''Options are: ['loaded_from_file', 'loaded_from_module']'''
) in result.stdout
SINGLE_LOCATION_MULTI_REPO_WORKSPACE = file_relative_path(__file__, 'multi_repo/multi_repo.yaml')
def test_valid_multi_repo():
assert (
successfully_load_repository_via_cli(
['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE, '-r', 'repo_one']
).name
== 'repo_one'
)
assert (
successfully_load_repository_via_cli(
['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE, '-r', 'repo_two']
).name
== 'repo_two'
)
def test_missing_repo_name_in_multi_repo_location():
result, _ = load_repository_via_cli_runner(['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE])
assert result.exit_code == 2
assert (
'''Must provide --repository as there are more than one repositories in '''
'''multi_repo. Options are: ['repo_one', 'repo_two'].'''
) in result.stdout
@contextmanager
def new_cwd(path):
    """Temporarily change the working directory to *path*, restoring it on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always return to where we started, even if the body raised.
        os.chdir(previous)
def test_legacy_repository_yaml_autoload():
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml')):
assert successfully_load_repository_via_cli([]).name == 'hello_world_repository'
def test_legacy_repository_yaml_dash_y():
with pytest.warns(
UserWarning,
match=re.escape(
'You have used -y or --repository-yaml to load a workspace. This is deprecated and '
'will be eliminated in 0.9.0.'
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml')):
assert (
successfully_load_repository_via_cli(['-y', 'repository.yaml']).name
== 'hello_world_repository'
)
def test_legacy_repository_yaml_module_autoload():
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml_module')):
assert successfully_load_repository_via_cli([]).name == 'hello_world_repository'
def test_legacy_repository_module_yaml_dash_y():
with pytest.warns(
UserWarning,
match=re.escape(
'You have used -y or --repository-yaml to load a workspace. This is deprecated and '
'will be eliminated in 0.9.0.'
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml_module')):
assert (
successfully_load_repository_via_cli(['-y', 'repository.yaml']).name
== 'hello_world_repository'
)
def test_local_directory_module():
cli_args = [
'-w',
file_relative_path(__file__, 'hello_world_in_module/local_directory_module_workspace.yaml'),
]
result, _ = load_repository_via_cli_runner(cli_args)
# repository loading should fail even though pytest is being run from the current directory
# because we removed module resolution from the working directory
assert result.exit_code != 0
@pytest.mark.parametrize(
'cli_args',
(
# load workspace with explicit working directory
[
'-w',
file_relative_path(
__file__, 'hello_world_file_in_directory/working_directory_workspace.yaml'
),
],
# load workspace with default working directory
[
'-w',
file_relative_path(
__file__, 'hello_world_file_in_directory/default_working_dir_workspace.yaml'
),
],
# load workspace with multiple working directory file targets
[
'-w',
file_relative_path(__file__, 'multi_file_target_workspace/workspace.yaml'),
'-l',
'one',
],
),
)
def test_local_directory_file(cli_args):
assert successfully_load_repository_via_cli(cli_args)
| 32.304348 | 100 | 0.682369 | import os
import re
from contextlib import contextmanager
import click
import pytest
from click.testing import CliRunner
from dagster.cli.workspace.cli_target import (
get_external_repository_from_kwargs,
repository_target_argument,
)
from dagster.core.host_representation import ExternalRepository
from dagster.core.instance import DagsterInstance
from dagster.utils import file_relative_path
def load_repository_via_cli_runner(cli_args):
capture_result = {'external_repo': None}
@click.command(name='test_repository_command')
@repository_target_argument
def command(**kwargs):
capture_result['external_repo'] = get_external_repository_from_kwargs(
kwargs, DagsterInstance.ephemeral()
)
runner = CliRunner()
result = runner.invoke(command, cli_args)
external_repo = capture_result['external_repo']
return result, external_repo
def successfully_load_repository_via_cli(cli_args):
result, external_repository = load_repository_via_cli_runner(cli_args)
assert result.exit_code == 0
assert isinstance(external_repository, ExternalRepository)
return external_repository
PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE = file_relative_path(
__file__, 'hello_world_in_file/python_file_with_named_location_workspace.yaml'
)
LEGACY_REPOSITORY = file_relative_path(__file__, 'hello_world_in_file/legacy_repository.yaml')
@pytest.mark.parametrize(
'cli_args',
(
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE],
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-r', 'hello_world_repository'],
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-l', 'hello_world_location'],
[
'-w',
PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE,
'-l',
'hello_world_location',
'-r',
'hello_world_repository',
],
['-w', LEGACY_REPOSITORY],
['-w', LEGACY_REPOSITORY, '-r', 'hello_world_repository'],
),
)
def test_valid_repository_target_combos_with_single_repo_single_location(cli_args):
if cli_args[1] == LEGACY_REPOSITORY:
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
external_repository = successfully_load_repository_via_cli(cli_args)
else:
external_repository = successfully_load_repository_via_cli(cli_args)
assert isinstance(external_repository, ExternalRepository)
assert external_repository.name == 'hello_world_repository'
def test_repository_target_argument_one_repo_and_specified_wrong():
result, _ = load_repository_via_cli_runner(
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-r', 'not_present']
)
assert result.exit_code == 2
assert (
'''Repository "not_present" not found in location "hello_world_location". '''
'''Found ['hello_world_repository'] instead.''' in result.stdout
)
def test_repository_target_argument_one_location_and_specified_wrong():
result, _ = load_repository_via_cli_runner(
['-w', PYTHON_FILE_IN_NAMED_LOCATION_WORKSPACE, '-l', 'location_not_present']
)
assert result.exit_code == 2
assert (
'''Location "location_not_present" not found in workspace. '''
'''Found ['hello_world_location'] instead.'''
) in result.stdout
MULTI_LOCATION_WORKSPACE = file_relative_path(__file__, 'multi_location/multi_location.yaml')
def test_valid_multi_location_from_file():
external_repository = successfully_load_repository_via_cli(
['-w', MULTI_LOCATION_WORKSPACE, '-l', 'loaded_from_file']
)
assert external_repository.name == 'hello_world_repository'
assert external_repository.handle.repository_location_handle.location_name == 'loaded_from_file'
def test_valid_multi_location_from_module():
external_repository = successfully_load_repository_via_cli(
['-w', MULTI_LOCATION_WORKSPACE, '-l', 'loaded_from_module']
)
assert external_repository.name == 'hello_world_repository'
assert (
external_repository.handle.repository_location_handle.location_name == 'loaded_from_module'
)
def test_missing_location_name_multi_location():
result, _ = load_repository_via_cli_runner(['-w', MULTI_LOCATION_WORKSPACE])
assert result.exit_code == 2
assert (
'''Must provide --location as there are more than one locations available. '''
'''Options are: ['loaded_from_file', 'loaded_from_module']'''
) in result.stdout
SINGLE_LOCATION_MULTI_REPO_WORKSPACE = file_relative_path(__file__, 'multi_repo/multi_repo.yaml')
def test_valid_multi_repo():
assert (
successfully_load_repository_via_cli(
['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE, '-r', 'repo_one']
).name
== 'repo_one'
)
assert (
successfully_load_repository_via_cli(
['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE, '-r', 'repo_two']
).name
== 'repo_two'
)
def test_missing_repo_name_in_multi_repo_location():
result, _ = load_repository_via_cli_runner(['-w', SINGLE_LOCATION_MULTI_REPO_WORKSPACE])
assert result.exit_code == 2
assert (
'''Must provide --repository as there are more than one repositories in '''
'''multi_repo. Options are: ['repo_one', 'repo_two'].'''
) in result.stdout
@contextmanager
def new_cwd(path):
old = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old)
def test_legacy_repository_yaml_autoload():
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml')):
assert successfully_load_repository_via_cli([]).name == 'hello_world_repository'
def test_legacy_repository_yaml_dash_y():
with pytest.warns(
UserWarning,
match=re.escape(
'You have used -y or --repository-yaml to load a workspace. This is deprecated and '
'will be eliminated in 0.9.0.'
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml')):
assert (
successfully_load_repository_via_cli(['-y', 'repository.yaml']).name
== 'hello_world_repository'
)
def test_legacy_repository_yaml_module_autoload():
with pytest.warns(
UserWarning,
match=re.escape(
'You are using the legacy repository yaml format. Please update your file '
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml_module')):
assert successfully_load_repository_via_cli([]).name == 'hello_world_repository'
def test_legacy_repository_module_yaml_dash_y():
with pytest.warns(
UserWarning,
match=re.escape(
'You have used -y or --repository-yaml to load a workspace. This is deprecated and '
'will be eliminated in 0.9.0.'
),
):
with new_cwd(file_relative_path(__file__, 'legacy_repository_yaml_module')):
assert (
successfully_load_repository_via_cli(['-y', 'repository.yaml']).name
== 'hello_world_repository'
)
def test_local_directory_module():
cli_args = [
'-w',
file_relative_path(__file__, 'hello_world_in_module/local_directory_module_workspace.yaml'),
]
result, _ = load_repository_via_cli_runner(cli_args)
assert result.exit_code != 0
@pytest.mark.parametrize(
'cli_args',
(
[
'-w',
file_relative_path(
__file__, 'hello_world_file_in_directory/working_directory_workspace.yaml'
),
],
[
'-w',
file_relative_path(
__file__, 'hello_world_file_in_directory/default_working_dir_workspace.yaml'
),
],
[
'-w',
file_relative_path(__file__, 'multi_file_target_workspace/workspace.yaml'),
'-l',
'one',
],
),
)
def test_local_directory_file(cli_args):
assert successfully_load_repository_via_cli(cli_args)
| true | true |
f7fae54038bdef582ab82ea0531d0a9f591a3b42 | 4,679 | py | Python | import-site-tree-from-csv/import-waf-site-tree-from-csv.py | imperva/mx-toolbox | 639e32d169bb96c4f2ae3a4dedaefe1eb81c35f3 | [
"MIT"
] | 9 | 2019-10-09T20:15:50.000Z | 2022-02-08T07:12:13.000Z | import-site-tree-from-csv/import-waf-site-tree-from-csv.py | imperva/mx-toolbox | 639e32d169bb96c4f2ae3a4dedaefe1eb81c35f3 | [
"MIT"
] | 4 | 2019-08-22T13:08:17.000Z | 2021-04-27T01:16:51.000Z | import-site-tree-from-csv/import-waf-site-tree-from-csv.py | imperva/mx-toolbox | 639e32d169bb96c4f2ae3a4dedaefe1eb81c35f3 | [
"MIT"
] | 8 | 2019-09-25T10:55:59.000Z | 2021-09-01T09:13:37.000Z | #!/usr/bin/env python
import ss
import sys
import json
import csv
import requests
import logging
import urllib
from subprocess import PIPE,Popen
import pyparsing
############ ENV Settings ############
logging.basicConfig(filename="import_site_tree_from_csv.log", filemode='w', format='%(name)s - %(levelname)s - %(message)s')
############ GLOBALS ############
CONFIGFILE = 'config.json'
CONFIG = {}
try:
with open(CONFIGFILE, 'r') as data:
CONFIG = json.load(data)
except:
print("Missing \""+CONFIGFILE+"\" file, create file named \""+CONFIGFILE+"\" with the following contents:\n{\n\t\"log_level\": \"debug\",\n\t\"log_file_name\": \"gateway_statistics.log\",\n\t\"environment\": \"dev\",\n\t\"is_userspace\":false,\n\t\"environment\": \"dev\",\n\t\"log_search\": {\n\t\t\"enabled\": true,\n\t\t\"files\": [{\n\t\t\t\"path\": \"/var/log/messages\",\n\t\t\t\"search_patterns\": [{\n\t\t\t\t\t\"name\":\"YOUR_EVENT_NAME\",\n\t\t\t\t\t\"pattern\":\"some text pattern\"\n\t\t\t\t}, {\n\t\t\t\t\t\"name\":\"YOUR_EVENT_NAME_2\",\n\t\t\t\t\t\"pattern\":\"some other text pattern\"\n\t\t\t\t}\n\t\t\t]\n\t\t}]\n\t},\n\t\"newrelic\": {\n\t\t\"enabled\": false,\n\t\t\"account_id\": \"ACCOUNT_ID\",\n\t\t\"api_key\": \"API_KEY\",\n\t\t\"event_type\": \"GWStats\"\n\t},\n\t\"influxdb\": {\n\t\t\"enabled\": true,\n\t\t\"host\": \"http://1.2.3.4:8086/write?db=imperva_performance_stats\"\n\t},\n\t\"syslog\": {\n\t\t\"enabled\": true,\n\t\t\"host\": \"1.2.3.4\",\n\t\t\"port\": 514\n\t}\n}")
exit()
if len(sys.argv)<2:
print("[ERROR] Missing argument, please specify the path to the csv to import. \n Example: python import-waf-site-tree-from-csv.py /path/to/mysitetree.csv")
logging.warning("[ERROR] Missing argument, please specify the path to the csv to import. Example: python import-waf-site-tree-from-csv.py /path/to/mysitetree.csv")
quit()
try:
CSV_FILE_PATH = sys.argv[1]
except:
print('Path to csv is missing, please specify a path to csv file you are looking to import. Example: python import-waf-site-tree-from-csv.py "path/to/yourfile.csv"')
exit()
def run():
    """Import a WAF site tree from CSV_FILE_PATH into the MX via its REST API.

    Walks the parsed CSV hierarchy (site -> server group -> service) and
    creates each object with ss.makeCall; children are only created when the
    parent creation passed ss.ErrorCheck.
    """
    sites = ss.ParseCsvWaf(CSV_FILE_PATH)
    mx_host = CONFIG["mx"]["endpoint"]
    session_id = ss.login(mx_host, CONFIG["mx"]["username"], CONFIG["mx"]["password"])
    for site_name in sites:
        site = sites[site_name]
        logging.warning("Adding site '"+site_name+"' to site tree.")
        # Create the site; an empty JSON body is all the endpoint needs.
        response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/sites/"+site_name,"POST",json.dumps({}))
        if ss.ErrorCheck(response):
            for server_group_name in site:
                server_group = site[server_group_name]
                logging.warning("Adding server group '"+server_group_name+"' to site '"+site_name+"' to site tree.")
                response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/serverGroups/"+site_name+"/"+server_group_name,"POST",json.dumps({}))
                if ss.ErrorCheck(response):
                    # Protected IPs: the mapped value is the gateway group name.
                    for server_ip in server_group["server_ips"]:
                        response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/serverGroups/"+site_name+"/"+server_group_name+"/protectedIPs/"+server_ip+"?gatewayGroup="+server_group["server_ips"][server_ip],"POST",json.dumps({}))
                    for service_name in server_group["services"]:
                        service = server_group["services"][service_name]
                        # Plain and SSL listener ports for the web service.
                        data = {
                            "ports":list(service["ports"].keys()),
                            "sslPorts":list(service["sslPorts"].keys())
                        }
                        response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name,"POST",json.dumps(data))
                        # Upload each SSL certificate attached to the service.
                        for ssl_key_name in service["sslCerts"]:
                            sslCertObj = service["sslCerts"][ssl_key_name]
                            response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name+"/sslCertificates/"+ssl_key_name,"POST",json.dumps(sslCertObj))
                        # Recreate reverse-proxy (KRP) inbound rules.
                        for krp_alias_name in service["krpConfigs"]:
                            krp_rule = service["krpConfigs"][krp_alias_name]
                            response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name+"/krpInboundRules/"+krp_rule["gateway_group"]+"/"+krp_rule["gateway_krp_alias_name"]+"/"+krp_rule["krp_inbound_port"],"POST",json.dumps(krp_rule["krpRules"]))
if __name__ == '__main__':
run()
| 64.09589 | 1,011 | 0.613593 |
import ss
import sys
import json
import csv
import requests
import logging
import urllib
from subprocess import PIPE,Popen
import pyparsing
\",\n\t\t\t\t\t\"pattern\":\"some other text pattern\"\n\t\t\t\t}\n\t\t\t]\n\t\t}]\n\t},\n\t\"newrelic\": {\n\t\t\"enabled\": false,\n\t\t\"account_id\": \"ACCOUNT_ID\",\n\t\t\"api_key\": \"API_KEY\",\n\t\t\"event_type\": \"GWStats\"\n\t},\n\t\"influxdb\": {\n\t\t\"enabled\": true,\n\t\t\"host\": \"http://1.2.3.4:8086/write?db=imperva_performance_stats\"\n\t},\n\t\"syslog\": {\n\t\t\"enabled\": true,\n\t\t\"host\": \"1.2.3.4\",\n\t\t\"port\": 514\n\t}\n}")
exit()
if len(sys.argv)<2:
print("[ERROR] Missing argument, please specify the path to the csv to import. \n Example: python import-waf-site-tree-from-csv.py /path/to/mysitetree.csv")
logging.warning("[ERROR] Missing argument, please specify the path to the csv to import. Example: python import-waf-site-tree-from-csv.py /path/to/mysitetree.csv")
quit()
try:
CSV_FILE_PATH = sys.argv[1]
except:
print('Path to csv is missing, please specify a path to csv file you are looking to import. Example: python import-waf-site-tree-from-csv.py "path/to/yourfile.csv"')
exit()
def run():
sites = ss.ParseCsvWaf(CSV_FILE_PATH)
mx_host = CONFIG["mx"]["endpoint"]
session_id = ss.login(mx_host, CONFIG["mx"]["username"], CONFIG["mx"]["password"])
for site_name in sites:
site = sites[site_name]
logging.warning("Adding site '"+site_name+"' to site tree.")
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/sites/"+site_name,"POST",json.dumps({}))
if ss.ErrorCheck(response):
for server_group_name in site:
server_group = site[server_group_name]
logging.warning("Adding server group '"+server_group_name+"' to site '"+site_name+"' to site tree.")
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/serverGroups/"+site_name+"/"+server_group_name,"POST",json.dumps({}))
if ss.ErrorCheck(response):
for server_ip in server_group["server_ips"]:
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/serverGroups/"+site_name+"/"+server_group_name+"/protectedIPs/"+server_ip+"?gatewayGroup="+server_group["server_ips"][server_ip],"POST",json.dumps({}))
for service_name in server_group["services"]:
service = server_group["services"][service_name]
data = {
"ports":list(service["ports"].keys()),
"sslPorts":list(service["sslPorts"].keys())
}
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name,"POST",json.dumps(data))
for ssl_key_name in service["sslCerts"]:
sslCertObj = service["sslCerts"][ssl_key_name]
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name+"/sslCertificates/"+ssl_key_name,"POST",json.dumps(sslCertObj))
for krp_alias_name in service["krpConfigs"]:
krp_rule = service["krpConfigs"][krp_alias_name]
response = ss.makeCall(CONFIG["mx"]["endpoint"],session_id, "/conf/webServices/"+site_name+"/"+server_group_name+"/"+service_name+"/krpInboundRules/"+krp_rule["gateway_group"]+"/"+krp_rule["gateway_krp_alias_name"]+"/"+krp_rule["krp_inbound_port"],"POST",json.dumps(krp_rule["krpRules"]))
if __name__ == '__main__':
run()
| true | true |
f7fae543a4087e8a07b08590ae381482a421133e | 475 | py | Python | meiduo_mall/apps/oauth/models.py | joinik/meiduo_mall | 08da8e789941ee0bb4d9cd658c4faaa4f9f0f67a | [
"MIT"
] | null | null | null | meiduo_mall/apps/oauth/models.py | joinik/meiduo_mall | 08da8e789941ee0bb4d9cd658c4faaa4f9f0f67a | [
"MIT"
] | null | null | null | meiduo_mall/apps/oauth/models.py | joinik/meiduo_mall | 08da8e789941ee0bb4d9cd658c4faaa4f9f0f67a | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.db import models
from utils.myModles import BaseModel
class OAuthQQUser(BaseModel):
    """Binding between a local user account and a QQ OAuth identity."""
    # Local user this QQ identity belongs to; removed when the user is deleted.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, verbose_name='用户',)
    # QQ-issued openid; indexed because logins look users up by it.
    openid = models.CharField(max_length=64, verbose_name='openid', db_index=True)
    class Meta:
        db_table = 'tb_oauth_qq'
        verbose_name = 'QQ登录用户数据'
        verbose_name_plural = verbose_name
| 25 | 88 | 0.715789 | from django.db import models
from django.db import models
from utils.myModles import BaseModel
class OAuthQQUser(BaseModel):
user = models.ForeignKey('users.User', on_delete=models.CASCADE, verbose_name='用户',)
openid = models.CharField(max_length=64, verbose_name='openid', db_index=True)
class Meta:
db_table = 'tb_oauth_qq'
verbose_name = 'QQ登录用户数据'
verbose_name_plural = verbose_name
| true | true |
f7fae54bde4fb44979f1451662ee97c93c65d8c6 | 1,592 | py | Python | upload/oss.py | princewang1994/markdown_image_uploader | a2d6869d3f7b3adf3f3fef458744ed488203403e | [
"MIT"
] | 1 | 2018-11-25T09:02:31.000Z | 2018-11-25T09:02:31.000Z | upload/oss.py | princewang1994/markdown_image_uploader | a2d6869d3f7b3adf3f3fef458744ed488203403e | [
"MIT"
] | null | null | null | upload/oss.py | princewang1994/markdown_image_uploader | a2d6869d3f7b3adf3f3fef458744ed488203403e | [
"MIT"
] | null | null | null | import json
import oss2
import os
def read_config():
    """Load the OSS configuration from ``~/.oss/oss.conf``.

    If the file does not exist yet, a placeholder template is written first
    (via make_blank_config) so the user has something to fill in.

    Returns:
        dict: parsed JSON configuration.
    """
    # expanduser('~') honours $HOME but also works when it is unset
    # (and on Windows), unlike os.environ['HOME'] which raises KeyError.
    config_dir = os.path.join(os.path.expanduser('~'), '.oss')
    config_path = os.path.join(config_dir, 'oss.conf')
    # if config file not exist, make blank one
    if not os.path.exists(config_path):
        print('Config file not found, making blank config file to `{}`'.format(config_path))
        # exist_ok avoids the race between an exists() check and mkdir().
        os.makedirs(config_dir, exist_ok=True)
        make_blank_config(config_path)
    # Context manager so the file handle is closed deterministically
    # (the original json.load(open(...)) leaked it).
    with open(config_path) as f:
        return json.load(f)
def make_blank_config(config_file):
    """Write a placeholder OSS config template to *config_file*."""
    placeholder = {
        'endpoint': 'Your Endpoint(Without http://)',
        'ak': 'Your AccessKey',
        'sk': 'Your SecretKey',
        'bucket': 'Your Bucket Name'
    }
    # json.dump writes the same bytes as dumps()+write(), without the
    # intermediate string.
    with open(config_file, 'w') as handle:
        json.dump(placeholder, handle, indent=4)
def upload_to_oss(filename, path, config):
    """
    Upload a local file to Aliyun OSS and return its public URL.

    Params:
        filename(str): object key (name) in the OSS bucket
        path(str): local file path to upload
        config: oss config dict with 'ak', 'sk', 'endpoint', 'bucket'
    Returns:
        object url (str)
    Raises:
        Exception: if the upload fails or OSS does not answer 200.
    """
    # authentication
    ak = config['ak']
    sk = config['sk']
    endpoint = config['endpoint']
    bucket_name = config['bucket']
    auth = oss2.Auth(ak, sk)
    bucket = oss2.Bucket(auth, 'http://' + endpoint, bucket_name)
    try:
        result = bucket.put_object_from_file(filename, path)
        if result.status != 200:
            # A successful put returns HTTP 200; anything else is a failure.
            raise Exception('OSS returned status {}'.format(result.status))
        # generate url
        url = 'http://{}.{}/{}'.format(bucket_name, endpoint, filename)
        return url
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; the original error is still re-raised.
        print('Upload {} failed.'.format(path))
        raise
| 27.929825 | 92 | 0.601759 | import json
import oss2
import os
def read_config():
config_dir = os.path.join(os.environ['HOME'], '.oss')
config_path = os.path.join(config_dir, 'oss.conf')
if not os.path.exists(config_path):
print('Config file not found, making blank config file to `{}`'.format(config_path))
if not os.path.exists(config_dir):
os.mkdir(config_dir)
make_blank_config(config_path)
config = json.load(open(config_path))
return config
def make_blank_config(config_file):
config = {
'endpoint': 'Your Endpoint(Without http://)',
'ak': 'Your AccessKey',
'sk': 'Your SecretKey',
'bucket': 'Your Bucket Name'
}
text = json.dumps(config, indent=4)
with open(config_file, 'w') as f:
f.write(text)
def upload_to_oss(filename, path, config):
ak = config['ak']
sk = config['sk']
endpoint = config['endpoint']
bucket_name = config['bucket']
auth = oss2.Auth(ak, sk)
bucket = oss2.Bucket(auth, 'http://' + endpoint, bucket_name)
try:
result = bucket.put_object_from_file(filename, path)
if result.status != 200:
raise Exception
url = 'http://{}.{}/{}'.format(bucket_name, endpoint, filename)
return url
except:
print('Upload {} failed.'.format(path))
raise
| true | true |
f7fae5b290f5c4d3278b95aa4fa4e6caeea20959 | 3,257 | py | Python | tests/functional/gtcs/test_gtcs_proc_isql_10.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/functional/gtcs/test_gtcs_proc_isql_10.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/functional/gtcs/test_gtcs_proc_isql_10.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: functional.gtcs.gtcs_proc_isql_10
# title: gtcs-proc-isql-10
# decription:
# Original test see in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROC_ISQL_10.script
# SQL script for creating test database ('gtcs_sp1.fbk') and fill it with some data:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/PROCS_QA_INIT_ISQL.script
# Checked on: 4.0.0.1803 SS; 3.0.6.33265 SS; 2.5.9.27149 SC.
#
# tracker_id:
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1)
test_script_1 = """
set bail on;
set term ^;
create procedure proc10 returns( a varchar(20), b varchar(5), c integer) as
begin
for
select pname, color, weight
from p where color = 'Red'
order by weight
into :a,:b,:c
do
suspend;
end
^
set term ;^
set count on;
execute procedure proc10 ;
select 'point-1' msg, p.* from proc10 p;
select 'point-2' msg, max(p.a) from proc10 p;
select 'point-3' msg, p.c from proc10 p;
select 'point-4' msg, p.a, p.c from proc10 p order by p.a;
select 'point-5' msg, p.a, avg(p.c) from proc10 p group by p.a having avg(p.c) > 15;
select 'point-6' msg, p.a, avg(p.c) from proc10 p group by p.a;
select 'point-7' msg, p.a, p.c from proc10 p where p.c > (select avg(x.c) from proc10 x);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
A B C
Nut Red 12
MSG A B C
point-1 Nut Red 12
point-1 Screw Red 14
point-1 Cog Red 19
Records affected: 3
MSG MAX
point-2 Screw
Records affected: 1
MSG C
point-3 12
point-3 14
point-3 19
Records affected: 3
MSG A C
point-4 Cog 19
point-4 Nut 12
point-4 Screw 14
Records affected: 3
MSG A AVG
point-5 Cog 19
Records affected: 1
MSG A AVG
point-6 Cog 19
point-6 Nut 12
point-6 Screw 14
Records affected: 3
MSG A C
point-7 Cog 19
Records affected: 1
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 30.726415 | 109 | 0.480811 |
import pytest
from firebird.qa import db_factory, isql_act, Action
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(from_backup='gtcs_sp1.fbk', init=init_script_1)
test_script_1 = """
set bail on;
set term ^;
create procedure proc10 returns( a varchar(20), b varchar(5), c integer) as
begin
for
select pname, color, weight
from p where color = 'Red'
order by weight
into :a,:b,:c
do
suspend;
end
^
set term ;^
set count on;
execute procedure proc10 ;
select 'point-1' msg, p.* from proc10 p;
select 'point-2' msg, max(p.a) from proc10 p;
select 'point-3' msg, p.c from proc10 p;
select 'point-4' msg, p.a, p.c from proc10 p order by p.a;
select 'point-5' msg, p.a, avg(p.c) from proc10 p group by p.a having avg(p.c) > 15;
select 'point-6' msg, p.a, avg(p.c) from proc10 p group by p.a;
select 'point-7' msg, p.a, p.c from proc10 p where p.c > (select avg(x.c) from proc10 x);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
A B C
Nut Red 12
MSG A B C
point-1 Nut Red 12
point-1 Screw Red 14
point-1 Cog Red 19
Records affected: 3
MSG MAX
point-2 Screw
Records affected: 1
MSG C
point-3 12
point-3 14
point-3 19
Records affected: 3
MSG A C
point-4 Cog 19
point-4 Nut 12
point-4 Screw 14
Records affected: 3
MSG A AVG
point-5 Cog 19
Records affected: 1
MSG A AVG
point-6 Cog 19
point-6 Nut 12
point-6 Screw 14
Records affected: 3
MSG A C
point-7 Cog 19
Records affected: 1
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| true | true |
f7fae5da27627ff16f9469c14ed68b2ee88aca8c | 11,860 | py | Python | instapy/comment_util.py | kofway/InstaPy | fecc59de48395da55d74fafa93344710ef7d29fd | [
"MIT"
] | null | null | null | instapy/comment_util.py | kofway/InstaPy | fecc59de48395da55d74fafa93344710ef7d29fd | [
"MIT"
] | null | null | null | instapy/comment_util.py | kofway/InstaPy | fecc59de48395da55d74fafa93344710ef7d29fd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Module which handles the commenting features """
import random
import emoji
from .time_util import sleep
from .util import update_activity
from .util import add_user_to_blacklist
from .util import click_element
from .util import get_action_delay
from .util import explicit_wait
from .util import extract_text_from_element
from .util import web_address_navigator
from .like_util import get_media_edge_comment_string
from .quota_supervisor import quota_supervisor
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import NoSuchElementException
from .xpath import read_xpath
def get_comment_input(browser):
    """Locate the comment input box of the currently open post.

    Tries the primary xpath first and falls back to the placeholder-based
    locator. Returns a (possibly empty) list of matching WebElements.
    """
    elements = browser.find_elements_by_xpath(
        read_xpath(get_comment_input.__name__, "comment_input"))
    if not elements:
        # primary locator missed -- retry with the placeholder variant
        elements = browser.find_elements_by_xpath(
            read_xpath(get_comment_input.__name__, "placeholder"))
    return elements
def open_comment_section(browser, logger):
    """Click the post's comment button so the comment input becomes usable.

    Logs a warning instead of raising when the button is missing or the
    click fails (e.g. on narrow browser windows).
    """
    warn_msg = ("--> Comment Button Not Found!"
                "\t~may cause issues with browser windows of smaller widths")
    buttons = browser.find_elements_by_xpath(
        read_xpath(open_comment_section.__name__, "comment_elem"))
    if not buttons:
        logger.warning(warn_msg)
        return
    try:
        click_element(browser, buttons[0])
    except WebDriverException:
        logger.warning(warn_msg)
def comment_image(browser, username, comments, blacklist, logger, logfolder):
    """Post one randomly chosen comment on the currently opened image.

    browser   -- selenium webdriver with the post page already open
    username  -- post owner's name, substituted into '{}' placeholders of
                 the chosen comment template
    comments  -- pool of comment templates to choose from
    blacklist -- dict with 'enabled'/'campaign' keys; when enabled the user
                 is recorded as 'commented' for that campaign
    logger    -- logger instance
    logfolder -- folder used by the blacklist writer

    Returns (True, "success") on success, otherwise (False, <reason>).
    """
    # check action availability against the comment quota supervisor
    if quota_supervisor('comments') == 'jump':
        return False, "jumped"
    # pick a template, inject the username and normalize emoji aliases
    rand_comment = (random.choice(comments).format(username))
    rand_comment = emoji.demojize(rand_comment)
    rand_comment = emoji.emojize(rand_comment, use_aliases=True)
    open_comment_section(browser, logger)
    comment_input = get_comment_input(browser)
    try:
        if len(comment_input) > 0:
            comment_input[0].clear()
            # re-locate the input: clearing may re-render the element
            comment_input = get_comment_input(browser)
            # below, an extra space is added to force
            # the input box to update the reactJS core
            comment_to_be_sent = rand_comment + ' '
            browser.execute_script(
                "arguments[0].value = arguments[1];",
                comment_input[0], comment_to_be_sent)
            # below, it also will remove that extra space added above
            # COS '\b' is a backspace char in ASCII
            comment_input[0].send_keys('\b')
            comment_input = get_comment_input(browser)
            comment_input[0].submit()
            update_activity('comments')
            if blacklist['enabled'] is True:
                action = 'commented'
                add_user_to_blacklist(username,
                                      blacklist['campaign'],
                                      action,
                                      logger,
                                      logfolder)
        else:
            logger.warning("--> Comment Action Likely Failed!"
                           "\t~comment Element was not found")
            return False, "commenting disabled"
    except InvalidElementStateException:
        logger.warning("--> Comment Action Likely Failed!"
                       "\t~encountered `InvalidElementStateException` :/")
        return False, "invalid element state"
    logger.info("--> Commented: {}".format(rand_comment.encode('utf-8')))
    # get the post-comment delay time to sleep
    naply = get_action_delay("comment")
    sleep(naply)
    return True, "success"
def verify_commenting(browser, max, min, mand_words, logger):
    """
    Get the amount of existing comments on the post and compare it against
    the user-defined max & min window; optionally also require at least one
    of `mand_words` to appear in the post description or its first comment.

    Returns (True, 'Approval') when every enabled check passes, otherwise
    (False, <human readable reason>).

    NOTE: `max`/`min` shadow the builtins; kept for caller compatibility.
    """
    # commenting must be possible on this post at all
    commenting_state, msg = is_commenting_enabled(browser, logger)
    if commenting_state is not True:
        disapproval_reason = "--> Not commenting! {}".format(msg)
        return False, disapproval_reason
    comments_count, msg = get_comments_count(browser, logger)
    if comments_count is None:
        disapproval_reason = "--> Not commenting! {}".format(msg)
        return False, disapproval_reason
    # enforce the [min, max] window on the existing comment count
    if max is not None and comments_count > max:
        disapproval_reason = (
            "Not commented on this post! ~more comments exist"
            " off maximum limit at {}"
            .format(comments_count))
        return False, disapproval_reason
    elif min is not None and comments_count < min:
        disapproval_reason = (
            "Not commented on this post! ~less comments exist"
            " off minumum limit at {}"
            .format(comments_count))
        return False, disapproval_reason
    # mandatory-words filter: inspect the post caption and its first comment
    if len(mand_words) != 0:
        try:
            post_desc = browser.execute_script(
                "return window._sharedData.entry_data."
                "PostPage[0].graphql.shortcode_media."
                "edge_media_to_caption.edges[0]['node']['text']"
            ).lower()
        except Exception:
            post_desc = None
        try:
            first_comment = browser.execute_script(
                "return window._sharedData.entry_data."
                "PostPage[0].graphql.shortcode_media."
                "edge_media_to_comment.edges[0]['node']['text']"
            ).lower()
        except Exception:
            first_comment = None
        # NOTE(review): the post is rejected when EITHER retrieved text lacks
        # all of the mandatory words -- confirm this AND-like behavior is
        # intended (an OR-like "any text contains a word" may be expected).
        if ((post_desc is not None and not any(mand_word.lower() in
                                               post_desc for mand_word in
                                               mand_words)) or
                (first_comment is not None and not any(
                    mand_word.lower() in first_comment for
                    mand_word in mand_words))):
            return False, 'mandantory words not in post desc'
        if (post_desc is None and first_comment is None):
            return False, "couldn't get post description and comments"
    return True, 'Approval'
def get_comments_on_post(browser,
                         owner,
                         poster,
                         amount,
                         post_link,
                         ignore_users,
                         randomize,
                         logger):
    """ Fetch comments data on posts

    Collects (commenter, comment-text) pairs from the post at `post_link`,
    skipping comments written by `owner`, `poster`, `ignore_users` and
    duplicate commenters. Only comments whose like button is still
    un-clicked are considered. Returns a list of (commenter, comment)
    tuples, or None when nothing usable was found.
    """
    web_address_navigator(browser, post_link)
    # keep the requested amount for the final report; when randomizing,
    # over-collect (3x) so the shuffle still leaves enough candidates
    orig_amount = amount
    if randomize is True:
        amount = amount * 3
    # NOTE(review): `amount` is never read after this point -- collection is
    # bounded only by the number of comment rows on the page; confirm whether
    # truncation to `amount` was intended here.
    # check if commenting on the post is enabled
    commenting_state, msg = is_commenting_enabled(browser, logger)
    if commenting_state is not True:
        logger.info(msg)
        return None
    # check if there are any comments in the post
    comments_count, msg = get_comments_count(browser, logger)
    if not comments_count:
        logger.info(msg)
        return None
    # xpaths for the comment rows and their (un)like buttons,
    # resolved once here instead of inside the loop
    comments_block_XPath = read_xpath(get_comments_on_post.__name__,"comments_block")
    like_button_full_XPath = read_xpath(get_comments_on_post.__name__,"like_button_full_XPath")
    unlike_button_full_XPath = read_xpath(get_comments_on_post.__name__,"unlike_button_full_XPath")
    comments = []
    commenters = []
    # wait for page fully load [IMPORTANT!]
    explicit_wait(browser, "PFL", [], logger, 10)
    try:
        all_comment_like_buttons = browser.find_elements_by_xpath(
            like_button_full_XPath)
        if all_comment_like_buttons:
            comments_block = browser.find_elements_by_xpath(
                comments_block_XPath)
            for comment_line in comments_block:
                commenter_elem = comment_line.find_element_by_xpath(read_xpath(get_comments_on_post.__name__,"commenter_elem"))
                commenter = extract_text_from_element(commenter_elem)
                # keep only new commenters that are not the owner/poster.
                # NOTE(review): if `ignore_users` is a list, this compares the
                # list object itself, not its elements -- membership should
                # probably be checked against [owner, poster] + ignore_users.
                if (commenter and
                        commenter not in [owner, poster, ignore_users] and
                        commenter not in commenters):
                    commenters.append(commenter)
                else:
                    continue
                comment_elem = comment_line.find_elements_by_tag_name(
                    "span")[0]
                comment = extract_text_from_element(comment_elem)
                if comment:
                    comments.append(comment)
                else:
                    # no usable text: drop the commenter appended just above
                    commenters.remove(commenters[-1])
                    continue
        else:
            comment_unlike_buttons = browser.find_elements_by_xpath(
                unlike_button_full_XPath)
            if comment_unlike_buttons:
                logger.info("There are {} comments on this post and all "
                            "of them are already liked."
                            .format(len(comment_unlike_buttons)))
            else:
                logger.info(
                    "There are no any comments available on this post.")
            return None
    except NoSuchElementException:
        logger.info("Failed to get comments on this post.")
        return None
    if not comments:
        logger.info("Could not grab any usable comments from this post..")
        return None
    else:
        comment_data = list(zip(commenters, comments))
        if randomize is True:
            random.shuffle(comment_data)
        if len(comment_data) < orig_amount:
            logger.info("Could grab only {} usable comments from this post.."
                        .format(len(comment_data)))
        else:
            logger.info("Grabbed {} usable comments from this post.."
                        .format(len(comment_data)))
    return comment_data
def is_commenting_enabled(browser, logger):
    """ Find out if commenting on the post is enabled

    Reads `comments_disabled` from Instagram's `window._sharedData` for the
    currently open post; on a WebDriver hiccup the page is reloaded once and
    the check retried.

    Returns a (state, message) tuple:
        (True, "Success")  -- commenting is possible
        (False, <reason>)  -- comments are disabled, or the check failed
    """
    try:
        comments_disabled = browser.execute_script(
            "return window._sharedData.entry_data."
            "PostPage[0].graphql.shortcode_media.comments_disabled")
    except WebDriverException:
        try:
            # stale page state -- reload once and retry the same check
            browser.execute_script("location.reload()")
            update_activity()
            comments_disabled = browser.execute_script(
                "return window._sharedData.entry_data."
                "PostPage[0].graphql.shortcode_media.comments_disabled")
        except Exception as e:
            msg = ("Failed to check comments' status for verification!\n\t{}"
                   .format(str(e).encode("utf-8")))
            # bugfix: previously the generic string "Failure" was returned
            # while the detailed `msg` built above was silently dropped;
            # callers log the second tuple element, so return the details.
            return False, msg
    if comments_disabled is True:
        msg = "Comments are disabled for this post."
        return False, msg
    return True, "Success"
def get_comments_count(browser, logger):
    """ Get the number of total comments in the post

    Returns a (count, message) tuple; count is None when the lookup failed.
    """
    try:
        # primary source: the media node's comment-edge counter
        media = browser.execute_script(
            "return window._sharedData.entry_data."
            "PostPage[0].graphql.shortcode_media")
        media_edge_string = get_media_edge_comment_string(media)
        comments_count = media[media_edge_string]['count']
    except Exception as e:
        # fallback: read the preview-comment counter directly
        try:
            comments_count = browser.execute_script(
                "return window._sharedData.entry_data."
                "PostPage[0].graphql.shortcode_media."
                "edge_media_preview_comment.count")
        except Exception as e:
            msg = ("Failed to get comments' count!\n\t{}"
                   .format(str(e).encode("utf-8")))
            return None, msg
    if comments_count:
        return comments_count, "Success"
    if comments_count == 0:
        return 0, "There are no any comments in the post."
    return None, "Couldn't get comments' count."
| 35.297619 | 127 | 0.611804 |
import random
import emoji
from .time_util import sleep
from .util import update_activity
from .util import add_user_to_blacklist
from .util import click_element
from .util import get_action_delay
from .util import explicit_wait
from .util import extract_text_from_element
from .util import web_address_navigator
from .like_util import get_media_edge_comment_string
from .quota_supervisor import quota_supervisor
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import NoSuchElementException
from .xpath import read_xpath
def get_comment_input(browser):
comment_input = browser.find_elements_by_xpath(
read_xpath(get_comment_input.__name__,"comment_input"))
if len(comment_input) <= 0:
comment_input = browser.find_elements_by_xpath(
read_xpath(get_comment_input.__name__,"placeholder"))
return comment_input
def open_comment_section(browser, logger):
missing_comment_elem_warning = (
"--> Comment Button Not Found!"
"\t~may cause issues with browser windows of smaller widths")
comment_elem = browser.find_elements_by_xpath(
read_xpath(open_comment_section.__name__,"comment_elem"))
if len(comment_elem) > 0:
try:
click_element(browser, comment_elem[0])
except WebDriverException:
logger.warning(missing_comment_elem_warning)
else:
logger.warning(missing_comment_elem_warning)
def comment_image(browser, username, comments, blacklist, logger, logfolder):
if quota_supervisor('comments') == 'jump':
return False, "jumped"
rand_comment = (random.choice(comments).format(username))
rand_comment = emoji.demojize(rand_comment)
rand_comment = emoji.emojize(rand_comment, use_aliases=True)
open_comment_section(browser, logger)
comment_input = get_comment_input(browser)
try:
if len(comment_input) > 0:
comment_input[0].clear()
comment_input = get_comment_input(browser)
comment_to_be_sent = rand_comment + ' '
browser.execute_script(
"arguments[0].value = arguments[1];",
comment_input[0], comment_to_be_sent)
comment_input[0].send_keys('\b')
comment_input = get_comment_input(browser)
comment_input[0].submit()
update_activity('comments')
if blacklist['enabled'] is True:
action = 'commented'
add_user_to_blacklist(username,
blacklist['campaign'],
action,
logger,
logfolder)
else:
logger.warning("--> Comment Action Likely Failed!"
"\t~comment Element was not found")
return False, "commenting disabled"
except InvalidElementStateException:
logger.warning("--> Comment Action Likely Failed!"
"\t~encountered `InvalidElementStateException` :/")
return False, "invalid element state"
logger.info("--> Commented: {}".format(rand_comment.encode('utf-8')))
naply = get_action_delay("comment")
sleep(naply)
return True, "success"
def verify_commenting(browser, max, min, mand_words, logger):
commenting_state, msg = is_commenting_enabled(browser, logger)
if commenting_state is not True:
disapproval_reason = "--> Not commenting! {}".format(msg)
return False, disapproval_reason
comments_count, msg = get_comments_count(browser, logger)
if comments_count is None:
disapproval_reason = "--> Not commenting! {}".format(msg)
return False, disapproval_reason
if max is not None and comments_count > max:
disapproval_reason = (
"Not commented on this post! ~more comments exist"
" off maximum limit at {}"
.format(comments_count))
return False, disapproval_reason
elif min is not None and comments_count < min:
disapproval_reason = (
"Not commented on this post! ~less comments exist"
" off minumum limit at {}"
.format(comments_count))
return False, disapproval_reason
if len(mand_words) != 0:
try:
post_desc = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media."
"edge_media_to_caption.edges[0]['node']['text']"
).lower()
except Exception:
post_desc = None
try:
first_comment = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media."
"edge_media_to_comment.edges[0]['node']['text']"
).lower()
except Exception:
first_comment = None
if ((post_desc is not None and not any(mand_word.lower() in
post_desc for mand_word in
mand_words)) or
(first_comment is not None and not any(
mand_word.lower() in first_comment for
mand_word in mand_words))):
return False, 'mandantory words not in post desc'
if (post_desc is None and first_comment is None):
return False, "couldn't get post description and comments"
return True, 'Approval'
def get_comments_on_post(browser,
owner,
poster,
amount,
post_link,
ignore_users,
randomize,
logger):
web_address_navigator(browser, post_link)
orig_amount = amount
if randomize is True:
amount = amount * 3
# check if commenting on the post is enabled
commenting_state, msg = is_commenting_enabled(browser, logger)
if commenting_state is not True:
logger.info(msg)
return None
# check if there are any comments in the post
comments_count, msg = get_comments_count(browser, logger)
if not comments_count:
logger.info(msg)
return None
# get comments & commenters information
comments_block_XPath = read_xpath(get_comments_on_post.__name__,"comments_block") # efficient location
# path
like_button_full_XPath = read_xpath(get_comments_on_post.__name__,"like_button_full_XPath")
unlike_button_full_XPath = read_xpath(get_comments_on_post.__name__,"unlike_button_full_XPath")
comments = []
commenters = []
# wait for page fully load [IMPORTANT!]
explicit_wait(browser, "PFL", [], logger, 10)
try:
all_comment_like_buttons = browser.find_elements_by_xpath(
like_button_full_XPath)
if all_comment_like_buttons:
comments_block = browser.find_elements_by_xpath(
comments_block_XPath)
for comment_line in comments_block:
commenter_elem = comment_line.find_element_by_xpath(read_xpath(get_comments_on_post.__name__,"commenter_elem"))
commenter = extract_text_from_element(commenter_elem)
if (commenter and
commenter not in [owner, poster, ignore_users] and
commenter not in commenters):
commenters.append(commenter)
else:
continue
comment_elem = comment_line.find_elements_by_tag_name(
"span")[0]
comment = extract_text_from_element(comment_elem)
if comment:
comments.append(comment)
else:
commenters.remove(commenters[-1])
continue
else:
comment_unlike_buttons = browser.find_elements_by_xpath(
unlike_button_full_XPath)
if comment_unlike_buttons:
logger.info("There are {} comments on this post and all "
"of them are already liked."
.format(len(comment_unlike_buttons)))
else:
logger.info(
"There are no any comments available on this post.")
return None
except NoSuchElementException:
logger.info("Failed to get comments on this post.")
return None
if not comments:
logger.info("Could not grab any usable comments from this post..")
return None
else:
comment_data = list(zip(commenters, comments))
if randomize is True:
random.shuffle(comment_data)
if len(comment_data) < orig_amount:
logger.info("Could grab only {} usable comments from this post.."
.format(len(comment_data)))
else:
logger.info("Grabbed {} usable comments from this post.."
.format(len(comment_data)))
return comment_data
def is_commenting_enabled(browser, logger):
try:
comments_disabled = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media.comments_disabled")
except WebDriverException:
try:
browser.execute_script("location.reload()")
update_activity()
comments_disabled = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media.comments_disabled")
except Exception as e:
msg = ("Failed to check comments' status for verification!\n\t{}"
.format(str(e).encode("utf-8")))
return False, "Failure"
if comments_disabled is True:
msg = "Comments are disabled for this post."
return False, msg
return True, "Success"
def get_comments_count(browser, logger):
try:
media = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media")
media_edge_string = get_media_edge_comment_string(media)
comments_count = media[media_edge_string]['count']
except Exception as e:
try:
comments_count = browser.execute_script(
"return window._sharedData.entry_data."
"PostPage[0].graphql.shortcode_media."
"edge_media_preview_comment.count")
except Exception as e:
msg = ("Failed to get comments' count!\n\t{}"
.format(str(e).encode("utf-8")))
return None, msg
if not comments_count:
if comments_count == 0:
msg = "There are no any comments in the post."
return 0, msg
else:
msg = "Couldn't get comments' count."
return None, msg
return comments_count, "Success"
| true | true |
f7fae696115bcd6de53129321c3c07b9f6603efc | 2,722 | py | Python | cnocr/cnocr/hyperparams/cn_hyperparams.py | DCMMC/chineseocr | 0b8772615239ea7f212b1ab5bc75183e7e9f16b0 | [
"MIT"
] | null | null | null | cnocr/cnocr/hyperparams/cn_hyperparams.py | DCMMC/chineseocr | 0b8772615239ea7f212b1ab5bc75183e7e9f16b0 | [
"MIT"
] | null | null | null | cnocr/cnocr/hyperparams/cn_hyperparams.py | DCMMC/chineseocr | 0b8772615239ea7f212b1ab5bc75183e7e9f16b0 | [
"MIT"
] | null | null | null | from __future__ import print_function
class CnHyperparams(object):
    """
    Hyperparameters for the Chinese-OCR LSTM network.

    Bundles training settings (optimizer, learning rate, batch size),
    input image geometry and the LSTM head configuration, exposed
    through read-only properties.
    """
    def __init__(self):
        # Training hyper parameters
        # self._train_epoch_size = 2560000
        # self._eval_epoch_size = 3000
        self._num_epoch = 20
        self.optimizer = "Adam"
        self._learning_rate = 0.001
        self.wd = 0.00001  # weight decay
        self.clip_gradient = None  # `None`: don't use clip gradient
        # self._momentum = 0.9
        # self._bn_mom = 0.9
        # self._workspace = 512
        self._batch_size = 128
        self._num_classes = 6426  # should be 6426 (earlier value: 5990)
        # self._img_width = 280
        # 20 chinese chars with 560 width
        self._img_width = 560
        self._img_height = 32
        # LSTM hyper parameters
        self.seq_model_type = 'lstm'
        self._num_hidden = 100
        self._num_lstm_layer = 2
        # width-compression ratio the model's conv layers apply to the input
        # image; model specific, filled in by the chosen model
        self.seq_len_cmpr_ratio = None
        # output sequence length; model specific, set via set_seq_length()
        self._seq_length = None
        self._num_label = 20
        self._drop_out = 0.5
    def __repr__(self):
        return str(self.__dict__)
    def set_seq_length(self, seq_len):
        # called by the model once its sequence length is known
        self._seq_length = seq_len
    # @property
    # def train_epoch_size(self):
    #     return self._train_epoch_size
    #
    # @property
    # def eval_epoch_size(self):
    #     return self._eval_epoch_size
    @property
    def num_epoch(self):
        return self._num_epoch
    @property
    def learning_rate(self):
        return self._learning_rate
    @property
    def momentum(self):
        # NOTE(review): self._momentum is never assigned (its init line is
        # commented out), so reading this property raises AttributeError.
        return self._momentum
    # @property
    # def bn_mom(self):
    #     return self._bn_mom
    #
    # @property
    # def workspace(self):
    #     return self._workspace
    @property
    def loss_type(self):
        # CTC loss is the only supported objective
        return "ctc"
    @property
    def batch_size(self):
        return self._batch_size
    @property
    def num_classes(self):
        return self._num_classes
    @property
    def img_width(self):
        return self._img_width
    @property
    def img_height(self):
        return self._img_height
    @property
    def depth(self):
        # NOTE(review): self._depth is never assigned -- raises AttributeError.
        return self._depth
    @property
    def growrate(self):
        # NOTE(review): self._growrate is never assigned -- raises AttributeError.
        return self._growrate
    @property
    def reduction(self):
        # NOTE(review): self._reduction is never assigned -- raises AttributeError.
        return self._reduction
    @property
    def num_hidden(self):
        return self._num_hidden
    @property
    def num_lstm_layer(self):
        return self._num_lstm_layer
    @property
    def seq_length(self):
        return self._seq_length
    @property
    def num_label(self):
        return self._num_label
    @property
    def dropout(self):
        return self._drop_out
| 21.776 | 68 | 0.608376 | from __future__ import print_function
class CnHyperparams(object):
def __init__(self):
self._num_epoch = 20
self.optimizer = "Adam"
self._learning_rate = 0.001
self.wd = 0.00001
self.clip_gradient = None
# self._momentum = 0.9
# self._bn_mom = 0.9
# self._workspace = 512
self._batch_size = 128
self._num_classes = 6426 # 应该是6426的。。 5990
# self._img_width = 280
# 20 chinese chars with 560 width
self._img_width = 560
self._img_height = 32
# LSTM hyper parameters
self.seq_model_type = 'lstm'
self._num_hidden = 100
self._num_lstm_layer = 2
# 模型对于图片宽度压缩的比例(模型中的卷积层造成的);由模型决定,不同模型不一样
self.seq_len_cmpr_ratio = None
# 序列长度;由模型决定,不同模型不一样
self._seq_length = None
self._num_label = 20
self._drop_out = 0.5
def __repr__(self):
return str(self.__dict__)
def set_seq_length(self, seq_len):
self._seq_length = seq_len
# @property
# def train_epoch_size(self):
# return self._train_epoch_size
#
# @property
# def eval_epoch_size(self):
# return self._eval_epoch_size
@property
def num_epoch(self):
return self._num_epoch
@property
def learning_rate(self):
return self._learning_rate
@property
def momentum(self):
return self._momentum
# @property
# def bn_mom(self):
# return self._bn_mom
#
# @property
# def workspace(self):
# return self._workspace
@property
def loss_type(self):
return "ctc"
@property
def batch_size(self):
return self._batch_size
@property
def num_classes(self):
return self._num_classes
@property
def img_width(self):
return self._img_width
@property
def img_height(self):
return self._img_height
@property
def depth(self):
return self._depth
@property
def growrate(self):
return self._growrate
@property
def reduction(self):
return self._reduction
@property
def num_hidden(self):
return self._num_hidden
@property
def num_lstm_layer(self):
return self._num_lstm_layer
@property
def seq_length(self):
return self._seq_length
@property
def num_label(self):
return self._num_label
@property
def dropout(self):
return self._drop_out
| true | true |
f7fae754dc0aea9851d4c62e59ef1c34ba7a12f2 | 1,485 | py | Python | Loops.py | ZareefWG/file | 5dd86278475fcd901407f300747bac700b947572 | [
"BSD-2-Clause"
] | null | null | null | Loops.py | ZareefWG/file | 5dd86278475fcd901407f300747bac700b947572 | [
"BSD-2-Clause"
] | null | null | null | Loops.py | ZareefWG/file | 5dd86278475fcd901407f300747bac700b947572 | [
"BSD-2-Clause"
] | null | null | null | variable = input ()
# echo each character of the line read above
for x in variable:
    print (x)
# `continue` in a for-loop: skip "banana", keep looping
fruits = ["apple","banana", "cherry"]
for x in fruits:
    if x == "banana":
        continue
    print (x)
# iterate the characters of a string
for x in "Nazrul":
    print (x)
# range with start/stop/step: even numbers 20..40
for x in range (20,41,2):
    print (x)
# `break` after printing: the matching char is still printed
for x in "Nazrul":
    print (x)
    if x== "z":
        break
# `break` before printing: the matching char is not printed
for x in "Nazrul":
    if x== "z":
        break
    print (x)
for x in range (0,100,5):
    if x== 80:
        break
    print (x)
# 81 is never produced by range(20,100,2), so this loop never breaks early
for num in range (20,100,2):
    if num == 81:
        break
    print (num)
n = 0
for x in "Nazrul":
    print (x)
    if x== "z":
        break
# loop variable survives the loop: prints 20 (break on first iteration)
for i in range(20, 100, 2):
    break
print(i)
# print only the edge values 0, 10, 100
for x in range(0,101,10):
    if x > 10 and x <100:
        continue
    print (x)
for x in range (11):
    print (x)
# print 0..num-1 and announce completion on the last iteration
num = int(input())
for x in range (num):
    print (x)
    if x == num - 1:
        print ("Its done")
# for/else: the else branch runs when the loop finishes without break
num = int(input())
for x in range (num):
    print (x)
else:
    print ("Its done")
# NOTE(review): when num == 0 the loop body never runs, so `x` here keeps
# its value from an earlier loop above -- likely unintended.
num = int(input())
for x in range (num):
    print (x)
else:
    print (x+1)
# nested loops: every (digit, letter) combination
# NOTE(review): the alphabet literal is missing the letter 'D'.
for x in ("ABCEFGHIJKLMNOPQRSTUVWXYZ"):
    for y in range (1,11):
        print (y,x)
for x in range(0,101,10):
    if x > 10 and x <100:
        continue
    print (x)
# While-loop counter demo: print 0..6.
# bugfix: the original body never incremented `i`, so the loop printed 0
# forever; incrementing inside the body makes it terminate after 7 prints.
i = 0
while i < 7:
    print (i)
    i = i + 1
# increment before print: counts 1..7
i = 0
while i < 7:
    i = i + 1
    print (i)
# repeat of the same exercise
i = 0
while i < 7:
    i = i + 1
    print (i)
# `break` in a while-loop: prints 0..4, then stops
i = 0
while i < 7:
    print (i)
    if i == 4:
        break
    i = i + 1
# `continue` in a while-loop: the value 4 is skipped by the print below
i = 0
while i < 7:
    i = i + 1
    if i == 4:
        continue
print (i) | 12.478992 | 39 | 0.482828 | variable = input ()
for x in variable:
print (x)
fruits = ["apple","banana", "cherry"]
for x in fruits:
if x == "banana":
continue
print (x)
for x in "Nazrul":
print (x)
for x in range (20,41,2):
print (x)
for x in "Nazrul":
print (x)
if x== "z":
break
for x in "Nazrul":
if x== "z":
break
print (x)
for x in range (0,100,5):
if x== 80:
break
print (x)
for num in range (20,100,2):
if num == 81:
break
print (num)
n = 0
for x in "Nazrul":
print (x)
if x== "z":
break
for i in range(20, 100, 2):
break
print(i)
for x in range(0,101,10):
if x > 10 and x <100:
continue
print (x)
for x in range (11):
print (x)
num = int(input())
for x in range (num):
print (x)
if x == num - 1:
print ("Its done")
num = int(input())
for x in range (num):
print (x)
else:
print ("Its done")
num = int(input())
for x in range (num):
print (x)
else:
print (x+1)
for x in ("ABCEFGHIJKLMNOPQRSTUVWXYZ"):
for y in range (1,11):
print (y,x)
for x in range(0,101,10):
if x > 10 and x <100:
continue
print (x)
i = 0
while i < 7:
print (i)
i = 0
while i < 7:
i = i + 1
print (i)
i = 0
while i < 7:
i = i + 1
print (i)
i = 0
while i < 7:
print (i)
if i == 4:
break
i = i + 1
i = 0
while i < 7:
i = i + 1
if i == 4:
continue
print (i) | true | true |
f7fae762f4cb88518cafe5a28a193b4c2c4fe552 | 4,006 | py | Python | src/models.py | zhao-tong/DeepFD-pyTorch | dc4260af3beabef097a0d3ae3eb926feb21b4793 | [
"MIT"
] | 21 | 2019-07-24T19:43:17.000Z | 2022-03-29T03:34:24.000Z | src/models.py | JiaWu-Repository/DeepFD-pyTorch | 584c00d5814d6803be45433905a4140a6bd4fd18 | [
"MIT"
] | null | null | null | src/models.py | JiaWu-Repository/DeepFD-pyTorch | 584c00d5814d6803be45433905a4140a6bd4fd18 | [
"MIT"
] | 47 | 2020-07-06T01:12:11.000Z | 2020-12-02T11:55:14.000Z | __author__ = 'Tong Zhao'
__email__ = 'tzhao2@nd.edu'
import os
import sys
import copy
import torch
import random
import numpy as np
from scipy.sparse import csr_matrix
import torch.nn as nn
import torch.nn.functional as F
class Classification(nn.Module):
    """Two-layer MLP head mapping node embeddings to 2-class log-probabilities."""

    def __init__(self, emb_size):
        super(Classification, self).__init__()
        self.fc1 = nn.Linear(emb_size, 64)
        self.fc2 = nn.Linear(64, 2)

    def init_params(self):
        """Xavier-initialize all weight matrices; zero all bias vectors."""
        for param in self.parameters():
            if param.dim() == 2:
                nn.init.xavier_uniform_(param)
            else:
                # biases (and any other non-matrix params) start at zero
                nn.init.zeros_(param)

    def forward(self, embeds):
        """Return per-sample class log-probabilities of shape (batch, 2)."""
        hidden = F.elu_(self.fc1(embeds))
        scores = F.elu_(self.fc2(hidden))
        return torch.log_softmax(scores, 1)
class DeepFD(nn.Module):
    """Symmetric 4-layer autoencoder yielding node embeddings + reconstructions.

    `features` holds the full (num_nodes, feat_size) feature matrix; `forward`
    takes a batch of node indices and returns (embeddings, reconstructions).
    """

    def __init__(self, features, feat_size, hidden_size, emb_size):
        super(DeepFD, self).__init__()
        self.features = features
        # encoder: feat_size -> hidden_size -> emb_size
        self.fc1 = nn.Linear(feat_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, emb_size)
        # decoder: emb_size -> hidden_size -> feat_size
        self.fc3 = nn.Linear(emb_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, feat_size)

    def init_params(self):
        """Xavier-initialize weight matrices; zero the biases."""
        for param in self.parameters():
            if param.dim() == 2:
                nn.init.xavier_uniform_(param)
            else:
                nn.init.zeros_(param)

    def forward(self, nodes_batch):
        """Encode the selected feature rows, then decode them back."""
        batch_feats = self.features[nodes_batch]
        embs = F.relu_(self.fc2(F.relu_(self.fc1(batch_feats))))
        recon = F.relu_(self.fc4(F.relu_(self.fc3(embs))))
        return embs, recon
class Loss_DeepFD():
    """Composite DeepFD loss: reconstruction loss + alpha * similarity loss.

    The regularization term (loss_reg) is delegated to the optimizer's
    weight_decay, so it does not appear here.

    Usage protocol: call extend_nodes(nodes_batch, training_cps) first (it
    records the node pairs and returns the extended batch to embed), then
    call get_loss(...) with the embeddings/reconstructions of that batch.
    """
    def __init__(self, features, graph_simi, device, alpha, beta, gamma):
        # features: (num_nodes, feat_size) node feature matrix
        # graph_simi: pairwise graph similarity, indexable as graph_simi[i, j]
        self.features = features
        self.graph_simi = graph_simi
        self.device = device
        self.alpha = alpha  # weight of the similarity term in get_loss
        self.beta = beta    # reweights non-zero feature entries in recon loss
        self.gamma = gamma  # NOTE(review): stored but unused in this class
        self.node_pairs = {}
        self.original_nodes_batch = None
        self.extended_nodes_batch = None
    def extend_nodes(self, nodes_batch, training_cps):
        """Record the training pairs of each batch node and return the batch
        extended with every pair partner (deduplicated via a set)."""
        self.original_nodes_batch = copy.deepcopy(nodes_batch)
        self.node_pairs = {}
        self.extended_nodes_batch = set(nodes_batch)
        for node in nodes_batch:
            cps = training_cps[node]
            self.node_pairs[node] = cps
            for cp in cps:
                self.extended_nodes_batch.add(cp[1])
        self.extended_nodes_batch = list(self.extended_nodes_batch)
        return self.extended_nodes_batch
    def get_loss(self, nodes_batch, embs_batch, recon_batch):
        # calculate loss_simi and loss_recon;
        # loss_reg is included in SGD optimizer as weight_decay
        loss_recon = self.get_loss_recon(nodes_batch, recon_batch)
        loss_simi = self.get_loss_simi(embs_batch)
        loss = loss_recon + self.alpha * loss_simi
        return loss
    def get_loss_simi(self, embs_batch):
        """Similarity loss: mean of s_ij * (exp(-||e_i - e_j||^2) - s_ij)^2
        over all node pairs recorded by extend_nodes."""
        # map node ids to their row in embs_batch (which must follow the
        # ordering of extended_nodes_batch)
        node2index = {n:i for i,n in enumerate(self.extended_nodes_batch)}
        simi_feat = []
        simi_embs = []
        for node, cps in self.node_pairs.items():
            for i, j in cps:
                simi_feat.append(torch.FloatTensor([self.graph_simi[i, j]]))
                dis_ij = (embs_batch[node2index[i]] - embs_batch[node2index[j]]) ** 2
                dis_ij = torch.exp(-dis_ij.sum())
                simi_embs.append(dis_ij.view(1))
        simi_feat = torch.cat(simi_feat, 0).to(self.device)
        simi_embs = torch.cat(simi_embs, 0)
        L = simi_feat * ((simi_embs - simi_feat) ** 2)
        return L.mean()
    def get_loss_recon(self, nodes_batch, recon_batch):
        """Weighted reconstruction loss: H = feats*(beta-1)+1, i.e. for 0/1
        features an entry weighs beta where the feature is 1 and 1 where it
        is 0, penalizing reconstruction errors on present features more."""
        feats_batch = self.features[nodes_batch]
        H_batch = (feats_batch * (self.beta - 1)) + 1
        assert feats_batch.size() == recon_batch.size() == H_batch.size()
        L = ((recon_batch - feats_batch) * H_batch) ** 2
        return L.mean()
__email__ = 'tzhao2@nd.edu'
import os
import sys
import copy
import torch
import random
import numpy as np
from scipy.sparse import csr_matrix
import torch.nn as nn
import torch.nn.functional as F
class Classification(nn.Module):
def __init__(self, emb_size):
super(Classification, self).__init__()
self.fc1 = nn.Linear(emb_size, 64)
self.fc2 = nn.Linear(64, 2)
def init_params(self):
for param in self.parameters():
if len(param.size()) == 2:
nn.init.xavier_uniform_(param)
else:
nn.init.constant_(param, 0.0)
def forward(self, embeds):
x = F.elu_(self.fc1(embeds))
x = F.elu_(self.fc2(x))
logists = torch.log_softmax(x, 1)
return logists
class DeepFD(nn.Module):
    """Feature autoencoder: encoder fc1->fc2 produces embeddings, decoder fc3->fc4 reconstructs."""

    def __init__(self, features, feat_size, hidden_size, emb_size):
        super(DeepFD, self).__init__()
        # Full node-feature matrix, indexed by node id in forward().
        self.features = features
        # Encoder: feat_size -> hidden_size -> emb_size
        self.fc1 = nn.Linear(feat_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, emb_size)
        # Decoder: emb_size -> hidden_size -> feat_size
        self.fc3 = nn.Linear(emb_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, feat_size)

    def init_params(self):
        """Xavier-initialize weight matrices; zero-initialize biases."""
        for param in self.parameters():
            if param.dim() == 2:
                nn.init.xavier_uniform_(param)
            else:
                nn.init.constant_(param, 0.0)

    def forward(self, nodes_batch):
        """Return (embeddings, reconstructions) for the given node indices."""
        batch_feats = self.features[nodes_batch]
        encoded = F.relu_(self.fc1(batch_feats))
        embs = F.relu_(self.fc2(encoded))
        decoded = F.relu_(self.fc3(embs))
        recon = F.relu_(self.fc4(decoded))
        return embs, recon
class Loss_DeepFD():
    """Combined DeepFD training loss: feature reconstruction + graph-similarity term.

    total = loss_recon + alpha * loss_simi. The L2 regularisation term is not
    computed here; `gamma` is stored but unused in this class -- presumably
    reserved for the optimizer-side regulariser, TODO confirm.
    """
    def __init__(self, features, graph_simi, device, alpha, beta, gamma):
        self.features = features        # full node-feature matrix (tensor)
        self.graph_simi = graph_simi    # pairwise graph similarity, indexed [i, j]
        self.device = device            # torch device for the similarity targets
        self.alpha = alpha              # weight of the similarity loss term
        self.beta = beta                # >1 boosts the penalty on non-zero features
        self.gamma = gamma              # unused here (see class docstring)
        self.node_pairs = {}            # node -> list of (i, j) training pairs
        self.original_nodes_batch = None
        self.extended_nodes_batch = None
    def extend_nodes(self, nodes_batch, training_cps):
        """Extend a node batch with all pair partners from training_cps.

        Records per-node pairs in self.node_pairs and returns the
        de-duplicated extended batch as a list (order is set-dependent).
        """
        self.original_nodes_batch = copy.deepcopy(nodes_batch)
        self.node_pairs = {}
        self.extended_nodes_batch = set(nodes_batch)
        for node in nodes_batch:
            cps = training_cps[node]
            self.node_pairs[node] = cps
            for cp in cps:
                # cp is a pair (i, j); cp[1] is the partner node to include.
                self.extended_nodes_batch.add(cp[1])
        self.extended_nodes_batch = list(self.extended_nodes_batch)
        return self.extended_nodes_batch
    def get_loss(self, nodes_batch, embs_batch, recon_batch):
        """Total loss = reconstruction + alpha * similarity."""
        loss_recon = self.get_loss_recon(nodes_batch, recon_batch)
        loss_simi = self.get_loss_simi(embs_batch)
        loss = loss_recon + self.alpha * loss_simi
        return loss
    def get_loss_simi(self, embs_batch):
        """Similarity-preservation loss over the pairs recorded by extend_nodes().

        For each pair (i, j): target is graph_simi[i, j]; prediction is
        exp(-||emb_i - emb_j||^2); the squared error is weighted by the target.
        embs_batch rows must be ordered like self.extended_nodes_batch.
        """
        node2index = {n:i for i,n in enumerate(self.extended_nodes_batch)}
        simi_feat = []
        simi_embs = []
        for node, cps in self.node_pairs.items():
            for i, j in cps:
                simi_feat.append(torch.FloatTensor([self.graph_simi[i, j]]))
                dis_ij = (embs_batch[node2index[i]] - embs_batch[node2index[j]]) ** 2
                dis_ij = torch.exp(-dis_ij.sum())
                simi_embs.append(dis_ij.view(1))
        simi_feat = torch.cat(simi_feat, 0).to(self.device)
        simi_embs = torch.cat(simi_embs, 0)
        L = simi_feat * ((simi_embs - simi_feat) ** 2)
        return L.mean()
    def get_loss_recon(self, nodes_batch, recon_batch):
        """Weighted reconstruction loss: errors on non-zero features count beta-fold.

        H is 1 where the input feature is 0 and beta where it is 1
        (assumes binary features -- TODO confirm).
        """
        feats_batch = self.features[nodes_batch]
        H_batch = (feats_batch * (self.beta - 1)) + 1
        assert feats_batch.size() == recon_batch.size() == H_batch.size()
        L = ((recon_batch - feats_batch) * H_batch) ** 2
        return L.mean()
f7fae778d5c677e9a791486ca106c6239c3ca5f7 | 3,650 | py | Python | forms.py | lqh-0514/crowdsourcing_vis | c1d6dec7668cf377902ab45fde8c8f3ab575e632 | [
"MIT"
] | 1 | 2020-03-02T01:13:11.000Z | 2020-03-02T01:13:11.000Z | forms.py | lqh-0514/crowdsourcing_vis | c1d6dec7668cf377902ab45fde8c8f3ab575e632 | [
"MIT"
] | null | null | null | forms.py | lqh-0514/crowdsourcing_vis | c1d6dec7668cf377902ab45fde8c8f3ab575e632 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import TextField, IntegerField, TextAreaField, SubmitField, RadioField, SelectField,StringField
from wtforms import validators, ValidationError
from wtforms.validators import DataRequired
def validate_staff(form, field):
    """WTForms validator: reject the empty-string placeholder choice ('---')."""
    if field.data != "":
        return
    raise ValidationError("Not a valid choice")
class UserForm(FlaskForm):
    """Demographics questionnaire: gender, age band, Shanghai residency,
    professional background, and a ranked top-3 of street-preference factors.
    Labels and choice texts are user-facing Chinese strings; do not alter them.
    """
    # name = TextField("Name Of Student",[validators.Required("Please enter your name.")])
    Gender = RadioField('性别', choices = [('M','男'),('F','女')],validators=[DataRequired('请填写一个选项')])
    Age = SelectField('年龄', choices=[('','---'),('0','10-17'),('1','18-30'),('2','31-45'),('3','45-60'),('4','60+')],validators=[validate_staff])
    Address = RadioField("是否(曾)在上海生活?", choices = [('T','是'),('F','否')], validators=[DataRequired('请填写一个选项')])
    Background = RadioField("是否(曾)有建筑、规划、景观等相关专业背景?", choices = [('T','有'),('F','无')], validators=[DataRequired('请填写一个选项')])
    text = StringField('您认为,哪些因素会影响您是否喜欢在一条街道活动?(请按主次顺序)')
    # properties_1..3 form a ranked choice of street-quality factors;
    # validate_staff rejects the '---' placeholder (empty value).
    properties_1 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    properties_2 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    properties_3 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    submit = SubmitField("提交问卷")
class PropertyForm(FlaskForm):
    """Per-scene comparison form: free-text impressions for the left/right
    image and the left/right video of a street scene."""
    text_left = StringField('left')
    text_right = StringField('right')
    text_left_video = StringField('left')
    text_right_video = StringField('right')
    submit = SubmitField("下一处场景")
class ConfirmForm(FlaskForm):
    """Closing questionnaire: re-asks the ranked top-3 street-preference
    factors (same choices as UserForm) plus an optional WeChat contact for
    the prize draw. Labels/choices are user-facing Chinese strings."""
    text = StringField('您认为,哪些因素会影响您是否喜欢在一条街道活动?(请按主次顺序)')
    # Ranked choice of street-quality factors; validate_staff rejects '---'.
    properties_1 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    properties_2 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    properties_3 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
    ('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
    wechat = StringField('参与抽取10张百元天猫超市礼品卡,请留下您的微信号作为联系方式')
    submit = SubmitField("提交问卷")
| 89.02439 | 187 | 0.631233 | from flask_wtf import FlaskForm
from wtforms import TextField, IntegerField, TextAreaField, SubmitField, RadioField, SelectField,StringField
from wtforms import validators, ValidationError
from wtforms.validators import DataRequired
def validate_staff(form, field):
if field.data == "":
raise ValidationError("Not a valid choice")
class UserForm(FlaskForm):
Gender = RadioField('性别', choices = [('M','男'),('F','女')],validators=[DataRequired('请填写一个选项')])
Age = SelectField('年龄', choices=[('','---'),('0','10-17'),('1','18-30'),('2','31-45'),('3','45-60'),('4','60+')],validators=[validate_staff])
Address = RadioField("是否(曾)在上海生活?", choices = [('T','是'),('F','否')], validators=[DataRequired('请填写一个选项')])
Background = RadioField("是否(曾)有建筑、规划、景观等相关专业背景?", choices = [('T','有'),('F','无')], validators=[DataRequired('请填写一个选项')])
text = StringField('您认为,哪些因素会影响您是否喜欢在一条街道活动?(请按主次顺序)')
properties_1 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
properties_2 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
properties_3 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
submit = SubmitField("提交问卷")
class PropertyForm(FlaskForm):
text_left = StringField('left')
text_right = StringField('right')
text_left_video = StringField('left')
text_right_video = StringField('right')
submit = SubmitField("下一处场景")
class ConfirmForm(FlaskForm):
text = StringField('您认为,哪些因素会影响您是否喜欢在一条街道活动?(请按主次顺序)')
properties_1 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
properties_2 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
properties_3 = SelectField('properties', choices = [('','---'),('use', '街区功能(业态)是否丰富'),('age','新/历史风貌'),('type','房屋形态(例如有地标性或造型独特建筑)'),('stre','街道尺度(楼高街宽比)是否舒适'),('size','街区大小'),
('den','房屋密集或稀疏'),('scene','视觉丰富度(例如有沿街店面或通透性的围墙)'),('view','视野开敞度'),('wal','可步行性'),('fac','街道设施(例如座椅等)'),('act','开放的室外活动空间'),('gre','景观绿化')],validators=[validate_staff],default='')
wechat = StringField('参与抽取10张百元天猫超市礼品卡,请留下您的微信号作为联系方式')
submit = SubmitField("提交问卷")
| true | true |
f7fae844ae67eac2f1c0b866356426490fbcfeec | 1,141 | py | Python | clustereval/stability.py | vinay-swamy/clustereval | d199cf0f8f232c35602633d8821249e6578d080a | [
"MIT"
] | null | null | null | clustereval/stability.py | vinay-swamy/clustereval | d199cf0f8f232c35602633d8821249e6578d080a | [
"MIT"
] | 10 | 2021-04-07T16:37:18.000Z | 2021-06-08T17:54:54.000Z | clustereval/stability.py | vinay-swamy/clustereval | d199cf0f8f232c35602633d8821249e6578d080a | [
"MIT"
] | null | null | null | #%%
import pandas as pd
import numpy as np
import glob
import os
import re
import pickle
from multiprocessing import Pool
def entropy(exp):  # exp: DataFrame with columns Barcode, labels
    """Shannon entropy (natural log) of the label distribution in *exp*.

    H = -sum_k p_k * log(p_k), where p_k = (# rows with label k) / (# rows).

    The previous implementation used groupby(...).count() followed by
    assign() with a DataFrame-valued lambda, which only works when the
    frame has exactly one column besides 'labels'; groupby(...).size()
    computes the same group sizes robustly for any column set.
    """
    counts = exp.groupby("labels").size()
    props = counts / exp.shape[0]
    return float(-(props * np.log(props)).sum())
def H_k(ref, exp):
    """Entropy of *exp* restricted to barcodes present in *ref*; 0 if no overlap."""
    overlap = exp[exp['Barcode'].isin(ref['Barcode'])]
    if overlap.empty:
        return 0
    return entropy(overlap)
def calc_stability(tup):
    """Per-cluster stability of a reference clustering against perturbed runs.

    tup = (ref, meta_df, exp_df_list, runname):
      ref         -- reference clustering (DataFrame with Barcode, labels)
      meta_df     -- per-experiment metadata; 'H_tot' is each run's total entropy
      exp_df_list -- clusterings of the perturbed experiments
      runname     -- column name for the cluster labels in the result

    Returns a DataFrame with one row per reference cluster and its stability
    score 1 - mean_over_experiments(H_k / H_tot), so 1 means perfectly stable.
    """
    ref = tup[0]
    meta_df = tup[1]
    exp_df_list = tup[2]
    runname = tup[3]
    # Row per cluster: normalized entropies across experiments, summed per cluster.
    H_k_scores = np.asarray([
        [H_k(group[1], exp) for exp in exp_df_list] / meta_df['H_tot']
        for group in ref.groupby("labels")
    ]).sum(axis=1)
    # Convert the mean normalized entropy into a stability score in [0, 1].
    H_k_scores = 1 - (H_k_scores / len(exp_df_list))
    clusters = [group[0] for group in ref.groupby("labels")]
    return pd.DataFrame.from_dict({runname: clusters, 'H_k_scores': H_k_scores})
# %%
| 25.355556 | 80 | 0.577564 |
import pandas as pd
import numpy as np
import glob
import os
import re
import pickle
from multiprocessing import Pool
def entropy(exp):
entropy = (exp
.groupby("labels")
.count()
.reset_index(drop=True)
.assign(prop=lambda x: x/exp.shape[0],
H=lambda x: x['prop'] * np.log(x['prop']))
['H'].sum()*-1)
return entropy
def H_k(ref, exp):
exp = exp[exp.Barcode.isin(ref['Barcode']) ]
if exp.shape[0] == 0:
return 0
else:
h_k = entropy(exp)
return h_k
def calc_stability(tup):
ref = tup[0]
meta_df = tup[1]
exp_df_list = tup[2]
runname = tup[3]
H_k_scores = np.asarray([
[H_k(group[1], exp) for exp in exp_df_list] / meta_df['H_tot']
for group in ref.groupby("labels")
]).sum(axis=1)
H_k_scores = 1 - (H_k_scores / len(exp_df_list))
clusters = [group[0] for group in ref.groupby("labels")]
return pd.DataFrame.from_dict({runname: clusters, 'H_k_scores': H_k_scores})
| true | true |
f7fae8bbf7d5d863c086e38eb9777c0898e2a2bc | 7,667 | py | Python | examples/pwr_run/checkpointing/final/no_safeguard/job68.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final/no_safeguard/job68.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/final/no_safeguard/job68.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model, exit.

    Relies on module-level globals set up by the surrounding script:
    epoch_begin_time, job_name, args, save_files, model, current_epoch.
    Messages are sent to the scheduler node on port 10002.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        # No epoch has started yet, so no time was wasted.
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Save with the current epoch in the filename so training can resume there.
    model.save('/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # Tell the scheduler a checkpoint was written before exiting.
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that reports training progress to the scheduler node.

    Tracks the current epoch and epoch start time in module globals (used by
    the SIGTERM handler), times the first epoch, and sends 'ckpt_qual' /
    '1st_epoch' / 'completion' messages via send_signal on port 10002.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            # First epoch after resuming from a checkpoint.
            first_epoch_start = time.time()
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            # First epoch of a fresh run.
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            # Report how long the first epoch took.
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Report fractional completion of this (half-length) run.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 33.190476 | 118 | 0.692579 |
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_safeguard/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
pid = os.getpid()
message = job_name + ' pid ' + str(pid)
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true | true |
f7fae8d52769b2b31f761614ee04357f2193528a | 1,678 | py | Python | demo/index_ml_tmdb.py | cxcx/elasticsearch-learning-to-rank | 1e0404e1d69da113f3b912191a01617bf7fe9c60 | [
"Apache-2.0"
] | 3 | 2018-06-27T06:53:32.000Z | 2021-11-24T12:18:38.000Z | demo/index_ml_tmdb.py | mauliksoneji/elasticsearch-learning-to-rank | 3075d1cd015c2b5e9f4c24c069bca75674687879 | [
"ICU",
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | demo/index_ml_tmdb.py | mauliksoneji/elasticsearch-learning-to-rank | 3075d1cd015c2b5e9f4c24c069bca75674687879 | [
"ICU",
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2018-11-28T08:02:33.000Z | 2019-02-07T14:20:01.000Z | import json
import elasticsearch.helpers
from log_conf import Logger
from utils import elastic_connection
def enrich(movie):
    """Enrich a movie dict in place for search purposes.

    Adds 'title_sent', the title prefixed with a begin-of-field sentinel.
    """
    if 'title' not in movie:
        return
    movie['title_sent'] = 'SENTINEL_BEGIN ' + movie['title']
def reindex(es_connection, analysis_settings=None, mapping_settings=None, movie_dict=None, index='tmdb'):
    """Recreate *index* with the given settings and bulk-load *movie_dict*.

    Deletes any existing index (ignoring 404), creates it with one shard and
    the supplied analysis/mapping settings, then bulk-indexes the movies.

    Bug fix: the bulk call previously used the module-global `es` (only
    defined under __main__) instead of the `es_connection` parameter, so
    calling reindex() from another module raised NameError.
    """
    if movie_dict is None:
        movie_dict = {}
    if mapping_settings is None:
        mapping_settings = {}
    if analysis_settings is None:
        analysis_settings = {}

    settings = {
        "settings": {
            "number_of_shards": 1,
            "number_of_replicas": 0,
            "index": {
                "analysis": analysis_settings,
            }}}

    if mapping_settings:
        settings['mappings'] = mapping_settings

    # Drop-and-recreate; ignore "index missing" / "already exists" errors.
    es_connection.indices.delete(index, ignore=[400, 404])
    es_connection.indices.create(index, body=settings)
    elasticsearch.helpers.bulk(es_connection, bulk_docs(movie_dict, index))
def bulk_docs(movie_dict, index):
    """Yield one bulk-index action per movie, enriching each document first."""
    for movie_id, movie in movie_dict.items():
        # Elasticsearch rejects empty-string dates; drop the field instead.
        if movie.get('release_date') == "":
            del movie['release_date']
        enrich(movie)
        yield {
            "_index": index,
            "_type": "movie",
            "_id": movie_id,
            "_source": movie,
        }
        if 'title' in movie:
            Logger.logger.info("%s added to %s" % (movie['title'].encode('utf-8'), index))
if __name__ == "__main__":
es = elastic_connection(timeout=30)
tmdb_movie_dict = json.loads(open('tmdb.json').read())
reindex(es, movie_dict=tmdb_movie_dict)
| 29.438596 | 105 | 0.613826 | import json
import elasticsearch.helpers
from log_conf import Logger
from utils import elastic_connection
def enrich(movie):
if 'title' in movie:
movie['title_sent'] = 'SENTINEL_BEGIN ' + movie['title']
def reindex(es_connection, analysis_settings=None, mapping_settings=None, movie_dict=None, index='tmdb'):
if movie_dict is None:
movie_dict = {}
if mapping_settings is None:
mapping_settings = {}
if analysis_settings is None:
analysis_settings = {}
settings = {
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"index": {
"analysis": analysis_settings,
}}}
if mapping_settings:
settings['mappings'] = mapping_settings
es_connection.indices.delete(index, ignore=[400, 404])
es_connection.indices.create(index, body=settings)
elasticsearch.helpers.bulk(es, bulk_docs(movie_dict, index))
def bulk_docs(movie_dict, index):
for movie_id, movie in movie_dict.items():
if 'release_date' in movie and movie['release_date'] == "":
del movie['release_date']
enrich(movie)
add_cmd = {"_index": index,
"_type": "movie",
"_id": movie_id,
"_source": movie}
yield add_cmd
if 'title' in movie:
Logger.logger.info("%s added to %s" % (movie['title'].encode('utf-8'), index))
if __name__ == "__main__":
es = elastic_connection(timeout=30)
tmdb_movie_dict = json.loads(open('tmdb.json').read())
reindex(es, movie_dict=tmdb_movie_dict)
| true | true |
f7faea5e916bae45efcab42eb77bb476e521a6bc | 89 | py | Python | chargeamps/__init__.py | kirei/python-chargeamps | b2412166a33b1bbd0fd37526b3f8c2d28897e58b | [
"BSD-2-Clause"
] | 3 | 2021-02-17T19:20:43.000Z | 2022-03-04T20:44:35.000Z | chargeamps/__init__.py | kirei/python-chargeamps | b2412166a33b1bbd0fd37526b3f8c2d28897e58b | [
"BSD-2-Clause"
] | 4 | 2020-12-14T13:29:55.000Z | 2022-03-20T13:05:51.000Z | chargeamps/__init__.py | kirei/python-chargeamps | b2412166a33b1bbd0fd37526b3f8c2d28897e58b | [
"BSD-2-Clause"
] | 4 | 2020-06-16T08:39:31.000Z | 2022-03-04T18:04:23.000Z | import pkg_resources
__version__ = pkg_resources.get_distribution("chargeamps").version
| 22.25 | 66 | 0.853933 | import pkg_resources
__version__ = pkg_resources.get_distribution("chargeamps").version
| true | true |
f7faea6537878c37f245956202e2622e18f4d9cb | 2,517 | py | Python | challenge.py | iraida07/challenge-python-07 | 348576187aa3c4dd5fba5c60e0338f88068629e7 | [
"MIT"
] | null | null | null | challenge.py | iraida07/challenge-python-07 | 348576187aa3c4dd5fba5c60e0338f88068629e7 | [
"MIT"
] | null | null | null | challenge.py | iraida07/challenge-python-07 | 348576187aa3c4dd5fba5c60e0338f88068629e7 | [
"MIT"
] | null | null | null | DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Mentor',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Mariandrea',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
def run(data=None):
    """Print several demographic views of *data* (defaults to module DATA).

    Views: Python devs, Platzi workers, adults (>18), every record annotated
    with a 'Homeless' flag (no organization) and an 'old' flag (age > 30).

    Bug fix: the old-age annotation previously used inconsistent dict keys
    ('old' when True but 'Old' when False); it now uses 'old' in both cases.
    The new *data* parameter is backward-compatible (run() behaves as before).
    """
    if data is None:
        data = DATA

    all_python_devs = filter(lambda x: x['language'] == 'python', data)
    all_platzi_workers = filter(lambda x: x['organization'] == 'Platzi', data)
    adults = filter(lambda x: x['age'] > 18, data)
    # Annotate (not filter) every record with boolean flags.
    workers = [dict(x, Homeless=(x['organization'] == '')) for x in data]
    old_people = [dict(x, old=(x['age'] > 30)) for x in data]

    print('Python devs: ')
    for dev in all_python_devs:
        print(dev['name'])
    print('\n\n')

    print('Platzi workers: ')
    for worker in all_platzi_workers:
        print(worker['name'])
    print('\n\n')

    print('Adults: ')
    for adult in adults:
        print(adult['name'])
    print('\n\n')

    print(workers)
    print('\n\n')
    print(old_people)
    print('\n\n')
if __name__ == '__main__':
run()
| 23.091743 | 131 | 0.471593 | DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Mentor',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Mariandrea',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
def run():
all_python_devs = filter(lambda x : x['language'] == 'python', DATA)
all_Platzi_workers = filter(lambda x : x['organization'] == 'Platzi', DATA)
adults = filter(lambda x : x['age'] > 18, DATA)
workers = list(map(lambda x: dict(x, **{'Homeless':True}) if x['organization'] == '' else dict(x, **{'Homeless':False}), DATA))
old_people = list(map(lambda x: dict(x, **{'old':True}) if x['age'] > 30 else dict(x, **{'Old':False}), DATA))
print('Python devs: ')
for dev in all_python_devs:
print(dev['name'])
print('\n\n')
print('Platzi workers: ')
for worker in all_Platzi_workers:
print(worker['name'])
print('\n\n')
print('Adults: ')
for adult in adults:
print(adult['name'])
print('\n\n')
print(workers)
print('\n\n')
print(old_people)
print('\n\n')
if __name__ == '__main__':
run()
| true | true |
f7faeae7cfb012678091ac2a8a1f2883fd2a8296 | 8,541 | py | Python | biostar/server/middleware.py | Torres63/biostar | b7292abe9d541d5cec003812050f7723fad80d8e | [
"MIT"
] | null | null | null | biostar/server/middleware.py | Torres63/biostar | b7292abe9d541d5cec003812050f7723fad80d8e | [
"MIT"
] | null | null | null | biostar/server/middleware.py | Torres63/biostar | b7292abe9d541d5cec003812050f7723fad80d8e | [
"MIT"
] | null | null | null | __author__ = 'ialbert'
from django.contrib import messages
from django.conf import settings
import hmac, logging, re
from datetime import timedelta
from django.contrib.auth import authenticate, login, logout
from biostar.apps.users.models import User, Profile
from biostar import const
from django.core.cache import cache
from biostar.apps.posts.models import Post, Vote
from biostar.apps.messages.models import Message
from biostar.apps.planet.models import BlogPost
from collections import defaultdict
from biostar.awards import create_user_award, check_user_profile
logger = logging.getLogger(__name__)
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
def get_ip(request):
    "Best-effort client IP: REMOTE_ADDR first, then the first X-Forwarded-For hop."
    remote_addr = request.META.get('REMOTE_ADDR', '')
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '').split(",")[0].strip()
    return remote_addr or forwarded_for or '0.0.0.0'
class AutoSignupAdapter(DefaultSocialAccountAdapter):
    """Attach an incoming social login to an existing local account
    that has the same email address, if one exists."""

    def pre_social_login(self, request, sociallogin):
        # Nothing to do when the social account is already linked.
        if sociallogin.is_existing:
            return

        # Look for a local user with a matching email and connect the
        # social account to it; otherwise fall through to normal signup.
        try:
            email = sociallogin.account.extra_data.get('email')
            if email:
                existing = User.objects.get(email=email)
                sociallogin.connect(request, existing)
        except User.DoesNotExist:
            pass
class ExternalAuth(object):
    '''
    This is an "authentication" backend that relies on the user already
    having been validated (see valid_external_login). We're just
    following the Django authentication-backend interface here.
    '''

    def authenticate(self, email, valid=False):
        # Only return a user when the caller has already verified the
        # external credentials; never validate anything here.
        if valid:
            user = User.objects.get(email=email)
            # Django requires the backend dotted path on the user object.
            user.backend = "%s.%s" % (__name__, self.__class__.__name__)
            # BUG FIX: removed a stray Py2 debug statement
            # ("print user.backend") that wrote to stdout on every
            # external login.
            return user
        else:
            return None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
def valid_external_login(request):
    """Attempt to log the user in from a signed external-auth cookie.

    Each (name, key) pair in settings.EXTERNAL_AUTH names a cookie whose
    value must be "email:hmac_hexdigest(key, email)". On a valid cookie
    the user is fetched or created and logged in.

    Returns True on successful login, False otherwise.
    """
    for name, key in settings.EXTERNAL_AUTH:
        value = request.COOKIES.get(name)
        if value:
            try:
                email, digest1 = value.split(":")
                digest2 = hmac.new(key, email).hexdigest()
                # SECURITY: constant-time comparison avoids leaking
                # digest prefixes through timing (the original used ==).
                valid = hmac.compare_digest(str(digest1), str(digest2))
                if not valid:
                    raise Exception("digests do not match")
            except Exception as exc:
                logger.error(exc)
                return False

            # If we made it this far the data is valid.
            user, flag = User.objects.get_or_create(email=email)
            if flag:
                logger.info("created user %s" % user.email)

            # Authenticate with local info.
            user = ExternalAuth().authenticate(email=user.email, valid=valid)
            login(request=request, user=user)
            return True

    return False
SESSION_KEY, ANON_USER = settings.SESSION_KEY, "anon-user"
def get_counts(request, weeks=settings.COUNT_INTERVAL_WEEKS):
    """Return a defaultdict of activity counts for the current visitor.

    Counts cover top-level posts (total and per tag), unanswered
    questions, planet blog posts and -- for authenticated users -- new
    messages and votes. The interval is "since last login" for
    authenticated users, otherwise the last `weeks` weeks.
    """
    user = request.user
    now = const.now()

    # Authenticated users get counts since their last login.
    if user.is_authenticated():
        since = user.profile.last_login
    else:
        since = now - timedelta(weeks=weeks)

    # This fetches the posts since last login.
    # Only ids are loaded; tags are prefetched for the per-tag loop below.
    posts = Post.objects.filter(type__in=Post.TOP_LEVEL, status=Post.OPEN, creation_date__gt=since).order_by(
        '-id').only("id").prefetch_related("tag_set")
    # Cap the scan at 200 posts to bound the per-request work.
    posts = posts[:200]

    counts = defaultdict(int)

    # How many news posts.
    counts['latest'] = len(posts)

    # Produce counts per tag.
    for post in posts:
        for tag in post.tag_set.all():
            counts[tag.name] += 1

    # Fill in the unanswered counts.
    counts['open'] = Post.objects.filter(type=Post.QUESTION, reply_count=0, status=Post.OPEN,
                                         creation_date__gt=since).count()

    # How many new planet posts
    counts['planet'] = BlogPost.objects.filter(insert_date__gt=since).count()

    # Compute a few more counts for the user.
    if user.is_authenticated():
        # These are the new messages since the last login.
        counts['messages'] = Message.objects.filter(user=user, unread=True, sent_at__gt=since).count()

        # These are the new votes since the last login.
        counts['votes'] = Vote.objects.filter(post__author=user, date__gt=since).count()

    return counts
class Visit(object):
    """
    Middleware that refreshes per-visit state on every request:
    logs out suspended accounts, tracks anonymous sessions, periodically
    updates last-login and activity counts for authenticated users, and
    keeps a shared anonymous count cache warm.
    """

    def process_request(self, request, weeks=settings.COUNT_INTERVAL_WEEKS):
        global SESSION_KEY, ANON_USER

        user, session = request.user, request.session

        # Suspended users are logged out immediately.
        if user.is_authenticated() and user.is_suspended:
            logout(request)
            messages.error(request, 'Sorry, this account has been suspended. Please contact the administrators.')

        # Add attributes to anonymous users.
        if not user.is_authenticated():

            # This attribute is required inside templates.
            user.is_moderator = user.is_admin = False

            # Check external logins.
            if settings.EXTERNAL_AUTH and valid_external_login(request):
                messages.success(request, "Login completed")

            # We do this to detect when an anonymous session turns into a logged in one.
            if ANON_USER not in session:
                session[ANON_USER] = True

        # User attributes that refresh at given intervals.
        if user.is_authenticated():

            # The time between two count refreshes.
            elapsed = (const.now() - user.profile.last_login).seconds

            # The user has an anonymous session already.
            # Update the user login data now.
            if ANON_USER in session:
                del session[ANON_USER]
                # Force the refresh branch below to run on first login.
                elapsed = settings.SESSION_UPDATE_SECONDS + 1

            # The user session will be updated.
            if elapsed > settings.SESSION_UPDATE_SECONDS:
                # Set the last login time.
                Profile.objects.filter(user_id=user.id).update(last_login=const.now())

                # Compute the counts.
                counts = get_counts(request)

                # Store the counts in the session for later use.
                session[SESSION_KEY] = counts

                # Create user awards if possible (async task).
                create_user_award.delay(user=user)

                # check user and fill in details (async task)
                check_user_profile.delay(ip=get_ip(request), user=user)

        # Get the counts from the session or the cache.
        counts = session.get(SESSION_KEY) or cache.get(SESSION_KEY)

        # No sessions found, set the them into the session.
        if not counts:
            # Compute the counts
            counts = get_counts(request)

            # Put them into the session.
            session[SESSION_KEY] = counts

            # Store them in the cache for the next anonymous user.
            cache.set(SESSION_KEY, counts, settings.SESSION_UPDATE_SECONDS)
# Seconds
TIME_PERIOD = 24 * 3600
# How many visit within that time period.
MAX_VISITS = 50
# Allowed IPs, triplets
WHITE_LIST = [
]
# Make lookup faster.
WHITE_LIST = set(WHITE_LIST)
from django.shortcuts import redirect
class Ban(object):
    """
    Middleware that bans anonymous visitors whose /24 IP prefix exceeds
    MAX_VISITS within TIME_PERIOD, redirecting them to a static message.
    """

    # File that accumulates one line per ban event.
    BAN_LOG = "/home/www-data/biostar-central/banned-ips.txt"

    def process_request(self, request):
        user = request.user

        # Authenticated users are never rate limited here.
        if not user.is_anonymous():
            return

        oip = get_ip(request)
        # Track by class-C prefix so a whole /24 shares one counter.
        ip = ".".join(oip.split(".")[:-1])

        if ip in WHITE_LIST:
            return

        if ip not in cache:
            cache.set(ip, 0, TIME_PERIOD)

        value = cache.get(ip)

        if value >= MAX_VISITS:
            now = const.now()
            message = "%s\tbanned\t%s\t%s\n" % (now, ip, oip)
            logger.error(message)
            # BUG FIX: use a context manager so the log handle is closed
            # even if the write fails (the original leaked it on error).
            with open(self.BAN_LOG, "a") as fp:
                fp.write(message)
            return redirect('/static/message.txt')
        else:
            cache.incr(ip)
| 32.109023 | 113 | 0.618429 | __author__ = 'ialbert'
from django.contrib import messages
from django.conf import settings
import hmac, logging, re
from datetime import timedelta
from django.contrib.auth import authenticate, login, logout
from biostar.apps.users.models import User, Profile
from biostar import const
from django.core.cache import cache
from biostar.apps.posts.models import Post, Vote
from biostar.apps.messages.models import Message
from biostar.apps.planet.models import BlogPost
from collections import defaultdict
from biostar.awards import create_user_award, check_user_profile
logger = logging.getLogger(__name__)
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
def get_ip(request):
ip1 = request.META.get('REMOTE_ADDR', '')
ip2 = request.META.get('HTTP_X_FORWARDED_FOR', '').split(",")[0].strip()
ip = ip1 or ip2 or '0.0.0.0'
return ip
class AutoSignupAdapter(DefaultSocialAccountAdapter):
def pre_social_login(self, request, sociallogin):
if sociallogin.is_existing:
return
try:
email = sociallogin.account.extra_data.get('email')
if email:
user = User.objects.get(email=email)
sociallogin.connect(request, user)
except User.DoesNotExist:
pass
class ExternalAuth(object):
'''
This is an "autentication" that relies on the user being valid.
We're just following the Django interfaces here.
'''
def authenticate(self, email, valid=False):
# Check the username/password and return a User.
if valid:
user = User.objects.get(email=email)
user.backend = "%s.%s" % (__name__, self.__class__.__name__)
print user.backend
return user
else:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def valid_external_login(request):
"Attempts to perform an external login"
for name, key in settings.EXTERNAL_AUTH:
value = request.COOKIES.get(name)
if value:
try:
email, digest1 = value.split(":")
digest2 = hmac.new(key, email).hexdigest()
valid = (digest1 == digest2)
if not valid:
raise Exception("digests do not match")
except Exception, exc:
logger.error(exc)
return False
# If we made it this far the data is valid.
user, flag = User.objects.get_or_create(email=email)
if flag:
logger.info("created user %s" % user.email)
# Authenticate with local info.
user = ExternalAuth().authenticate(email=user.email, valid=valid)
login(request=request, user=user)
return True
return False
SESSION_KEY, ANON_USER = settings.SESSION_KEY, "anon-user"
def get_counts(request, weeks=settings.COUNT_INTERVAL_WEEKS):
"Returns the number of counts for each post type in the interval that has passed"
user = request.user
now = const.now()
# Authenticated users get counts since their last login.
if user.is_authenticated():
since = user.profile.last_login
else:
since = now - timedelta(weeks=weeks)
# This fetches the posts since last login.
posts = Post.objects.filter(type__in=Post.TOP_LEVEL, status=Post.OPEN, creation_date__gt=since).order_by(
'-id').only("id").prefetch_related("tag_set")
posts = posts[:200]
counts = defaultdict(int)
# How many news posts.
counts['latest'] = len(posts)
# Produce counts per tag.
for post in posts:
for tag in post.tag_set.all():
counts[tag.name] += 1
# Fill in the unanswered counts.
counts['open'] = Post.objects.filter(type=Post.QUESTION, reply_count=0, status=Post.OPEN,
creation_date__gt=since).count()
# How many new planet posts
counts['planet'] = BlogPost.objects.filter(insert_date__gt=since).count()
# Compute a few more counts for the user.
if user.is_authenticated():
# These are the new messages since the last login.
counts['messages'] = Message.objects.filter(user=user, unread=True, sent_at__gt=since).count()
# These are the new votes since the last login.
counts['votes'] = Vote.objects.filter(post__author=user, date__gt=since).count()
return counts
class Visit(object):
"""
Sets visit specific parameters on objects.
"""
def process_request(self, request, weeks=settings.COUNT_INTERVAL_WEEKS):
global SESSION_KEY, ANON_USER
user, session = request.user, request.session
# Suspended users are logged out immediately.
if user.is_authenticated() and user.is_suspended:
logout(request)
messages.error(request, 'Sorry, this account has been suspended. Please contact the administrators.')
# Add attributes to anonymous users.
if not user.is_authenticated():
# This attribute is required inside templates.
user.is_moderator = user.is_admin = False
# Check external logins.
if settings.EXTERNAL_AUTH and valid_external_login(request):
messages.success(request, "Login completed")
# We do this to detect when an anonymous session turns into a logged in one.
if ANON_USER not in session:
session[ANON_USER] = True
# User attributes that refresh at given intervals.
if user.is_authenticated():
# The time between two count refreshes.
elapsed = (const.now() - user.profile.last_login).seconds
# The user has an anonymous session already.
# Update the user login data now.
if ANON_USER in session:
del session[ANON_USER]
elapsed = settings.SESSION_UPDATE_SECONDS + 1
# The user session will be updated.
if elapsed > settings.SESSION_UPDATE_SECONDS:
# Set the last login time.
Profile.objects.filter(user_id=user.id).update(last_login=const.now())
# Compute the counts.
counts = get_counts(request)
# Store the counts in the session for later use.
session[SESSION_KEY] = counts
# Create user awards if possible.
create_user_award.delay(user=user)
# check user and fill in details
check_user_profile.delay(ip=get_ip(request), user=user)
# Get the counts from the session or the cache.
counts = session.get(SESSION_KEY) or cache.get(SESSION_KEY)
# No sessions found, set the them into the session.
if not counts:
# Compute the counts
counts = get_counts(request)
# Put them into the session.
session[SESSION_KEY] = counts
# Store them in the cache for the next anonymous user.
cache.set(SESSION_KEY, counts, settings.SESSION_UPDATE_SECONDS)
# Seconds
TIME_PERIOD = 24 * 3600
# How many visit within that time period.
MAX_VISITS = 50
# Allowed IPs, triplets
WHITE_LIST = [
]
# Make lookup faster.
WHITE_LIST = set(WHITE_LIST)
from django.shortcuts import redirect
class Ban(object):
"""
Sets visit specific parameters on objects.
"""
def process_request(self, request):
user = request.user
if user.is_anonymous():
oip = get_ip(request)
ips = oip.split(".")[:-1]
ip = ".".join(ips)
if ip in WHITE_LIST:
return
if ip not in cache:
cache.set(ip, 0, TIME_PERIOD)
value = cache.get(ip)
if value >= MAX_VISITS:
# Raise redirect exception
now = const.now()
message = "%s\tbanned\t%s\t%s\n" % (now, ip, oip)
logger.error(message)
fp = open("/home/www-data/biostar-central/banned-ips.txt", "a")
fp.write(message)
fp.close()
return redirect('/static/message.txt')
else:
cache.incr(ip)
| false | true |
f7faec20bce35ac87f91e934453793f8592c8220 | 5,826 | py | Python | contrib/linearize/linearize-data.py | Kodacoin/Kodacoin | 5d64d00f71b24da16363b3ac80d7b180dd6cbf61 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-data.py | Kodacoin/Kodacoin | 5d64d00f71b24da16363b3ac80d7b180dd6cbf61 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-data.py | Kodacoin/Kodacoin | 5d64d00f71b24da16363b3ac80d7b180dd6cbf61 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
import ltc_scrypt
settings = {}
def uint32(x):
	"Truncate *x* to its unsigned 32-bit value."
	return x & 0xffffffff
def bytereverse(x):
	"Return the 32-bit word *x* with its four bytes in reverse order."
	# Pack little-endian, unpack big-endian: a byte swap without shifts.
	return struct.unpack(">I", struct.pack("<I", x & 0xffffffff))[0]
def bufreverse(in_buf):
	"""Byte-swap each aligned 4-byte word of *in_buf*.

	Assumes len(in_buf) is a multiple of 4 (callers pass 32-byte hashes).
	Works on both Python 2 str and Python 3 bytes: reversing the slice
	directly is equivalent to the old unpack/bytereverse/pack round trip,
	and b''.join fixes the Py2-only ''.join of packed bytes.
	"""
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i + 4][::-1])
	return b''.join(out_words)
def wordreverse(in_buf):
	"""Reverse the order of the 4-byte words in *in_buf*.

	Assumes len(in_buf) is a multiple of 4. b''.join replaces the
	Py2-only ''.join so the function also works on Python 3 bytes.
	"""
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i + 4])
	out_words.reverse()
	return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
	"Return the double-SHA256 digest of a raw block header."
	first_pass = hashlib.sha256(blk_hdr).digest()
	return hashlib.sha256(first_pass).digest()
def calc_hash_str(blk_hdr):
	"Return the human-readable (byte- and word-reversed hex) SHA256d block hash."
	raw = calc_hdr_hash(blk_hdr)
	raw = bufreverse(raw)
	raw = wordreverse(raw)
	# .encode('hex') is the Python 2 hex codec used throughout this file.
	return raw.encode('hex')
def calc_scrypt_hash_str(blk_hdr):
	"Return the human-readable scrypt proof-of-work hash of the header."
	raw = ltc_scrypt.getPoWHash(blk_hdr)
	raw = bufreverse(raw)
	raw = wordreverse(raw)
	# .encode('hex') is the Python 2 hex codec used throughout this file.
	return raw.encode('hex')
def get_blk_dt(blk_hdr):
	"""Extract the block timestamp from a raw header.

	Reads the little-endian nTime field at byte offset 68 and returns
	(first-of-month datetime, raw unix time).
	"""
	(nTime,) = struct.unpack("<I", blk_hdr[68:72])
	stamp = datetime.datetime.fromtimestamp(nTime)
	month_start = datetime.datetime(stamp.year, stamp.month, 1)
	return (month_start, nTime)
def get_block_hashes(settings):
	"""Read the expected block hashes, one per line, from settings['hashlist'].

	Returns the hashes as a list in file (height) order.
	BUG FIX: the file handle was never closed; a with-block now
	guarantees it is released.
	"""
	blkindex = []
	with open(settings['hashlist'], "r") as f:
		for line in f:
			blkindex.append(line.rstrip())

	print("Read " + str(len(blkindex)) + " hashes")

	return blkindex
def mkblockset(blkindex):
	"Build a dict keyed by hash for O(1) membership tests."
	return dict((block_hash, True) for block_hash in blkindex)
def copydata(settings, blkindex, blkset):
	"""Copy raw blocks from the input blk*.dat files into linearized output.

	Blocks are read in input-file order, matched against the expected
	hash list (blkindex, in height order) and written out in that exact
	order, optionally rotating output files by size cap or by calendar
	month.

	settings: parsed config dict (paths, netmagic, size/timestamp options).
	blkindex: ordered list of block hashes.
	blkset:   dict keyed by hash for O(1) membership tests.
	"""
	inFn = 1          # index of the current input blk%04d.dat file
	inF = None        # open input handle; None means "open the next file"
	outFn = 0         # index of the next output file
	outsz = 0         # bytes written to the current output file
	outF = None
	outFname = None
	blkCount = 0      # blocks copied so far (also the expected height)

	lastDate = datetime.datetime(2000, 1, 1)
	# Baseline timestamp used when back-dating rotated output files;
	# raised whenever a newer block timestamp is seen.
	highTS = 1408893517 - 315360000
	timestampSplit = False
	fileOutput = True
	setFileTime = False
	maxOutSz = settings['max_out_sz']
	if 'output' in settings:
		fileOutput = False
	if settings['file_timestamp'] != 0:
		setFileTime = True
	if settings['split_timestamp'] != 0:
		timestampSplit = True

	while True:
		if not inF:
			fname = "%s/blk%04d.dat" % (settings['input'], inFn)
			print("Input file" + fname)
			try:
				inF = open(fname, "rb")
			except IOError:
				# No more input files: we are done.
				print "Done"
				return

		# Each stored block is prefixed by 4 magic bytes + 4-byte length.
		inhdr = inF.read(8)
		if (not inhdr or (inhdr[0] == "\0")):
			# End of useful data in this file; advance to the next one.
			inF.close()
			inF = None
			inFn = inFn + 1
			continue

		inMagic = inhdr[:4]
		if (inMagic != settings['netmagic']):
			print("Invalid magic:" + inMagic)
			return
		inLenLE = inhdr[4:]
		su = struct.unpack("<I", inLenLE)
		inLen = su[0]
		rawblock = inF.read(inLen)
		blk_hdr = rawblock[:80]

		hash_str = 0
		# NOTE(review): 319000 looks like the height where the PoW hash
		# switched from scrypt to double-SHA256 -- confirm against the
		# chain parameters.
		if blkCount > 319000:
			hash_str = calc_hash_str(blk_hdr)
		else:
			hash_str = calc_scrypt_hash_str(blk_hdr)
		if not hash_str in blkset:
			# Not on the main chain (stale/orphan): skip it.
			print("Skipping unknown block " + hash_str)
			continue

		if blkindex[blkCount] != hash_str:
			print("Out of order block.")
			print("Expected " + blkindex[blkCount])
			print("Got " + hash_str)
			sys.exit(1)

		# Rotate the output file when the size cap would be exceeded.
		if not fileOutput and ((outsz + inLen) > maxOutSz):
			outF.close()
			if setFileTime:
				os.utime(outFname, (int(time.time()), highTS))
			outF = None
			outFname = None
			outFn = outFn + 1
			outsz = 0

		(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if timestampSplit and (blkDate > lastDate):
			# Start a new output file at each month boundary.
			print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
			lastDate = blkDate
			if outF:
				outF.close()
				if setFileTime:
					os.utime(outFname, (int(time.time()), highTS))
				outF = None
				outFname = None
				outFn = outFn + 1
				outsz = 0

		if not outF:
			if fileOutput:
				outFname = settings['output_file']
			else:
				outFname = "%s/blk%05d.dat" % (settings['output'], outFn)
			print("Output file" + outFname)
			outF = open(outFname, "wb")

		# Re-emit the original 8-byte prefix followed by the raw block.
		outF.write(inhdr)
		outF.write(rawblock)
		outsz = outsz + inLen + 8

		blkCount = blkCount + 1
		if blkTS > highTS:
			highTS = blkTS

		if (blkCount % 1000) == 0:
			print("Wrote " + str(blkCount) + " blocks")
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: linearize-data.py CONFIG-FILE"
		sys.exit(1)

	# Read the key=value config file into the module-level settings dict.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Fill in defaults for any missing settings.
	if 'netmagic' not in settings:
		settings['netmagic'] = '70352205'
	if 'input' not in settings:
		settings['input'] = 'input'
	if 'hashlist' not in settings:
		settings['hashlist'] = 'hashlist.txt'
	if 'file_timestamp' not in settings:
		settings['file_timestamp'] = 0
	if 'split_timestamp' not in settings:
		settings['split_timestamp'] = 0
	if 'max_out_sz' not in settings:
		settings['max_out_sz'] = 1000L * 1000 * 1000

	# Normalize types: size to long, flags to int, magic hex to raw bytes.
	settings['max_out_sz'] = long(settings['max_out_sz'])
	settings['split_timestamp'] = int(settings['split_timestamp'])
	settings['file_timestamp'] = int(settings['file_timestamp'])
	settings['netmagic'] = settings['netmagic'].decode('hex')

	if 'output_file' not in settings and 'output' not in settings:
		print("Missing output file / directory")
		sys.exit(1)

	blkindex = get_block_hashes(settings)
	blkset = mkblockset(blkindex)

	# NOTE(review): this hash is presumably the genesis block, used as a
	# sanity check that the hash list belongs to this chain -- confirm.
	if not "0000015acc71d29aa209d422f39f0458e338a4abe184f83db3453c8516be8fe0" in blkset:
		print("not found")
	else:
		copydata(settings, blkindex, blkset)
| 23.211155 | 85 | 0.670614 |
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
import ltc_scrypt
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def calc_scrypt_hash_str(blk_hdr):
hash = ltc_scrypt.getPoWHash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockset(blkindex):
blkmap = {}
for hash in blkindex:
blkmap[hash] = True
return blkmap
def copydata(settings, blkindex, blkset):
inFn = 1
inF = None
outFn = 0
outsz = 0
outF = None
outFname = None
blkCount = 0
lastDate = datetime.datetime(2000, 1, 1)
highTS = 1408893517 - 315360000
timestampSplit = False
fileOutput = True
setFileTime = False
maxOutSz = settings['max_out_sz']
if 'output' in settings:
fileOutput = False
if settings['file_timestamp'] != 0:
setFileTime = True
if settings['split_timestamp'] != 0:
timestampSplit = True
while True:
if not inF:
fname = "%s/blk%04d.dat" % (settings['input'], inFn)
print("Input file" + fname)
try:
inF = open(fname, "rb")
except IOError:
print "Done"
return
inhdr = inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
inF.close()
inF = None
inFn = inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != settings['netmagic']):
print("Invalid magic:" + inMagic)
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0]
rawblock = inF.read(inLen)
blk_hdr = rawblock[:80]
hash_str = 0
if blkCount > 319000:
hash_str = calc_hash_str(blk_hdr)
else:
hash_str = calc_scrypt_hash_str(blk_hdr)
if not hash_str in blkset:
print("Skipping unknown block " + hash_str)
continue
if blkindex[blkCount] != hash_str:
print("Out of order block.")
print("Expected " + blkindex[blkCount])
print("Got " + hash_str)
sys.exit(1)
if not fileOutput and ((outsz + inLen) > maxOutSz):
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
outF = None
outFname = None
outFn = outFn + 1
outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if timestampSplit and (blkDate > lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
outF = None
outFname = None
outFn = outFn + 1
outsz = 0
if not outF:
if fileOutput:
outFname = settings['output_file']
else:
outFname = "%s/blk%05d.dat" % (settings['output'], outFn)
print("Output file" + outFname)
outF = open(outFname, "wb")
outF.write(inhdr)
outF.write(rawblock)
outsz = outsz + inLen + 8
blkCount = blkCount + 1
if blkTS > highTS:
highTS = blkTS
if (blkCount % 1000) == 0:
print("Wrote " + str(blkCount) + " blocks")
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-data.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
m = re.search('^\s*#', line)
if m:
continue
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = '70352205'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkset = mkblockset(blkindex)
if not "0000015acc71d29aa209d422f39f0458e338a4abe184f83db3453c8516be8fe0" in blkset:
print("not found")
else:
copydata(settings, blkindex, blkset)
| false | true |
f7faec710ec088ec6beec5ba67275806d5a09312 | 2,050 | py | Python | tests/unit/via/views/debug_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | 113 | 2015-02-13T23:22:29.000Z | 2021-09-09T19:42:10.000Z | tests/unit/via/views/debug_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | 265 | 2015-02-05T15:32:47.000Z | 2022-03-31T17:05:43.000Z | tests/unit/via/views/debug_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | 70 | 2015-04-17T23:52:08.000Z | 2022-03-14T16:50:31.000Z | from unittest.mock import create_autospec, sentinel
import pytest
from h_matchers import Any
from pyramid.testing import DummyRequest
from via.views.debug import debug_headers
class TestDebugHeaders:
    """Unit tests for the ``debug_headers`` view."""

    def test_it(self, pyramid_request):
        # The view echoes the request headers back in the response body.
        pyramid_request.headers = {"Key": "Value"}

        response = debug_headers(sentinel.context, pyramid_request)

        # The page links back to itself via the named route.
        pyramid_request.route_url.assert_called_once_with("debug_headers")
        assert response.status_code == 200
        assert response.text == Any.string.containing(
            pyramid_request.route_url.return_value
        )
        # Both header names and header values appear in the output.
        assert response.text == Any.string.containing("Key")
        assert response.text == Any.string.containing("Value")

    def test_it_does_not_clean_headers_with_raw_true(
        self, pyramid_request, clean_headers, OrderedDict
    ):
        # ?raw=1 bypasses header cleaning and shows them verbatim.
        pyramid_request.GET["raw"] = "1"

        debug_headers(sentinel.context, pyramid_request)

        clean_headers.assert_not_called()
        OrderedDict.assert_called_once_with(pyramid_request.headers)

    def test_it_cleans_headers_with_raw_false(
        self, pyramid_request, clean_headers, OrderedDict
    ):
        # Without ?raw the headers are passed through clean_headers().
        pyramid_request.GET["raw"] = ""

        debug_headers(sentinel.context, pyramid_request)

        OrderedDict.assert_not_called()
        clean_headers.assert_called_once_with(pyramid_request.headers)

    # Patch the collaborators inside via.views.debug so the tests above
    # can assert on how the view calls them.
    @pytest.fixture
    def OrderedDict(self, patch):
        return patch("via.views.debug.OrderedDict")

    @pytest.fixture
    def clean_headers(self, patch):
        clean_headers = patch("via.views.debug.clean_headers")
        # Must be JSON serialisable because the view renders it.
        clean_headers.return_value = {"something": "JSON serialisable"}
        return clean_headers

    @pytest.fixture
    def pyramid_request(self):
        pyramid_request = DummyRequest()
        # `route_url` seems to go big time bonkers if you use the built in one
        pyramid_request.route_url = create_autospec(pyramid_request.route_url)
        pyramid_request.route_url.return_value = "ROUTE_URL"
        return pyramid_request
| 32.539683 | 78 | 0.715122 | from unittest.mock import create_autospec, sentinel
import pytest
from h_matchers import Any
from pyramid.testing import DummyRequest
from via.views.debug import debug_headers
class TestDebugHeaders:
def test_it(self, pyramid_request):
pyramid_request.headers = {"Key": "Value"}
response = debug_headers(sentinel.context, pyramid_request)
pyramid_request.route_url.assert_called_once_with("debug_headers")
assert response.status_code == 200
assert response.text == Any.string.containing(
pyramid_request.route_url.return_value
)
assert response.text == Any.string.containing("Key")
assert response.text == Any.string.containing("Value")
def test_it_does_not_clean_headers_with_raw_true(
self, pyramid_request, clean_headers, OrderedDict
):
pyramid_request.GET["raw"] = "1"
debug_headers(sentinel.context, pyramid_request)
clean_headers.assert_not_called()
OrderedDict.assert_called_once_with(pyramid_request.headers)
def test_it_cleans_headers_with_raw_false(
self, pyramid_request, clean_headers, OrderedDict
):
pyramid_request.GET["raw"] = ""
debug_headers(sentinel.context, pyramid_request)
OrderedDict.assert_not_called()
clean_headers.assert_called_once_with(pyramid_request.headers)
@pytest.fixture
def OrderedDict(self, patch):
return patch("via.views.debug.OrderedDict")
@pytest.fixture
def clean_headers(self, patch):
clean_headers = patch("via.views.debug.clean_headers")
clean_headers.return_value = {"something": "JSON serialisable"}
return clean_headers
@pytest.fixture
def pyramid_request(self):
pyramid_request = DummyRequest()
pyramid_request.route_url = create_autospec(pyramid_request.route_url)
pyramid_request.route_url.return_value = "ROUTE_URL"
return pyramid_request
| true | true |
f7faed08bee1ae8c8dd512a6057f7be7b40fe7fe | 2,002 | py | Python | Module2/Module2-lab5.py | irfanki/EDX | 272e774d00d05647423fa2f77c8526ceaa30109d | [
"MIT"
] | null | null | null | Module2/Module2-lab5.py | irfanki/EDX | 272e774d00d05647423fa2f77c8526ceaa30109d | [
"MIT"
] | null | null | null | Module2/Module2-lab5.py | irfanki/EDX | 272e774d00d05647423fa2f77c8526ceaa30109d | [
"MIT"
] | null | null | null | import pandas as pd
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
# .. your code here ..
df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header=1)[0]
print(df.head(5))
# TODO: Rename the columns so that they match the
# column definitions provided to you on the website
#
# .. your code here ..
col_names = ['RK', 'Player', 'Team', 'Games Played', 'Goals', 'Assists',
'Points', 'Plus/Minus Rating', 'Penalty Minutes',
'Points Per Game', 'Shots on Goal', 'Shooting Percentage',
'Game-Winning Goals', 'Power-Play Goals', 'Power-Play Assists',
'Short-Handed Goals', 'Short-Handed Assists']
df.columns = col_names
print(df.head(5))
# TODO: Get rid of any row that has at least 4 NANs in it
#
# .. your code here ..
df = df.dropna(axis=0, thresh=4)
# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
# .. your code here ..
print(df)
df = df[df.Player != 'PLAYER']
print(df)
# TODO: Get rid of the 'RK' column
#
# .. your code here ..
df = df.drop(labels=['RK'], axis=1)
print(df)
# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
#
# .. your code here ..
df = df.reset_index(drop=True)
# TODO: Check the data type of all columns, and ensure those
# that should be numeric are numeric
print(df.dtypes)
for i in range(2, len(df.columns)):
df.iloc[:, i] = pd.to_numeric(df.iloc[:, i], errors='coerce')
print(df.dtypes)
# TODO: Your dataframe is now ready! Use the appropriate
# commands to answer the questions on the course lab page.
pct_unique = df.iloc[:, 10].unique()
print(pct_unique)
added = df.iloc[15, 2] + df.iloc[16, 2] | 24.414634 | 124 | 0.671329 | import pandas as pd
# carefully at the sample code provided in the reading
#
# .. your code here ..
df = pd.read_html('http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2', header=1)[0]
print(df.head(5))
# TODO: Rename the columns so that they match the
# column definitions provided to you on the website
#
# .. your code here ..
col_names = ['RK', 'Player', 'Team', 'Games Played', 'Goals', 'Assists',
'Points', 'Plus/Minus Rating', 'Penalty Minutes',
'Points Per Game', 'Shots on Goal', 'Shooting Percentage',
'Game-Winning Goals', 'Power-Play Goals', 'Power-Play Assists',
'Short-Handed Goals', 'Short-Handed Assists']
df.columns = col_names
print(df.head(5))
# TODO: Get rid of any row that has at least 4 NANs in it
#
# .. your code here ..
df = df.dropna(axis=0, thresh=4)
# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
# .. your code here ..
print(df)
df = df[df.Player != 'PLAYER']
print(df)
# TODO: Get rid of the 'RK' column
#
# .. your code here ..
df = df.drop(labels=['RK'], axis=1)
print(df)
# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
df = df.reset_index(drop=True)
print(df.dtypes)
for i in range(2, len(df.columns)):
df.iloc[:, i] = pd.to_numeric(df.iloc[:, i], errors='coerce')
print(df.dtypes)
pct_unique = df.iloc[:, 10].unique()
print(pct_unique)
added = df.iloc[15, 2] + df.iloc[16, 2] | true | true |
f7faed14203e675e6310bac51b542fa36e9887e5 | 1,072 | py | Python | insta/forms.py | alvynah/instagram-clone | 1e945a2bc4b13c9f1d612e40bfbc1d94dca6e6e3 | [
"MIT"
] | 1 | 2021-06-22T16:31:30.000Z | 2021-06-22T16:31:30.000Z | insta/forms.py | alvynah/instagram-clone | 1e945a2bc4b13c9f1d612e40bfbc1d94dca6e6e3 | [
"MIT"
] | null | null | null | insta/forms.py | alvynah/instagram-clone | 1e945a2bc4b13c9f1d612e40bfbc1d94dca6e6e3 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Post,Profile,Comment
class SignUpForm(UserCreationForm):
    """Registration form: the stock user-creation form plus email and full name."""

    email = forms.EmailField(max_length=254)
    fullname = forms.CharField(max_length=254)

    class Meta:
        model = User
        fields = ('username', 'fullname', 'email', 'password1', 'password2')
class UploadImageForm(forms.ModelForm):
    """Form used to publish a new Post: the image plus its title and description."""

    class Meta:
        model = Post
        fields = ('image', 'title', 'description')
class CommentForm(forms.ModelForm):
    """Form for posting a Comment; only the comment text is user-editable.

    Bug fixed: this class previously declared ``Meta`` twice.  The second
    declaration (``fields = ('comment',)``) silently replaced the first
    (``exclude = ['post', 'user']``), so only the surviving definition is
    kept here.  Effective behavior is unchanged.
    """

    class Meta:
        model = Comment
        fields = ('comment',)
class UserUpdateForm(forms.ModelForm):
    """Lets a user edit the username/email pair on the auth ``User`` record."""

    email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')

    class Meta:
        model = User
        fields = ('username', 'email')
class UserProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('name', 'bio', 'profile_pic') | 28.210526 | 97 | 0.656716 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Post,Profile,Comment
class SignUpForm(UserCreationForm):
email = forms.EmailField(max_length=254)
fullname=forms.CharField(max_length=254)
class Meta:
model = User
fields = ('username', 'fullname', 'email', 'password1','password2')
class UploadImageForm(forms.ModelForm):
class Meta:
model = Post
fields = ('image', 'title', 'description')
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['post','user']
class Meta:
model = Comment
fields = ('comment',)
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username', 'email')
class UserProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('name', 'bio', 'profile_pic') | true | true |
f7faee016d77c87b813ba04e896bb40ccfc31484 | 2,878 | py | Python | slack_bolt/adapter/aws_lambda/handler.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | 1 | 2020-11-11T19:19:20.000Z | 2020-11-11T19:19:20.000Z | slack_bolt/adapter/aws_lambda/handler.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | null | null | null | slack_bolt/adapter/aws_lambda/handler.py | misscoded/bolt-python | ed26ea039c37cbd00551e25deac0fb1871c03aed | [
"MIT"
] | null | null | null | import base64
import logging
from typing import List, Dict, Any
from slack_bolt.adapter.aws_lambda.internals import _first_value
from slack_bolt.app import App
from slack_bolt.logger import get_bolt_app_logger
from slack_bolt.oauth import OAuthFlow
from slack_bolt.request import BoltRequest
from slack_bolt.response import BoltResponse
class SlackRequestHandler:
    """Adapter exposing a Slack Bolt ``App`` as an AWS Lambda handler.

    Translates Lambda HTTP (payload v2) events into ``BoltRequest`` objects,
    dispatches them through the app, and converts the resulting
    ``BoltResponse`` back into the dict shape Lambda expects.
    """

    def __init__(self, app: App):  # type: ignore
        self.app = app
        self.logger = get_bolt_app_logger(app.name, SlackRequestHandler)

    @classmethod
    def clear_all_log_handlers(cls):
        # AWS Lambda pre-installs a root log handler; strip every root
        # handler so subsequent logging configuration can take effect.
        # https://stackoverflow.com/questions/37703609/using-python-logging-with-aws-lambda
        root = logging.getLogger()
        if root.handlers:
            for handler in root.handlers:
                root.removeHandler(handler)

    def handle(self, event, context):
        """Route one Lambda event and return the HTTP reply dict.

        GET requests are treated as OAuth traffic (when an oauth_flow is
        configured); POST requests are regular Slack payloads dispatched
        through the app.  Anything else yields a 404.
        """
        self.logger.debug(f"Incoming event: {event}, context: {context}")

        # Payload-v2 events carry the verb under requestContext.http.method.
        method = event.get("requestContext", {}).get("http", {}).get("method", None)
        if method is None:
            return not_found()
        if method == "GET":
            if self.app.oauth_flow is not None:
                oauth_flow: OAuthFlow = self.app.oauth_flow
                bolt_req: BoltRequest = to_bolt_request(event)
                query = bolt_req.query
                # An OAuth callback carries either code+state or an error
                # parameter; any other GET starts a new installation.
                is_callback = query is not None and (
                    (
                        _first_value(query, "code") is not None
                        and _first_value(query, "state") is not None
                    )
                    or _first_value(query, "error") is not None
                )
                if is_callback:
                    bolt_resp = oauth_flow.handle_callback(bolt_req)
                    return to_aws_response(bolt_resp)
                else:
                    bolt_resp = oauth_flow.handle_installation(bolt_req)
                    return to_aws_response(bolt_resp)
        elif method == "POST":
            bolt_req = to_bolt_request(event)
            bolt_resp = self.app.dispatch(bolt_req)
            aws_response = to_aws_response(bolt_resp)
            return aws_response

        # GET without a configured OAuth flow, or any other verb: 404.
        return not_found()
def to_bolt_request(event) -> BoltRequest:
    """Convert a raw AWS Lambda HTTP event dict into a ``BoltRequest``."""
    raw_body = event.get("body", "")
    if event["isBase64Encoded"]:
        raw_body = base64.b64decode(raw_body).decode("utf-8")
    request_headers = event.get("headers", {})
    # Lambda delivers cookies as a separate top-level list; Bolt looks for
    # them under the "cookie" header key.
    request_headers["cookie"] = event.get("cookies", [])
    return BoltRequest(
        body=raw_body,
        query=event.get("queryStringParameters", {}),
        headers=request_headers,
    )
def to_aws_response(resp: BoltResponse) -> Dict[str, Any]:
    """Map a ``BoltResponse`` onto the Lambda proxy-integration reply dict."""
    reply: Dict[str, Any] = {"statusCode": resp.status}
    reply["body"] = resp.body
    reply["headers"] = resp.first_headers()
    return reply
def not_found() -> Dict[str, Any]:
    """Canned 404 reply in the Lambda proxy-integration format."""
    return dict(statusCode=404, body="Not Found", headers={})
| 33.858824 | 91 | 0.602154 | import base64
import logging
from typing import List, Dict, Any
from slack_bolt.adapter.aws_lambda.internals import _first_value
from slack_bolt.app import App
from slack_bolt.logger import get_bolt_app_logger
from slack_bolt.oauth import OAuthFlow
from slack_bolt.request import BoltRequest
from slack_bolt.response import BoltResponse
class SlackRequestHandler:
def __init__(self, app: App):
self.app = app
self.logger = get_bolt_app_logger(app.name, SlackRequestHandler)
@classmethod
def clear_all_log_handlers(cls):
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
def handle(self, event, context):
self.logger.debug(f"Incoming event: {event}, context: {context}")
method = event.get("requestContext", {}).get("http", {}).get("method", None)
if method is None:
return not_found()
if method == "GET":
if self.app.oauth_flow is not None:
oauth_flow: OAuthFlow = self.app.oauth_flow
bolt_req: BoltRequest = to_bolt_request(event)
query = bolt_req.query
is_callback = query is not None and (
(
_first_value(query, "code") is not None
and _first_value(query, "state") is not None
)
or _first_value(query, "error") is not None
)
if is_callback:
bolt_resp = oauth_flow.handle_callback(bolt_req)
return to_aws_response(bolt_resp)
else:
bolt_resp = oauth_flow.handle_installation(bolt_req)
return to_aws_response(bolt_resp)
elif method == "POST":
bolt_req = to_bolt_request(event)
bolt_resp = self.app.dispatch(bolt_req)
aws_response = to_aws_response(bolt_resp)
return aws_response
return not_found()
def to_bolt_request(event) -> BoltRequest:
body = event.get("body", "")
if event["isBase64Encoded"]:
body = base64.b64decode(body).decode("utf-8")
cookies: List[str] = event.get("cookies", [])
headers = event.get("headers", {})
headers["cookie"] = cookies
return BoltRequest(
body=body, query=event.get("queryStringParameters", {}), headers=headers,
)
def to_aws_response(resp: BoltResponse) -> Dict[str, Any]:
return {
"statusCode": resp.status,
"body": resp.body,
"headers": resp.first_headers(),
}
def not_found() -> Dict[str, Any]:
return {
"statusCode": 404,
"body": "Not Found",
"headers": {},
}
| true | true |
f7faee6cb336c35495e1fcc4fe3a7778899c6353 | 1,090 | py | Python | run.py | Licht-T/Ant-Cuda | 048d99bce4e018cabe2e51853e5fc22dfa476235 | [
"MIT"
] | null | null | null | run.py | Licht-T/Ant-Cuda | 048d99bce4e018cabe2e51853e5fc22dfa476235 | [
"MIT"
] | null | null | null | run.py | Licht-T/Ant-Cuda | 048d99bce4e018cabe2e51853e5fc22dfa476235 | [
"MIT"
] | 2 | 2020-07-24T14:05:49.000Z | 2021-02-03T08:11:31.000Z | import os
import subprocess
import argparse
# Platform detection: POSIX systems expose os.uname, Windows does not —
# there we must drive the build through MinGW's make instead of plain make.
FLAG_WIN = not hasattr(os, 'uname')
MAKE_CMD = 'mingw32-make' if FLAG_WIN else 'make'

ARG_PARSER = argparse.ArgumentParser(
    description=(
        'A foraging ants multi-agent simulation software.\n'
        'The result is output into subdirectories.'
    )
)
ARG_PARSER.add_argument(
    '--angle', type=int, required=True,
    help='Relative angle between two food resources $\\theta$.'
)
ARG_PARSER.add_argument(
    '--dist', type=int, required=True,
    help='Distance between the nest and each food $R$.'
)

if __name__ == '__main__':
    options = ARG_PARSER.parse_args()
    angle = options.angle
    dist = options.dist

    # Compile the simulator binary for this (distance, angle) configuration.
    print('{0}dist, {1}deg. compiling.'.format(dist, angle))
    subprocess.call([MAKE_CMD, 'ANGLE=' + str(angle), 'DIST=' + str(dist)])

    # Run it; results are written into per-configuration subdirectories.
    print('{0}dist, {1}deg. started.'.format(dist, angle))
    subprocess.call('./{0}dist_{1}deg.exe'.format(dist, angle))
    print('{0}dist, {1}deg. ended.'.format(dist, angle))
| 24.222222 | 66 | 0.670642 | import os
import subprocess
import argparse
MAKE_CMD = 'make'
FLAG_WIN = False
ARG_PARSER = argparse.ArgumentParser(
description=(
'A foraging ants multi-agent simulation software.\n'
'The result is output into subdirectories.'
)
)
ARG_PARSER.add_argument(
'--angle', type=int, required=True,
help='Relative angle between two food resources $\\theta$.'
)
ARG_PARSER.add_argument(
'--dist', type=int, required=True,
help='Distance between the nest and each food $R$.'
)
try:
os.uname()
except AttributeError:
FLAG_WIN = True
if FLAG_WIN:
MAKE_CMD = 'mingw32-make'
if __name__ == '__main__':
parsed_args = ARG_PARSER.parse_args()
angle = parsed_args.angle
dist = parsed_args.dist
print('{0}dist, {1}deg. compiling.'.format(dist, angle))
make_args = [MAKE_CMD, 'ANGLE='+str(angle), 'DIST='+str(dist)]
subprocess.call(make_args)
print('{0}dist, {1}deg. started.'.format(dist, angle))
subprocess.call('./{0}dist_{1}deg.exe'.format(dist, angle))
print('{0}dist, {1}deg. ended.'.format(dist, angle))
| true | true |
f7faf01f357e232e66511bc4071fb2b5ac5bd549 | 88 | py | Python | dglearn/learning/search/__init__.py | syanga/dcglearn | c3faaed7e04431c86649512813c17968de6803eb | [
"MIT"
] | 5 | 2020-07-19T00:11:20.000Z | 2021-11-23T16:28:49.000Z | dglearn/learning/search/__init__.py | syanga/dcglearn | c3faaed7e04431c86649512813c17968de6803eb | [
"MIT"
] | null | null | null | dglearn/learning/search/__init__.py | syanga/dcglearn | c3faaed7e04431c86649512813c17968de6803eb | [
"MIT"
] | null | null | null | from .tabu import *
from .hill_climb import *
from .beam import *
from .virtual import * | 22 | 25 | 0.738636 | from .tabu import *
from .hill_climb import *
from .beam import *
from .virtual import * | true | true |
f7faf156a343a8a8ce188db9653163be500bcd20 | 926 | py | Python | coworker/place/migrations/0014_auto_20170922_1940.py | flybackl/spacesmap | 859f83e564af2c93f506b0fedd1397152e1ccfab | [
"MIT"
] | null | null | null | coworker/place/migrations/0014_auto_20170922_1940.py | flybackl/spacesmap | 859f83e564af2c93f506b0fedd1397152e1ccfab | [
"MIT"
] | null | null | null | coworker/place/migrations/0014_auto_20170922_1940.py | flybackl/spacesmap | 859f83e564af2c93f506b0fedd1397152e1ccfab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-22 19:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``place`` app.

    Drops two columns from ``Place`` and retypes two others.  The Chinese
    verbose names / choice labels shown in the admin are kept verbatim;
    English translations are given in the comments below.
    """

    dependencies = [
        ('place', '0013_place_city'),
    ]

    operations = [
        # Drop the 'cs_extra_description' column from Place.
        migrations.RemoveField(
            model_name='place',
            name='cs_extra_description',
        ),
        # Drop the 'meeting_room_number' column from Place.
        migrations.RemoveField(
            model_name='place',
            name='meeting_room_number',
        ),
        migrations.AlterField(
            model_name='place',
            name='space_name',
            # verbose_name (zh): "name of the Maker-Cloud venue".
            field=models.CharField(max_length=250, verbose_name='创客云图场地的名称'),
        ),
        migrations.AlterField(
            model_name='place',
            name='user_type',
            # choices (zh): ot = "official team", cm = "new member",
            # pm = "long-time member"; defaults to the official team.
            field=models.CharField(choices=[('ot', '官方团队'), ('cm', '新会员'), ('pm', '老会员')], default='ot', max_length=2),
        ),
    ]
| 27.235294 | 119 | 0.564795 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('place', '0013_place_city'),
]
operations = [
migrations.RemoveField(
model_name='place',
name='cs_extra_description',
),
migrations.RemoveField(
model_name='place',
name='meeting_room_number',
),
migrations.AlterField(
model_name='place',
name='space_name',
field=models.CharField(max_length=250, verbose_name='创客云图场地的名称'),
),
migrations.AlterField(
model_name='place',
name='user_type',
field=models.CharField(choices=[('ot', '官方团队'), ('cm', '新会员'), ('pm', '老会员')], default='ot', max_length=2),
),
]
| true | true |
f7faf26c9813e565a4c581c923daa96e433628cf | 442 | py | Python | gis/urls.py | thaiseerp/Live-Events-Earthquake-map-using-in-Google-Maps-using-Django | 9359b36275cfb60619f2dbcfdd1a83cdbac6ef1f | [
"MIT"
] | null | null | null | gis/urls.py | thaiseerp/Live-Events-Earthquake-map-using-in-Google-Maps-using-Django | 9359b36275cfb60619f2dbcfdd1a83cdbac6ef1f | [
"MIT"
] | null | null | null | gis/urls.py | thaiseerp/Live-Events-Earthquake-map-using-in-Google-Maps-using-Django | 9359b36275cfb60619f2dbcfdd1a83cdbac6ef1f | [
"MIT"
] | null | null | null | """
Author: Thaiseer Parammal
"""
from django.conf.urls import include, url
from django.contrib import admin
from earthquake import views as main_view
# URL routes; Django matches these regexes top to bottom, so the order is
# significant.
urlpatterns = [
    url(r'^admin/', admin.site.urls),                    # Django admin site
    url(r'^$', main_view.home, name='home'),             # site root -> home view
    url(r'^earthquake/', include('earthquake.urls')),    # delegate to app urls
    url(r'^landslide/', main_view.landslide, name='landslide'),
    url(r'^forest_fire/', main_view.forest_fire, name='forest_fire'),
]
| 27.625 | 69 | 0.69457 |
from django.conf.urls import include, url
from django.contrib import admin
from earthquake import views as main_view
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', main_view.home, name='home'),
url(r'^earthquake/', include('earthquake.urls')),
url(r'^landslide/', main_view.landslide, name='landslide'),
url(r'^forest_fire/', main_view.forest_fire, name='forest_fire'),
]
| true | true |
f7faf28fba1854238d467135bb0491f3fe865c72 | 246 | py | Python | acmicpc/3009.py | juseongkr/BOJ | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 7 | 2020-02-03T10:00:19.000Z | 2021-11-16T11:03:57.000Z | acmicpc/3009.py | juseongkr/Algorithm-training | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 1 | 2021-01-03T06:58:24.000Z | 2021-01-03T06:58:24.000Z | acmicpc/3009.py | juseongkr/Algorithm-training | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 1 | 2020-01-22T14:34:03.000Z | 2020-01-22T14:34:03.000Z | x1, y1 = input().split()
x2, y2 = input().split()
x3, y3 = input().split()
x4, y4 = 0, 0
if x1 == x2:
x4 = x3
elif x1 == x3:
x4 = x2
else:
x4 = x1
if y1 == y2:
y4 = y3
elif y1 == y3:
y4 = y2
else:
y4 = y1
print(x4+' '+y4)
| 13.666667 | 24 | 0.47561 | x1, y1 = input().split()
x2, y2 = input().split()
x3, y3 = input().split()
x4, y4 = 0, 0
if x1 == x2:
x4 = x3
elif x1 == x3:
x4 = x2
else:
x4 = x1
if y1 == y2:
y4 = y3
elif y1 == y3:
y4 = y2
else:
y4 = y1
print(x4+' '+y4)
| true | true |
f7faf2b1cfa8c3091a8c0e1121e22c1259604d14 | 284,685 | py | Python | madgraph/interface/common_run_interface.py | Ceebs93/madgraph | 82340bc97bcda1f510598d9f3d7819e22cb922f9 | [
"NCSA"
] | 5 | 2018-10-23T14:37:18.000Z | 2021-11-22T20:59:02.000Z | madgraph/interface/common_run_interface.py | Ceebs93/madgraph | 82340bc97bcda1f510598d9f3d7819e22cb922f9 | [
"NCSA"
] | 2 | 2021-01-01T19:42:09.000Z | 2022-03-12T12:22:02.000Z | madgraph/interface/common_run_interface.py | Ceebs93/madgraph | 82340bc97bcda1f510598d9f3d7819e22cb922f9 | [
"NCSA"
] | 4 | 2019-02-18T11:42:18.000Z | 2021-11-11T20:46:08.000Z | ###############################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""A user friendly command line interface to access MadGraph5_aMC@NLO features.
Uses the cmd package for command interpretation and tab completion.
"""
from __future__ import division
import ast
import logging
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import time
import traceback
import urllib
import glob
import StringIO
try:
import readline
GNU_SPLITTING = ('GNU' in readline.__doc__)
except:
GNU_SPLITTING = True
root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
root_path = os.path.split(root_path)[0]
sys.path.insert(0, os.path.join(root_path,'bin'))
# usefull shortcut
pjoin = os.path.join
# Special logger for the Cmd Interface
logger = logging.getLogger('madgraph.stdout') # -> stdout
logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr
try:
import madgraph
except ImportError:
# import from madevent directory
import internal.extended_cmd as cmd
import internal.banner as banner_mod
import internal.shower_card as shower_card_mod
import internal.misc as misc
import internal.cluster as cluster
import internal.check_param_card as check_param_card
import internal.files as files
# import internal.histograms as histograms # imported later to not slow down the loading of the code
import internal.save_load_object as save_load_object
import internal.gen_crossxhtml as gen_crossxhtml
import internal.lhe_parser as lhe_parser
import internal.FO_analyse_card as FO_analyse_card
import internal.sum_html as sum_html
from internal import InvalidCmd, MadGraph5Error
MADEVENT=True
else:
# import from madgraph directory
import madgraph.interface.extended_cmd as cmd
import madgraph.various.banner as banner_mod
import madgraph.various.shower_card as shower_card_mod
import madgraph.various.misc as misc
import madgraph.iolibs.files as files
import madgraph.various.cluster as cluster
import madgraph.various.lhe_parser as lhe_parser
import madgraph.various.FO_analyse_card as FO_analyse_card
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.madevent.gen_crossxhtml as gen_crossxhtml
import models.check_param_card as check_param_card
import madgraph.madevent.sum_html as sum_html
# import madgraph.various.histograms as histograms # imported later to not slow down the loading of the code
from madgraph import InvalidCmd, MadGraph5Error, MG5DIR
MADEVENT=False
#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """Help texts for the commands shared between the amcatnlo_run and
    madevent interfaces.  Each ``help_*`` method simply prints the usage
    text for the corresponding command via the module logger."""

    def help_treatcards(self):
        logger.info("syntax: treatcards [param|run] [--output_dir=] [--param_card=] [--run_card=]")
        logger.info("-- create the .inc files containing the cards information." )

    def help_set(self):
        logger.info("syntax: set %s argument" % "|".join(self._set_options))
        logger.info("-- set options")
        logger.info("   stdout_level DEBUG|INFO|WARNING|ERROR|CRITICAL")
        logger.info("     change the default level for printed information")
        logger.info("   timeout VALUE")
        logger.info("      (default 20) Seconds allowed to answer questions.")
        logger.info("      Note that pressing tab always stops the timer.")
        logger.info("   cluster_temp_path PATH")
        logger.info("      (default None) Allow to perform the run in PATH directory")
        logger.info("      This allow to not run on the central disk. This is not used")
        logger.info("      by condor cluster (since condor has it's own way to prevent it).")

    def help_plot(self):
        logger.info("syntax: plot [RUN] [%s] [-f]" % '|'.join(self._plot_mode))
        logger.info("-- create the plot for the RUN (current run by default)")
        logger.info("     at the different stage of the event generation")
        logger.info("     Note than more than one mode can be specified in the same command.")
        logger.info("   This requires to have MadAnalysis and td installed.")
        logger.info("   -f options: answer all question by default.")

    def help_compute_widths(self):
        logger.info("syntax: compute_widths Particle [Particles] [OPTIONS]")
        logger.info("-- Compute the widths for the particles specified.")
        logger.info("   By default, this takes the current param_card and overwrites it.")
        logger.info("   Precision allows to define when to include three/four/... body decays (LO).")
        logger.info("   If this number is an integer then all N-body decay will be included.")
        logger.info("   Various options:\n")
        logger.info("   --body_decay=X: Parameter to control the precision of the computation")
        logger.info("        if X is an integer, we compute all channels up to X-body decay.")
        logger.info("        if X <1, then we stop when the estimated error is lower than X.")
        logger.info("        if X >1 BUT not an integer, then we X = N + M, with M <1 and N an integer")
        logger.info("              We then either stop at the N-body decay or when the estimated error is lower than M.")
        logger.info("        default: 4.0025")
        logger.info("   --min_br=X: All channel which are estimated below this value will not be integrated numerically.")
        logger.info("        default: precision (decimal part of the body_decay options) divided by four")
        logger.info("   --precision_channel=X: requested numerical precision for each channel")
        logger.info("        default: 0.01")
        logger.info("   --path=X: path for param_card")
        logger.info("        default: take value from the model")
        logger.info("   --output=X: path where to write the resulting card. ")
        logger.info("        default: overwrite input file. If no input file, write it in the model directory")
        logger.info("   --nlo: Compute NLO width [if the model support it]")

    def help_shower(self):
        logger.info("syntax: shower [shower_name] [shower_options]")
        logger.info("-- This is equivalent to running '[shower_name] [shower_options]'")

    def help_pgs(self):
        logger.info("syntax: pgs [RUN] [--run_options]")
        logger.info("-- run pgs on RUN (current one by default)")
        self.run_options_help([('-f','answer all question by default'),
                               ('--tag=', 'define the tag for the pgs run'),
                               ('--no_default', 'not run if pgs_card not present')])

    def help_delphes(self):
        logger.info("syntax: delphes [RUN] [--run_options]")
        logger.info("-- run delphes on RUN (current one by default)")
        self.run_options_help([('-f','answer all question by default'),
                               ('--tag=', 'define the tag for the delphes run'),
                               ('--no_default', 'not run if delphes_card not present')])

    def help_decay_events(self, skip_syntax=False):
        if not skip_syntax:
            logger.info("syntax: decay_events [RUN]")
        logger.info("This functionality allows for the decay of resonances")
        logger.info("in a .lhe file, keeping track of the spin correlation effets.")
        logger.info("BE AWARE OF THE CURRENT LIMITATIONS:")
        logger.info("  (1) Only a succession of 2 body decay are currently allowed")
class CheckValidForCmd(object):
""" The Series of check routines in common between amcatnlo_run and
madevent interface"""
    def check_set(self, args):
        """Check the validity of a ``set`` command line.

        Accepts either ``set option value`` or ``set option=value`` (the
        latter is split in place).  Raises InvalidCmd on an unknown option
        or an ill-typed value.
        """

        if len(args) < 2:
            # Allow the "option=value" single-token form.
            if len(args)==1 and "=" in args[0]:
                args[:] = args[0].split("=",1)
            else:
                self.help_set()
                raise self.InvalidCmd('set needs an option and an argument')

        # NOTE: list + dict.keys() concatenation is Python 2 specific
        # (dict.keys() returns a list there).
        if args[0] not in self._set_options + self.options.keys():
            self.help_set()
            raise self.InvalidCmd('Possible options for set are %s' % \
                                  (self._set_options+self.options.keys()))

        # Value validation for the options with a constrained domain.
        if args[0] in ['stdout_level']:
            if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \
                                                       and not args[1].isdigit():
                raise self.InvalidCmd('output_level needs ' + \
                                      'a valid level')

        if args[0] in ['timeout']:
            if not args[1].isdigit():
                raise self.InvalidCmd('timeout values should be a integer')
    def check_compute_widths(self, args):
        """Check the arguments of ``compute_widths`` and load the model.

        Expected format: PART [PARTS...] [PATH] [--output=PATH] [-f]
        [--body_decay=X] [--min_br=X] [--precision_channel=X] [--nlo].

        Returns a dict with keys: model, force, output, path, particles
        (set of pdg codes, or {'all'}), body_decay, min_br,
        precision_channel (and 'nlo' when requested).
        Uses Python 2 ``raise Exc, msg`` syntax throughout.
        """

        # Check that MG5 directory is present .
        if MADEVENT and not self.options['mg5_path']:
            raise self.InvalidCmd, '''The automatic computations of widths requires that MG5 is installed on the system.
            You can install it and set his path in ./Cards/me5_configuration.txt'''
        elif MADEVENT:
            sys.path.append(self.options['mg5_path'])
        try:
            import models.model_reader as model_reader
            import models.import_ufo as import_ufo
        except ImportError:
            raise self.ConfigurationError, '''Can\'t load MG5.
            The variable mg5_path should not be correctly configure.'''

        ufo_path = pjoin(self.me_dir,'bin','internal', 'ufomodel')
        # Import the UFO model with decay information; the complex-mass-scheme
        # flag is taken from the mother interface (MG5) or from the process
        # characteristics (standalone MadEvent).
        if not MADEVENT:
            modelname = self.find_model_name()
            #restrict_file = None
            #if os.path.exists(pjoin(ufo_path, 'restrict_default.dat')):
            #    restrict_file = pjoin(ufo_path, 'restrict_default.dat')
            force_CMS = self.mother and self.mother.options['complex_mass_scheme']
            model = import_ufo.import_model(modelname, decay=True,
                        restrict=True, complex_mass_scheme=force_CMS)
        else:
            force_CMS = self.proc_characteristics['complex_mass_scheme']
            model = import_ufo.import_model(pjoin(self.me_dir,'bin','internal',
                         'ufomodel'), decay=True, complex_mass_scheme=force_CMS)

#        if not hasattr(model.get('particles')[0], 'partial_widths'):
#            raise self.InvalidCmd, 'The UFO model does not include partial widths information. Impossible to compute widths automatically'

        # check if the name are passed to default MG5
        if '-modelname' not in open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat')).read():
            model.pass_particles_name_in_mg_default()
        model = model_reader.ModelReader(model)
        # Map both particle and antiparticle names to pdg codes so either
        # spelling is accepted on the command line.
        particles_name = dict([(p.get('name'), p.get('pdg_code'))
                                               for p in model.get('particles')])
        particles_name.update(dict([(p.get('antiname'), p.get('pdg_code'))
                                               for p in model.get('particles')]))

        # Defaults for the returned option dict.
        output = {'model': model, 'force': False, 'output': None,
                  'path':None, 'particles': set(), 'body_decay':4.0025,
                  'min_br':None, 'precision_channel':0.01}
        for arg in args:
            if arg.startswith('--output='):
                output_path = arg.split('=',1)[1]
                if not os.path.exists(output_path):
                    raise self.InvalidCmd, 'Invalid Path for the output. Please retry.'
                if not os.path.isfile(output_path):
                    # A directory was given: write the default card name there.
                    output_path = pjoin(output_path, 'param_card.dat')
                output['output'] = output_path
            elif arg == '-f':
                output['force'] = True
            elif os.path.isfile(arg):
                # A bare existing file is interpreted as the input param_card.
                ftype = self.detect_card_type(arg)
                if ftype != 'param_card.dat':
                    raise self.InvalidCmd , '%s is not a valid param_card.' % arg
                output['path'] = arg
            elif arg.startswith('--path='):
                arg = arg.split('=',1)[1]
                ftype = self.detect_card_type(arg)
                if ftype != 'param_card.dat':
                    raise self.InvalidCmd , '%s is not a valid param_card.' % arg
                output['path'] = arg
            elif arg.startswith('--'):
                if "=" in arg:
                    # Generic numeric option: --name=value.
                    name, value = arg.split('=',1)
                    try:
                        value = float(value)
                    except Exception:
                        raise self.InvalidCmd, '--%s requires integer or a float' % name
                    output[name[2:]] = float(value)
                elif arg == "--nlo":
                    output["nlo"] = True
                # NOTE(review): any other value-less --flag is silently
                # ignored here, unlike unknown positional arguments below.
            elif arg in particles_name:
                # should be a particles
                output['particles'].add(particles_name[arg])
            elif arg.isdigit() and int(arg) in particles_name.values():
                output['particles'].add(ast.literal_eval(arg))
            elif arg == 'all':
                output['particles'] = set(['all'])
            else:
                self.help_compute_widths()
                raise self.InvalidCmd, '%s is not a valid argument for compute_widths' % arg
        if self.force:
            output['force'] = True

        if not output['particles']:
            raise self.InvalidCmd, '''This routines requires at least one particle in order to compute
            the related width'''

        if output['output'] is None:
            # Default: overwrite the input card.
            output['output'] = output['path']

        return output
def check_delphes(self, arg, nodefault=False):
"""Check the argument for pythia command
syntax: delphes [NAME]
Note that other option are already remove at this point
"""
# If not pythia-pgs path
if not self.options['delphes_path']:
logger.info('Retry to read configuration file to find delphes path')
self.set_configuration()
if not self.options['delphes_path']:
error_msg = 'No valid Delphes path set.\n'
error_msg += 'Please use the set command to define the path and retry.\n'
error_msg += 'You can also define it in the configuration file.\n'
raise self.InvalidCmd(error_msg)
tag = [a for a in arg if a.startswith('--tag=')]
if tag:
arg.remove(tag[0])
tag = tag[0][6:]
if len(arg) == 0 and not self.run_name:
if self.results.lastrun:
arg.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently define. Please add this information.')
if len(arg) == 1 and self.run_name == arg[0]:
arg.pop(0)
filepath = None
if not len(arg):
prev_tag = self.set_run_name(self.run_name, tag, 'delphes')
paths = [pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc'),
pjoin(self.me_dir,'Events','pythia_events.hep'),
pjoin(self.me_dir,'Events','pythia_events.hepmc'),
pjoin(self.me_dir,'Events','pythia8_events.hep.gz'),
pjoin(self.me_dir,'Events','pythia8_events.hepmc.gz')
]
for p in paths:
if os.path.exists(p % {'tag': prev_tag}):
filepath = p % {'tag': prev_tag}
break
else:
a = raw_input("NO INPUT")
if nodefault:
return False
else:
self.help_pgs()
raise self.InvalidCmd('''No file file pythia_events.* currently available
Please specify a valid run_name''')
if len(arg) == 1:
prev_tag = self.set_run_name(arg[0], tag, 'delphes')
if os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)
else:
raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\
% (self.run_name, prev_tag,
pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
else:
if tag:
self.run_card['run_tag'] = tag
self.set_run_name(self.run_name, tag, 'delphes')
return filepath
    def check_open(self, args):
        """Check the validity of an ``open`` command line.

        Resolves args[0] in place to an absolute path, searching (in order)
        the process directory, its Cards/ and HTML/ subdirectories; for a
        missing ``*_card.dat`` the ``*_card_default.dat`` template is copied
        into place first.  Raises InvalidCmd when nothing can be resolved.
        """

        if len(args) != 1:
            self.help_open()
            raise self.InvalidCmd('OPEN command requires exactly one argument')

        # An explicit relative path is taken literally.
        if args[0].startswith('./'):
            if not os.path.isfile(args[0]):
                raise self.InvalidCmd('%s: not such file' % args[0])
            return True

        # if special : create the path.
        if not self.me_dir:
            if not os.path.isfile(args[0]):
                self.help_open()
                raise self.InvalidCmd('No MadEvent path defined. Unable to associate this name to a file')
            else:
                return True

        path = self.me_dir
        if os.path.isfile(os.path.join(path,args[0])):
            args[0] = os.path.join(path,args[0])
        elif os.path.isfile(os.path.join(path,'Cards',args[0])):
            args[0] = os.path.join(path,'Cards',args[0])
        elif os.path.isfile(os.path.join(path,'HTML',args[0])):
            args[0] = os.path.join(path,'HTML',args[0])
        # special for card with _default define: copy the default and open it
        elif '_card.dat' in args[0]:
            name = args[0].replace('_card.dat','_card_default.dat')
            if os.path.isfile(os.path.join(path,'Cards', name)):
                files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0]))
                args[0] = os.path.join(path,'Cards', args[0])
            else:
                raise self.InvalidCmd('No default path for this file')
        elif not os.path.isfile(args[0]):
            raise self.InvalidCmd('No default path for this file')
    def check_treatcards(self, args):
        """check that treatcards arguments are valid
           [param|run|all] [--output_dir=] [--param_card=] [--run_card=]

        Returns (mode, opt) where mode is one of MadLoop/param/run/all and
        opt maps output_dir/param_card/run_card to validated paths
        (defaults under the process directory).
        """

        opt = {'output_dir':pjoin(self.me_dir,'Source'),
               'param_card':pjoin(self.me_dir,'Cards','param_card.dat'),
               'run_card':pjoin(self.me_dir,'Cards','run_card.dat')}
        mode = 'all'
        for arg in args:
            if arg.startswith('--') and '=' in arg:
                key,value =arg[2:].split('=',1)
                if not key in opt:
                    self.help_treatcards()
                    raise self.InvalidCmd('Invalid option for treatcards command:%s ' \
                                          % key)
                if key in ['param_card', 'run_card']:
                    # Accept the path as given or relative to the process dir,
                    # and verify the file really is a card of the right kind.
                    if os.path.isfile(value):
                        card_name = self.detect_card_type(value)
                        if card_name != key:
                            raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
                                                  % (card_name, key))
                        opt[key] = value
                    elif os.path.isfile(pjoin(self.me_dir,value)):
                        card_name = self.detect_card_type(pjoin(self.me_dir,value))
                        if card_name != key:
                            raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
                                                  % (card_name, key))
                        opt[key] = value
                    else:
                        raise self.InvalidCmd('No such file: %s ' % value)
                elif key in ['output_dir']:
                    if os.path.isdir(value):
                        opt[key] = value
                    elif os.path.isdir(pjoin(self.me_dir,value)):
                        opt[key] = pjoin(self.me_dir, value)
                    else:
                        raise self.InvalidCmd('No such directory: %s' % value)
            elif arg in ['MadLoop','param','run','all']:
                mode = arg
            else:
                self.help_treatcards()
                raise self.InvalidCmd('Unvalid argument %s' % arg)

        return mode, opt
def check_decay_events(self, args):
    """Validate the arguments of the decay_events command.

    syntax is "decay_events [NAME]"
    Note that other option are already remove at this point
    """
    extra_opts = []
    if '-from_cards' in args:
        args.remove('-from_cards')
        extra_opts.append('-from_cards')

    if not args:
        # no run given: fall back on the current run, then the last one
        # (note: in this branch the collected options are intentionally
        # not re-appended, matching historical behaviour)
        default_run = self.run_name or self.results.lastrun
        if not default_run:
            raise self.InvalidCmd('No run name currently defined. Please add this information.')
        args.insert(0, default_run)
        return

    if args[0] != self.run_name:
        self.set_run_name(args[0])

    # replace the run name by the concrete path to its event file
    args[0] = self.get_events_path(args[0])

    args += extra_opts
def check_check_events(self, args):
    """Validate the arguments of the check_events command.

    syntax is "check_events [NAME]"
    Note that other option are already remove at this point
    """
    if not args:
        # default to the current run, then to the last recorded run
        default_run = self.run_name or self.results.lastrun
        if not default_run:
            raise self.InvalidCmd('No run name currently defined. Please add this information.')
        args.insert(0, default_run)
        return

    if not (args[0] and os.path.isfile(args[0])):
        # not a direct path to an event file: treat it as a run name
        if args[0] != self.run_name:
            self.set_run_name(args[0], allow_new_tag=False)
        args[0] = self.get_events_path(args[0])
def get_events_path(self, run_name):
    """Return the path of the (possibly gzipped) LHE event file of *run_name*.

    Searches the Events/<run_name> directory for the file appropriate to
    the current interface mode; if nothing is found there, *run_name*
    itself is tried as a direct path. Raises InvalidCmd otherwise.
    """
    if self.mode == 'madevent':
        base = 'unweighted_events.lhe'
    else:
        base = 'events.lhe'

    candidates = [pjoin(self.me_dir, 'Events', run_name, base + '.gz'),
                  pjoin(self.me_dir, 'Events', run_name, base)]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate

    # last resort: maybe the caller handed us a path directly
    if os.path.exists(run_name):
        return run_name
    raise self.InvalidCmd('No events file corresponding to %s run. ' % run_name)
class MadEventAlreadyRunning(InvalidCmd):
    """Raised when the process directory is already in use (RunWeb present)."""
class AlreadyRunning(MadEventAlreadyRunning):
    """Generic alias of MadEventAlreadyRunning."""
#===============================================================================
# CommonRunCmd
#===============================================================================
class CommonRunCmd(HelpToCmd, CheckValidForCmd, cmd.Cmd):
    """Common base class shared by the run interfaces (madevent / aMC@NLO)."""

    # file where debugging information is written when a command crashes
    debug_output = 'ME5_debug'
    # display order of the help categories
    helporder = ['Main Commands', 'Documented commands', 'Require MG5 directory',
                 'Advanced commands']
    sleep_for_error = True

    # The three options categories are treated on a different footage when a
    # set/save configuration occur. current value are kept in self.options
    options_configuration = {'pythia8_path': './pythia8',
                             'hwpp_path': './herwigPP',
                             'thepeg_path': './thepeg',
                             'hepmc_path': './hepmc',
                             'madanalysis_path': './MadAnalysis',
                             'madanalysis5_path': './HEPTools/madanalysis5',
                             'pythia-pgs_path':'./pythia-pgs',
                             'td_path':'./td',
                             'delphes_path':'./Delphes',
                             'exrootanalysis_path':'./ExRootAnalysis',
                             'syscalc_path': './SysCalc',
                             'lhapdf': 'lhapdf-config',
                             'timeout': 60,
                             'f2py_compiler':None,
                             'web_browser':None,
                             'eps_viewer':None,
                             'text_editor':None,
                             'fortran_compiler':None,
                             'cpp_compiler': None,
                             'auto_update':7,
                             'cluster_type': 'condor',
                             'cluster_status_update': (600, 30),
                             'cluster_nb_retry':1,
                             'cluster_local_path': None,
                             'cluster_retry_wait':300}

    # options also understood by the MG5 interface itself
    options_madgraph= {'stdout_level':None}

    # options specific to the madevent run interface
    options_madevent = {'automatic_html_opening':True,
                        'notification_center':True,
                        'run_mode':2,
                        'cluster_queue':None,
                        'cluster_time':None,
                        'cluster_size':100,
                        'cluster_memory':None,
                        'nb_core': None,
                        'cluster_temp_path':None}
def __init__(self, me_dir, options, *args, **opts):
    """Common initialisation of a run interface.

    me_dir  : path of the MadEvent process directory (absolute or relative;
              None with MADEVENT falls back on root_path).
    options : configuration dictionary, stored as self.options.
    force_run=True in opts bypasses the RunWeb lock check.
    Raises AlreadyRunning when another instance owns the directory.
    """
    self.force_run = False # this flag force the run even if RunWeb is present
    if 'force_run' in opts and opts['force_run']:
        self.force_run = True
        del opts['force_run']

    cmd.Cmd.__init__(self, *args, **opts)

    # Define current MadEvent directory
    if me_dir is None and MADEVENT:
        me_dir = root_path

    if os.path.isabs(me_dir):
        self.me_dir = me_dir
    else:
        self.me_dir = pjoin(os.getcwd(),me_dir)

    self.options = options

    self.param_card_iterator = [] #an placeholder containing a generator of paramcard for scanning

    # usefull shortcut
    self.status = pjoin(self.me_dir, 'status')
    self.error = pjoin(self.me_dir, 'error')
    self.dirbin = pjoin(self.me_dir, 'bin', 'internal')

    # Check that the directory is not currently running_in_idle
    if not self.force_run:
        if os.path.exists(pjoin(me_dir,'RunWeb')):
            message = '''Another instance of the program is currently running.
(for this exact same directory) Please wait that this is instance is
closed. If no instance is running, you can delete the file
%s and try again.''' % pjoin(me_dir,'RunWeb')
            # raise X(msg) form: valid in Python 2 and forward-compatible
            raise AlreadyRunning(message)
        else:
            # take the lock by writing our pid in RunWeb
            # (str(pid) replaces the deprecated backquote-repr syntax)
            pid = os.getpid()
            fsock = open(pjoin(me_dir,'RunWeb'),'w')
            try:
                fsock.write(str(pid))
            finally:
                fsock.close()
            self.gen_card_html()

    self.to_store = []
    self.run_name = None
    self.run_tag = None
    self.banner = None
    # Load the configuration file
    self.set_configuration()
    self.configure_run_mode(self.options['run_mode'])

    # Define self.proc_characteristics
    self.get_characteristics()

    if not self.proc_characteristics['ninitial']:
        # Get number of initial states from the Fortran include file
        fsock = open(pjoin(self.me_dir,'Source','nexternal.inc'))
        try:
            nexternal = fsock.read()
        finally:
            fsock.close()
        found = re.search(r"PARAMETER\s*\(NINCOMING=(\d)\)", nexternal)
        self.ninitial = int(found.group(1))
    else:
        self.ninitial = self.proc_characteristics['ninitial']
def make_make_all_html_results(self, folder_names=None, jobs=None):
    """Delegate the creation of all HTML result pages to sum_html.

    The former mutable default arguments ([] shared across calls) are
    replaced by None sentinels; behaviour for existing callers is
    unchanged.
    """
    if folder_names is None:
        folder_names = []
    if jobs is None:
        jobs = []
    return sum_html.make_all_html_results(self, folder_names, jobs)
############################################################################
def split_arg(self, line, error=False):
    """split argument and remove run_options

    Recognised run options are consumed (removed from the returned list):
    -c/--cluster -> cluster mode, -m/--multicore -> multicore mode,
    --nb_core=N -> multicore with N cores, --web -> web mode, -f -> force.
    Unknown single-dash options raise InvalidCmd when error=True,
    otherwise they are kept for the caller.
    """
    args = cmd.Cmd.split_arg(line)
    # iterate over a copy so handled options can be removed from args
    for arg in args[:]:
        if not arg.startswith('-'):
            continue
        elif arg == '-c':
            # run on the cluster
            self.configure_run_mode(1)
        elif arg == '-m':
            # run in multicore mode
            self.configure_run_mode(2)
        elif arg == '-f':
            # force: skip interactive questions
            self.force = True
        elif not arg.startswith('--'):
            # unrecognised single-dash option
            if error:
                raise self.InvalidCmd('%s argument cannot start with - symbol' % arg)
            else:
                continue
        elif arg.startswith('--cluster'):
            self.configure_run_mode(1)
        elif arg.startswith('--multicore'):
            self.configure_run_mode(2)
        elif arg.startswith('--nb_core'):
            self.options['nb_core'] = int(arg.split('=',1)[1])
            self.configure_run_mode(2)
        elif arg.startswith('--web'):
            self.pass_in_web_mode()
            self.configure_run_mode(1)
        else:
            # unrecognised double-dash option: leave it in the list
            continue
        # the option was handled above: drop it from the argument list
        args.remove(arg)

    return args
@misc.multiple_try(nb_try=5, sleep=2)
def load_results_db(self):
    """load the current results status

    Reads HTML/results.pkl when present; any unpickling or reset failure
    falls back on rebuilding a fresh AllResults database. Also restores
    self.last_mode from the last entry of the last run when possible.
    Returns self.results.
    """
    # load the current status of the directory
    if os.path.exists(pjoin(self.me_dir,'HTML','results.pkl')):
        try:
            self.results = save_load_object.load_from_file(pjoin(self.me_dir,'HTML','results.pkl'))
        except Exception:
            #the pickle fail -> need to recreate the library
            model = self.find_model_name()
            process = self.process # define in find_model_name
            self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
            self.results.resetall(self.me_dir)
        else:
            try:
                self.results.resetall(self.me_dir)
            except Exception, error:
                logger.debug(error)
                # Maybe the format was updated -> try fresh
                model = self.find_model_name()
                process = self.process # define in find_model_name
                self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
                self.results.resetall(self.me_dir)
                self.last_mode = ''
        # recover the run mode of the most recent run, if any
        try:
            self.last_mode = self.results[self.results.lastrun][-1]['run_mode']
        except:
            self.results.resetall(self.me_dir)
            self.last_mode = ''
    else:
        # no pickle on disk: start from a fresh results database
        model = self.find_model_name()
        process = self.process # define in find_model_name
        self.results = gen_crossxhtml.AllResults(model, process, self.me_dir)
        self.results.resetall(self.me_dir)
        self.last_mode=''

    return self.results
############################################################################
def do_treatcards(self, line, amcatnlo=False):
    """Advanced commands: create .inc files from param_card.dat/run_card.dat

    Depending on the mode returned by check_treatcards ('run', 'param',
    'MadLoop' or 'all'), writes run_card.inc, param_card.inc and/or
    SubProcesses/MadLoopParams.dat. With amcatnlo=True, lhaid values are
    translated to PDF set names and (unless --keepwidth is given) the
    widths of coloured final-state particles are forced to zero.
    """
    #ensure that the cluster/card are consistent
    if hasattr(self, 'run_card'):
        self.cluster.modify_interface(self)
    else:
        try:
            self.cluster.modify_interface(self)
        except Exception, error:
            misc.sprint(str(error))

    keepwidth = False
    if '--keepwidth' in line:
        keepwidth = True
        line = line.replace('--keepwidth', '')
    args = self.split_arg(line)
    mode, opt = self.check_treatcards(args)

    # --- run_card -> Source include file ---
    if mode in ['run', 'all']:
        if not hasattr(self, 'run_card'):
            run_card = banner_mod.RunCard(opt['run_card'])
        else:
            run_card = self.run_card

        # add the conversion from the lhaid to the pdf set names
        if amcatnlo and run_card['pdlabel']=='lhapdf':
            pdfsetsdir=self.get_lhapdf_pdfsetsdir()
            pdfsets=self.get_lhapdf_pdfsets_list(pdfsetsdir)
            lhapdfsetname=[]
            for lhaid in run_card['lhaid']:
                if lhaid in pdfsets:
                    lhapdfsetname.append(pdfsets[lhaid]['filename'])
                else:
                    raise MadGraph5Error("lhaid %s is not a valid PDF identification number. This can be due to the use of an outdated version of LHAPDF, or %s is not a LHAGlue number corresponding to a central PDF set (but rather one of the error sets)." % (lhaid,lhaid))
            run_card['lhapdfsetname']=lhapdfsetname
        run_card.write_include_file(opt['output_dir'])

    # --- MadLoop parameters ---
    if mode in ['MadLoop', 'all']:
        if os.path.exists(pjoin(self.me_dir, 'Cards', 'MadLoopParams.dat')):
            self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.me_dir,
                                              'Cards', 'MadLoopParams.dat'))
            # write the output file
            self.MadLoopparam.write(pjoin(self.me_dir,"SubProcesses",
                                                       "MadLoopParams.dat"))

    # --- param_card -> Source include file ---
    if mode in ['param', 'all']:
        # use the multi-precision card class when the model supports it
        if os.path.exists(pjoin(self.me_dir, 'Source', 'MODEL', 'mp_coupl.inc')):
            param_card = check_param_card.ParamCardMP(opt['param_card'])
        else:
            param_card = check_param_card.ParamCard(opt['param_card'])
        outfile = pjoin(opt['output_dir'], 'param_card.inc')
        ident_card = pjoin(self.me_dir,'Cards','ident_card.dat')
        # pick the reference (default) card used for the inc-file writing
        if os.path.isfile(pjoin(self.me_dir,'bin','internal','ufomodel','restrict_default.dat')):
            default = pjoin(self.me_dir,'bin','internal','ufomodel','restrict_default.dat')
        elif os.path.isfile(pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')):
            default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')
        elif not os.path.exists(pjoin(self.me_dir,'bin','internal','ufomodel')):
            # no UFO model shipped with the process: write a trivial include
            fsock = open(pjoin(self.me_dir,'Source','param_card.inc'),'w')
            fsock.write(' ')
            fsock.close()
            return
        else:
            # regenerate the default param_card from the UFO model
            subprocess.call(['python', 'write_param_card.py'],
                             cwd=pjoin(self.me_dir,'bin','internal','ufomodel'))
            default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')

        if amcatnlo and not keepwidth:
            # force particle in final states to have zero width
            pids = self.get_pid_final_initial_states()
            # check those which are charged under qcd
            if not MADEVENT and pjoin(self.me_dir,'bin','internal') not in sys.path:
                sys.path.insert(0,pjoin(self.me_dir,'bin','internal'))

            #Ensure that the model that we are going to load is the current
            #one.
            to_del = [name for name in sys.modules.keys()
                                  if name.startswith('internal.ufomodel')
                                  or name.startswith('ufomodel')]
            for name in to_del:
                del(sys.modules[name])

            import ufomodel as ufomodel
            zero = ufomodel.parameters.ZERO
            # coloured particles appearing as external states with a width
            no_width = [p for p in ufomodel.all_particles
                    if (str(p.pdg_code) in pids or str(-p.pdg_code) in pids)
                       and p.color != 1 and p.width != zero]
            done = []
            for part in no_width:
                if abs(part.pdg_code) in done:
                    continue
                done.append(abs(part.pdg_code))
                param = param_card['decay'].get((part.pdg_code,))
                if param.value != 0:
                    logger.info('''For gauge cancellation, the width of \'%s\' has been set to zero.'''\
                                % part.name,'$MG:color:BLACK')
                    param.value = 0

        param_card.write_inc_file(outfile, ident_card, default)
def get_model(self):
    """Import and return the UFO model attached to this process directory.

    Returns None when no MG5 path is configured (the model cannot then
    be imported).
    """
    if not self.options['mg5_path']:
        return None

    sys.path.append(self.options['mg5_path'])
    import models.import_ufo as import_ufo
    use_cms = self.proc_characteristics['complex_mass_scheme']
    model_dir = pjoin(self.me_dir, 'bin', 'internal', 'ufomodel')
    # silence the madgraph.model logger during the import
    with misc.MuteLogger(['madgraph.model'], [50]):
        model = import_ufo.import_model(model_dir, complex_mass_scheme=use_cms)
    return model
def ask_edit_cards(self, cards, mode='fixed', plot=True, first_cmd=None):
    """Interactive card-edition entry point.

    Forwards to ask_edit_card_static with this instance's timeout and
    ask function; the plot option is disabled when MadAnalysis is not
    available.
    """
    plot = plot and bool(self.options['madanalysis_path'])
    self.ask_edit_card_static(cards, mode, plot, self.options['timeout'],
                              self.ask, first_cmd=first_cmd)
@staticmethod
def ask_edit_card_static(cards, mode='fixed', plot=True,
                         timeout=0, ask=None, **opt):
    """Loop asking the user which of *cards* (file names) to edit.

    Builds the numbered menu, then repeatedly calls *ask* (defaults to
    CommonRunCmd.ask) with AskforEditCard until the user answers
    '0'/'done'. *plot* adds a plot_card.dat entry when not already listed.
    """
    if not ask:
        ask = CommonRunCmd.ask

    def path2name(path):
        # map a card file name to the short name shown in the menu
        if '_card' in path:
            return path.split('_card')[0]
        elif path == 'delphes_trigger.dat':
            return 'trigger'
        elif path == 'input.lhco':
            return 'lhco'
        elif path == 'MadLoopParams.dat':
            return 'MadLoopParams'
        else:
            # raise X(msg): valid Python 2 and forward compatible
            # (also fixes the 'Unknow' typo in the message)
            raise Exception('Unknown cards name %s' % path)

    # Ask the user if he wants to edit any of the files
    #First create the asking text
    question = """Do you want to edit a card (press enter to bypass editing)?\n"""
    possible_answer = ['0', 'done']
    card = {0:'done'}
    indent = max(len(path2name(card_name)) for card_name in cards)
    question += '/'+'-'*60+'\\\n'
    for i, card_name in enumerate(cards):
        imode = path2name(card_name)
        possible_answer.append(i+1)
        possible_answer.append(imode)
        question += '| %-77s|\n'%((' \x1b[31m%%s\x1b[0m. %%-%ds : \x1b[32m%%s\x1b[0m'%indent)%(i+1, imode, card_name))
        card[i+1] = imode
    if plot and not 'plot_card.dat' in cards:
        # entry 9 is reserved for the plot card
        question += '| %-77s|\n'%((' \x1b[31m9\x1b[0m. %%-%ds : \x1b[32mplot_card.dat\x1b[0m'%indent) % 'plot')
        possible_answer.append(9)
        possible_answer.append('plot')
        card[9] = 'plot'
    question += '\\'+'-'*60+'/\n'

    if 'param_card.dat' in cards:
        # Add the path options
        question += ' you can also\n'
        question += ' - enter the path to a valid card or banner.\n'
        question += ' - use the \'set\' command to modify a parameter directly.\n'
        question += ' The set option works only for param_card and run_card.\n'
        question += ' Type \'help set\' for more information on this command.\n'
        question += ' - call an external program (ASperGE/MadWidth/...).\n'
        question += ' Type \'help\' for the list of available command\n'
    else:
        question += ' you can also\n'
        question += ' - enter the path to a valid card.\n'
    if 'transfer_card.dat' in cards:
        question += ' - use the \'change_tf\' command to set a transfer functions.\n'

    out = 'to_run'
    while out not in ['0', 'done']:
        out = ask(question, '0', possible_answer, timeout=int(1.5*timeout),
                  path_msg='enter path', ask_class = AskforEditCard,
                  cards=cards, mode=mode, **opt)
@staticmethod
def detect_card_type(path):
    """detect the type of the card. Return value are
       banner
       param_card.dat
       run_card.dat
       pythia_card.dat
       pythia8_card.dat
       plot_card.dat
       pgs_card.dat
       delphes_card.dat
       delphes_trigger.dat
       shower_card.dat [aMCatNLO]
       FO_analyse_card.dat [aMCatNLO]
       madspin_card.dat [MS]
       transfer_card.dat [MW]
       madweight_card.dat [MW]
       madanalysis5_hadron_card.dat
       madanalysis5_parton_card.dat

       Please update the unit-test: test_card_type_recognition when adding
       cards.
    """
    # only inspect the first 50kB: enough to find the identifying tokens
    fulltext = open(path).read(50000)
    if fulltext == '':
        logger.warning('File %s is empty' % path)
        return 'unknown'

    # BUGFIX: a missing comma used to concatenate '<mg5proccard>' with
    # 'ParticlePropagator' into a single (never-matching) alternative.
    to_search = ['<MGVersion>',            # banner
                 '<mg5proccard>',          # banner
                 'ParticlePropagator',     # Delphes
                 'ExecutionPath',
                 'Treewriter',
                 'CEN_max_tracker',
                 '#TRIGGER CARD',          # delphes_trigger.dat
                 'parameter set name',     # pgs_card
                 'muon eta coverage',
                 'req_acc_FO',
                 'MSTP',
                 'b_stable',
                 'FO_ANALYSIS_FORMAT',
                 'MSTU',
                 'Begin Minpts',
                 'gridpack',
                 'ebeam1',
                 'block\s+mw_run',
                 'BLOCK',
                 'DECAY',
                 'launch',
                 'madspin',
                 'transfer_card\.dat',
                 'set',
                 'main:numberofevents',    # pythia8,
                 '@MG5aMC skip_analysis',  #MA5 --both--
                 '@MG5aMC\s*inputs\s*=\s*\*\.(?:hepmc|lhe)', #MA5 --both--
                 '@MG5aMC\s*reconstruction_name', # MA5 hadronique
                 '@MG5aMC'                 # MA5 hadronique
                 ]
    # collect all identifying tokens present in the file (case-insensitive)
    text = re.findall('(%s)' % '|'.join(to_search), fulltext, re.I)
    text = [t.lower() for t in text]

    if '<mgversion>' in text or '<mg5proccard>' in text:
        return 'banner'
    elif 'particlepropagator' in text or 'executionpath' in text or 'treewriter' in text:
        return 'delphes_card.dat'
    elif 'cen_max_tracker' in text:
        return 'delphes_card.dat'
    elif '@mg5amc' in text:
        # MadAnalysis5 cards: decide parton vs hadron level from the flags
        ma5_flag = [f[7:].strip() for f in text if f.startswith('@mg5amc')]
        if any(f.startswith('reconstruction_name') for f in ma5_flag):
            return 'madanalysis5_hadron_card.dat'
        ma5_flag = [f.split('*.')[1] for f in ma5_flag if '*.' in f]
        if any(f.startswith('lhe') for f in ma5_flag):
            return 'madanalysis5_parton_card.dat'
        if any(f.startswith(('hepmc','hep','stdhep','lhco')) for f in ma5_flag):
            return 'madanalysis5_hadron_card.dat'
        else:
            return 'unknown'
    elif '#trigger card' in text:
        return 'delphes_trigger.dat'
    elif 'parameter set name' in text:
        return 'pgs_card.dat'
    elif 'muon eta coverage' in text:
        return 'pgs_card.dat'
    elif 'mstp' in text and not 'b_stable' in text:
        return 'pythia_card.dat'
    elif 'begin minpts' in text:
        return 'plot_card.dat'
    elif ('gridpack' in text and 'ebeam1' in text) or \
            ('req_acc_fo' in text and 'ebeam1' in text):
        return 'run_card.dat'
    elif any(t.endswith('mw_run') for t in text):
        return 'madweight_card.dat'
    elif 'transfer_card.dat' in text:
        return 'transfer_card.dat'
    elif 'block' in text and 'decay' in text:
        return 'param_card.dat'
    elif 'b_stable' in text:
        return 'shower_card.dat'
    elif 'fo_analysis_format' in text:
        return 'FO_analyse_card.dat'
    elif 'main:numberofevents' in text:
        return 'pythia8_card.dat'
    elif 'launch' in text:
        # need to separate madspin/reweight.
        # decay/set can be in both...
        if 'madspin' in text:
            return 'madspin_card.dat'
        if 'decay' in text:
            # need to check if this a line like "decay w+" or "set decay"
            if re.search("(^|;)\s*decay", fulltext):
                return 'madspin_card.dat'
            else:
                return 'reweight_card.dat'
        else:
            return 'reweight_card.dat'
    else:
        return 'unknown'
############################################################################
def get_available_tag(self):
    """Return the first 'tag_N' label not yet used by the current run."""
    used_tags = set(r['tag'] for r in self.results[self.run_name])
    n = 1
    while 'tag_%s' % n in used_tags:
        n += 1
    return 'tag_%s' % n
############################################################################
@misc.mute_logger(names=['madgraph.various.histograms',
                         'internal.histograms'],levels=[20,20])
def generate_Pythia8_HwU_plots(self, plot_root_path,
                               merging_scale_name, observable_name,
                               data_path):
    """Generated the HwU plots from Pythia8 driver output for a specific
    observable.

    plot_root_path     : output directory for the gnuplot files.
    merging_scale_name : label of the merging scale ('qCut' or 'TMS').
    observable_name    : label of the observable ('djr' or 'pt').
    data_path          : path to the HwU data file produced by the driver.
    Returns True on success, False when the data file is absent or empty.
    """
    try:
        import madgraph
    except ImportError:
        import internal.histograms as histograms
    else:
        import madgraph.various.histograms as histograms

    # Make sure that the file is present
    if not os.path.isfile(data_path):
        return False
    # Load the HwU file.
    histos = histograms.HwUList(data_path, consider_reweights='ALL',run_id=0)
    if len(histos)==0:
        return False

    # Now also plot the max vs min merging scale
    merging_scales_available = [label[1] for label in \
        histos[0].bins.weight_labels if
        histograms.HwU.get_HwU_wgt_label_type(label)=='merging_scale']
    if len(merging_scales_available)>=2:
        min_merging_scale = min(merging_scales_available)
        max_merging_scale = max(merging_scales_available)
    else:
        min_merging_scale = None
        max_merging_scale = None

    # jet_samples_to_keep = None means that all jet_samples are kept
    histo_output_options = {
        'format':'gnuplot',
        'uncertainties':['scale','pdf','statistical',
                         'merging_scale','alpsfact'],
        'ratio_correlations':True,
        'arg_string':'Automatic plotting from MG5aMC',
        'jet_samples_to_keep':None,
        'use_band':['merging_scale','alpsfact'],
        'auto_open':False
    }
    # alpsfact variation only applies to MLM
    if not (int(self.run_card['ickkw'])==1):
        histo_output_options['uncertainties'].pop(
            histo_output_options['uncertainties'].index('alpsfact'))
        histo_output_options['use_band'].pop(
            histo_output_options['use_band'].index('alpsfact'))

    # central plot with the full uncertainty band
    histos.output(pjoin(plot_root_path,
        'central_%s_%s_plots'%(merging_scale_name,observable_name)),
        **histo_output_options)

    # one plot per available merging scale
    for scale in merging_scales_available:
        that_scale_histos = histograms.HwUList(
            data_path, run_id=0, merging_scale=scale)
        that_scale_histos.output(pjoin(plot_root_path,
            '%s_%.3g_%s_plots'%(merging_scale_name,scale,observable_name)),
            **histo_output_options)

    # If several merging scales were specified, then it is interesting
    # to compare the summed jet samples for the maximum and minimum
    # merging scale available.
    if not min_merging_scale is None:
        min_scale_histos = histograms.HwUList(data_path,
            consider_reweights=[], run_id=0,
            merging_scale=min_merging_scale)
        max_scale_histos = histograms.HwUList(data_path,
            consider_reweights=[], run_id=0,
            merging_scale=max_merging_scale)

        # Give the histos types so that the plot labels look good
        for histo in min_scale_histos:
            if histo.type is None:
                histo.type = '%s=%.4g'%(merging_scale_name, min_merging_scale)
            else:
                histo.type += '|%s=%.4g'%(merging_scale_name, min_merging_scale)
        for histo in max_scale_histos:
            if histo.type is None:
                histo.type = '%s=%.4g'%(merging_scale_name, max_merging_scale)
            else:
                histo.type += '|%s=%.4g'%(merging_scale_name, max_merging_scale)

        # Now plot and compare against oneanother the shape for the the two scales
        histograms.HwUList(min_scale_histos+max_scale_histos).output(
            pjoin(plot_root_path,'min_max_%s_%s_comparison'
                  %(merging_scale_name,observable_name)),
            format='gnuplot',
            uncertainties=[],
            ratio_correlations=True,
            arg_string='Automatic plotting from MG5aMC',
            jet_samples_to_keep=None,
            use_band=[],
            auto_open=False)
    return True
def gen_card_html(self):
    """Regenerate the HTML pages describing the cards (best effort)."""
    with open(os.devnull, 'w') as sink:
        try:
            misc.call(['./bin/internal/gen_cardhtml-pl'], cwd=self.me_dir,
                      stdout=sink, stderr=sink)
        except Exception:
            # purely cosmetic step: never abort the run because of it
            pass
def create_plot(self, mode='parton', event_path=None, output=None, tag=None):
    """create the plot

    mode       : 'parton', 'Pythia', 'Pythia8', 'PGS', 'Delphes',
                 'shower' or 'reweight' -- selects input events and output
                 location.
    event_path : optional explicit path to the event file (deduced from
                 mode otherwise).
    output     : optional path for the resulting html page.
    tag        : run tag (defaults to run_card['run_tag']).
    Returns True on success, False when the required tools/cards are
    missing or a plotting step fails.
    """
    if not tag:
        tag = self.run_card['run_tag']
    # all modes but Pythia8 rely on MadAnalysis(4) + td + plot_card
    if mode != 'Pythia8':
        madir = self.options['madanalysis_path']
        td = self.options['td_path']
        if not madir or not td or \
            not os.path.exists(pjoin(self.me_dir, 'Cards', 'plot_card.dat')):
            return False
    else:
        PY8_plots_root_path = pjoin(self.me_dir,'HTML',
                         self.run_name,'%s_PY8_plots'%tag)

    if 'ickkw' in self.run_card:
        # matched/merged samples: produce the dedicated matching plots
        if int(self.run_card['ickkw']) and mode == 'Pythia':
            self.update_status('Create matching plots for Pythia', level='pythia')
            # recover old data if none newly created
            if not os.path.exists(pjoin(self.me_dir,'Events','events.tree')):
                misc.gunzip(pjoin(self.me_dir,'Events',
                    self.run_name, '%s_pythia_events.tree.gz' % tag), keep=True,
                    stdout=pjoin(self.me_dir,'Events','events.tree'))
                files.mv(pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'),
                         pjoin(self.me_dir,'Events','xsecs.tree'))
            # Generate the matching plots
            misc.call([self.dirbin+'/create_matching_plots.sh',
                       self.run_name, tag, madir],
                      stdout = os.open(os.devnull, os.O_RDWR),
                      cwd=pjoin(self.me_dir,'Events'))
            #Clean output
            misc.gzip(pjoin(self.me_dir,"Events","events.tree"),
                      stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_events.tree.gz'))
            files.mv(pjoin(self.me_dir,'Events','xsecs.tree'),
                     pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'))
        elif mode == 'Pythia8' and (int(self.run_card['ickkw'])==1 or \
            self.run_card['ktdurham']>0.0 or self.run_card['ptlund']>0.0):
            self.update_status('Create matching plots for Pythia8',
                               level='pythia8')
            # Create the directory if not existing at this stage
            if not os.path.isdir(PY8_plots_root_path):
                os.makedirs(PY8_plots_root_path)
            # qCut for MLM merging, TMS for CKKW-L style merging
            merging_scale_name = 'qCut' if int(self.run_card['ickkw'])==1 \
                else 'TMS'
            djr_path = pjoin(self.me_dir,'Events',
                    self.run_name, '%s_djrs.dat' % tag)
            pt_path = pjoin(self.me_dir,'Events',
                    self.run_name, '%s_pts.dat' % tag)
            for observable_name, data_path in [('djr',djr_path),
                                               ('pt',pt_path)]:
                if not self.generate_Pythia8_HwU_plots(
                        PY8_plots_root_path, merging_scale_name,
                        observable_name,data_path):
                    return False

    if mode == 'Pythia8':
        # render the gnuplot files and build a small index.html
        plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.gnuplot'))
        if not misc.which('gnuplot'):
            logger.warning("Install gnuplot to be able to view the plots"+\
                           " generated at :\n   "+\
                           '\n '.join('%s.gnuplot'%p for p in plot_files))
            return True
        for plot in plot_files:
            command = ['gnuplot',plot]
            try:
                subprocess.call(command,cwd=PY8_plots_root_path,stderr=subprocess.PIPE)
            except Exception as e:
                logger.warning("Automatic processing of the Pythia8 "+\
                               "merging plots with gnuplot failed. Try the"+\
                               " following command by hand:\n   %s"%(' '.join(command))+\
                               "\nException was: %s"%str(e))
                return False

        plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.pdf'))
        if len(plot_files)>0:
            # Add an html page
            html = "<html>\n<head>\n<TITLE>PLOT FOR PYTHIA8</TITLE>"
            html+= '<link rel=stylesheet href="../../mgstyle.css" type="text/css">\n</head>\n<body>\n'
            html += "<h2> Plot for Pythia8 </h2>\n"
            html += '<a href=../../../crossx.html>return to summary</a><br>'
            html += "<table>\n<tr> <td> <b>Obs.</b> </td> <td> <b>Type of plot</b> </td> <td><b> PDF</b> </td> <td><b> input file</b> </td> </tr>\n"
            def sorted_plots(elem):
                # order: central band first, then min/max comparison,
                # then plots sorted by their merging-scale value
                name = os.path.basename(elem[1])
                if 'central' in name:
                    return -100
                if 'min_max' in name:
                    return -10
                merging_re = re.match(r'^.*_(\d+)_.*$',name)
                if not merging_re is None:
                    return int(merging_re.group(1))
                else:
                    return 1e10
            djr_plot_files = sorted(
                (('DJR',p) for p in plot_files if '_djr_' in p),
                key = sorted_plots)
            pt_plot_files = sorted(
                (('Pt',p) for p in plot_files if '_pt_' in p),
                key = sorted_plots)
            last_obs = None
            for obs, one_plot in djr_plot_files+pt_plot_files:
                if obs!=last_obs:
                    # Add a line between observables
                    html += "<tr><td></td></tr>"
                    last_obs = obs
                name = os.path.basename(one_plot).replace('.pdf','')
                short_name = name
                for dummy in ['_plots','_djr','_pt']:
                    short_name = short_name.replace(dummy,'')
                short_name = short_name.replace('_',' ')
                if 'min max' in short_name:
                    short_name = "%s comparison with min/max merging scale"%obs
                if 'central' in short_name:
                    short_name = "Merging uncertainty band around central scale"
                html += "<tr><td>%(obs)s</td><td>%(sn)s</td><td> <a href=./%(n)s.pdf>PDF</a> </td><td> <a href=./%(n)s.HwU>HwU</a> <a href=./%(n)s.gnuplot>GNUPLOT</a> </td></tr>\n" %\
                        {'obs':obs, 'sn': short_name, 'n': name}
            html += '</table>\n'
            html += '<a href=../../../bin/internal/plot_djrs.py> Example of code to plot the above with matplotlib </a><br><br>'
            html+='</body>\n</html>'
            # NOTE(review): file handle is not closed explicitly here
            ff=open(pjoin(PY8_plots_root_path, 'index.html'),'w')
            ff.write(html)
        return True

    if not event_path:
        # deduce the event file and the output page from the mode
        if mode == 'parton':
            possibilities=[
                pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'),
                pjoin(self.me_dir, 'Events', 'unweighted_events.lhe.gz'),
                pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'),
                pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz')]
            for event_path in possibilities:
                if os.path.exists(event_path):
                    break
            output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html')
        elif mode == 'Pythia':
            event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
            output = pjoin(self.me_dir, 'HTML',self.run_name,
                           'plots_pythia_%s.html' % tag)
        elif mode == 'PGS':
            event_path = pjoin(self.me_dir, 'Events', self.run_name,
                               '%s_pgs_events.lhco' % tag)
            output = pjoin(self.me_dir, 'HTML',self.run_name,
                           'plots_pgs_%s.html' % tag)
        elif mode == 'Delphes':
            event_path = pjoin(self.me_dir, 'Events', self.run_name,'%s_delphes_events.lhco' % tag)
            output = pjoin(self.me_dir, 'HTML',self.run_name,
                           'plots_delphes_%s.html' % tag)
        elif mode == "shower":
            event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
            output = pjoin(self.me_dir, 'HTML',self.run_name,
                           'plots_shower_%s.html' % tag)
            if not self.options['pythia-pgs_path']:
                return
        else:
            raise self.InvalidCmd, 'Invalid mode %s' % mode
    elif mode == 'reweight' and not output:
        output = pjoin(self.me_dir, 'HTML',self.run_name,
                       'plots_%s.html' % tag)

    # make sure the event file exists (gunzip it if needed)
    if not os.path.exists(event_path):
        if os.path.exists(event_path+'.gz'):
            misc.gunzip('%s.gz' % event_path)
        else:
            raise self.InvalidCmd, 'Events file %s does not exist' % event_path
    elif event_path.endswith(".gz"):
        misc.gunzip(event_path)
        event_path = event_path[:-3]

    self.update_status('Creating Plots for %s level' % mode, level = mode.lower())

    mode = mode.lower()
    if mode not in ['parton', 'reweight']:
        plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s_%s' % (mode.lower(),tag))
    elif mode == 'parton':
        plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_parton')
    else:
        plot_dir =pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s' % (tag))

    if not os.path.isdir(plot_dir):
        os.makedirs(plot_dir)

    files.ln(pjoin(self.me_dir, 'Cards','plot_card.dat'), plot_dir, 'ma_card.dat')

    try:
        # run MadAnalysis' plot_events, then the td plotting and the
        # html page generation scripts
        proc = misc.Popen([os.path.join(madir, 'plot_events')],
                          stdout = open(pjoin(plot_dir, 'plot.log'),'w'),
                          stderr = subprocess.STDOUT,
                          stdin=subprocess.PIPE,
                          cwd=plot_dir)
        proc.communicate('%s\n' % event_path)
        del proc
        #proc.wait()
        misc.call(['%s/plot' % self.dirbin, madir, td],
                  stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
                  stderr = subprocess.STDOUT,
                  cwd=plot_dir)
        misc.call(['%s/plot_page-pl' % self.dirbin,
                   os.path.basename(plot_dir),
                   mode],
                  stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
                  stderr = subprocess.STDOUT,
                  cwd=pjoin(self.me_dir, 'HTML', self.run_name))
        shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'),
                    output)
        logger.info("Plots for %s level generated, see %s" % \
                    (mode, output))
    except OSError, error:
        logger.error('fail to create plot: %s. Please check that MadAnalysis is correctly installed.' % error)

    self.update_status('End Plots for %s level' % mode, level = mode.lower(),
                       makehtml=False)

    return True
def run_hep2lhe(self, banner_path = None):
    """Run hep2lhe on the file Events/pythia_events.hep

    Converts the showered HEP events to an LHE file (for plotting only --
    the resulting file is explicitly not meant for detector studies).
    When ExRootAnalysis is available, also produces a ROOT version.
    """
    if not self.options['pythia-pgs_path']:
        raise self.InvalidCmd, 'No pythia-pgs path defined'

    pydir = pjoin(self.options['pythia-pgs_path'], 'src')
    eradir = self.options['exrootanalysis_path']

    # Creating LHE file
    if misc.is_executable(pjoin(pydir, 'hep2lhe')):
        self.update_status('Creating shower LHE File (for plot)', level='pythia')
        # Write the banner to the LHE file
        out = open(pjoin(self.me_dir,'Events','pythia_events.lhe'), 'w')
        #out.writelines('<LesHouchesEvents version=\"1.0\">\n')
        out.writelines('<!--\n')
        out.writelines('# Warning! Never use this file for detector studies!\n')
        out.writelines('-->\n<!--\n')
        if banner_path:
            # embed the run banner (minus the LHE opening tag) as a comment
            out.writelines(open(banner_path).read().replace('<LesHouchesEvents version="1.0">',''))
        out.writelines('\n-->\n')
        out.close()

        self.cluster.launch_and_wait(self.dirbin+'/run_hep2lhe',
                                     argument= [pydir],
                                     cwd=pjoin(self.me_dir,'Events'),
                                     stdout=os.devnull)

        logger.info('Warning! Never use this lhe file for detector studies!')
        # Creating ROOT file
        if eradir and misc.is_executable(pjoin(eradir, 'ExRootLHEFConverter')):
            self.update_status('Creating Pythia LHE Root File', level='pythia')
            try:
                misc.call([eradir+'/ExRootLHEFConverter',
                           'pythia_events.lhe',
                           pjoin(self.run_name, '%s_pythia_lhe_events.root' % self.run_tag)],
                          cwd=pjoin(self.me_dir,'Events'))
            except Exception, error:
                # best effort: a missing/broken converter is not fatal
                misc.sprint('ExRootLHEFConverter fails', str(error),
                            log=logger)
                pass
def store_result(self):
    """Hook for daughter classes: nothing to store at this level."""
    return
############################################################################
def help_systematics(self):
    """help for systematics command

    Prints syntax, options and accepted values for the 'systematics'
    command (scale/PDF/merging variations added as event weights).
    """
    logger.info("syntax: systematics RUN_NAME [OUTPUT] [options]",'$MG:color:BLACK')
    logger.info("-- Run the systematics run on the RUN_NAME run.")
    logger.info(" RUN_NAME can be a path to a lhef file.")
    logger.info(" OUTPUT can be the path to the output lhe file, otherwise the input file will be overwritten")
    logger.info("")
    logger.info("options: (values written are the default)", '$MG:color:BLACK')
    logger.info("")
    logger.info(" --mur=0.5,1,2 # specify the values for renormalisation scale variation")
    logger.info(" --muf=0.5,1,2 # specify the values for factorisation scale variation")
    logger.info(" --alps=1 # specify the values for MLM emission scale variation (LO only)")
    logger.info(" --dyn=-1,1,2,3,4 # specify the dynamical schemes to use.")
    logger.info(" # -1 is the one used by the sample.")
    logger.info(" # > 0 correspond to options of dynamical_scale_choice of the run_card.")
    logger.info(" --pdf=errorset # specify the pdfs to use for pdf variation. (see below)")
    logger.info(" --together=mur,muf,dyn # lists the parameter that must be varied simultaneously so as to ")
    logger.info(" # compute the weights for all combinations of their variations.")
    logger.info(" --from_card # use the information from the run_card (LO only).")
    logger.info(" --remove_weights= # remove previously written weights matching the descriptions")
    logger.info(" --keep_weights= # force to keep the weight even if in the list of remove_weights")
    logger.info(" --start_id= # define the starting digit for the additial weight. If not specify it is determine automatically")
    logger.info("")
    logger.info(" Allowed value for the pdf options:", '$MG:color:BLACK')
    logger.info(" central : Do not perform any pdf variation" )
    logger.info(" errorset : runs over the all the members of the PDF set used to generate the events")
    logger.info(" 244800 : runs over the associated set and all its members")
    logger.info(" 244800@0 : runs over the central member of the associated set")
    # logger.info(" 244800@X : runs over the Xth set of the associated error set")
    logger.info(" CT10 : runs over the associated set and all its members")
    logger.info(" CT10@0 : runs over the central member of the associated set")
    logger.info(" CT10@X : runs over the Xth member of the associated PDF set")
    logger.info(" XX,YY,ZZ : runs over the sets for XX,YY,ZZ (those three follows above syntax)")
    logger.info("")
    logger.info(" Allowed value for the keep/remove_wgts options:", '$MG:color:BLACK')
    logger.info(" all : keep/remove all weights")
    logger.info(" name : keep/remove that particular weight")
    logger.info(" id1,id2 : keep/remove all the weights between those two values --included--")
    logger.info(" PATTERN : keep/remove all the weights matching the (python) regular expression.")
    logger.info(" note that multiple entry of those arguments are allowed")
def complete_systematics(self, text, line, begidx, endidx):
"""auto completion for the systematics command"""
args = self.split_arg(line[0:begidx], error=False)
options = ['--mur=', '--muf=', '--pdf=', '--dyn=','--alps=',
'--together=','--from_card ','--remove_wgts=',
'--keep_wgts=','--start_id=']
if len(args) == 1 and os.path.sep not in text:
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
return self.list_completion(text, data, line)
elif len(args)==1:
#logger.warning('1args')
return self.path_completion(text,
os.path.join('.',*[a for a in args \
if a.endswith(os.path.sep)]))
elif len(args)==2 and os.path.sep in args[1]:
#logger.warning('2args %s', args[1])
return self.path_completion(text, '.')
elif not line.endswith(tuple(options)):
return self.list_completion(text, options)
############################################################################
def do_systematics(self, line):
""" syntax is 'systematics [INPUT [OUTPUT]] OPTIONS'
--mur=0.5,1,2
--muf=0.5,1,2
--alps=1
--dyn=-1
--together=mur,muf #can be repeated
#special options
--from_card=
"""
try:
lhapdf_version = self.get_lhapdf_version()
except Exception:
logger.info('No version of lhapdf. Can not run systematics computation')
return
else:
if lhapdf_version.startswith('5'):
logger.info('can not run systematics with lhapdf 5')
return
lhapdf = misc.import_python_lhapdf(self.options['lhapdf'])
if not lhapdf:
logger.info('can not run systematics since can not link python to lhapdf')
return
self.update_status('Running Systematics computation', level='parton')
args = self.split_arg(line)
#split arguments and option
opts= []
args = [a for a in args if not a.startswith('-') or opts.append(a)]
#check sanity of options
if any(not o.startswith(('--mur=', '--muf=', '--alps=','--dyn=','--together=','--from_card','--pdf=',
'--remove_wgts=', '--keep_wgts','--start_id='))
for o in opts):
raise self.InvalidCmd, "command systematics called with invalid option syntax. Please retry."
# check that we have define the input
if len(args) == 0:
if self.run_name:
args[0] = self.run_name
else:
raise self.InvalidCmd, 'no default run. Please specify the run_name'
if args[0] != self.run_name:
self.set_run_name(args[0])
# always pass to a path + get the event size
result_file= sys.stdout
if not os.path.isfile(args[0]) and not os.path.sep in args[0]:
path = [pjoin(self.me_dir, 'Events', args[0], 'unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events', args[0], 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', args[0], 'events.lhe.gz'),
pjoin(self.me_dir, 'Events', args[0], 'events.lhe')]
for p in path:
if os.path.exists(p):
nb_event = self.results[args[0]].get_current_info()['nb_event']
if self.run_name != args[0]:
tag = self.results[args[0]].tags[0]
self.set_run_name(args[0], tag,'parton', False)
result_file = open(pjoin(self.me_dir,'Events', self.run_name, 'parton_systematics.log'),'w')
args[0] = p
break
else:
raise self.InvalidCmd, 'Invalid run name. Please retry'
elif self.options['nb_core'] != 1:
lhe = lhe_parser.EventFile(args[0])
nb_event = len(lhe)
lhe.close()
input = args[0]
if len(args)>1:
output = pjoin(os.getcwd(),args[1])
else:
output = input
lhaid = [self.run_card.get_lhapdf_id()]
if 'store_rwgt_info' in self.run_card and not self.run_card['store_rwgt_info']:
raise self.InvalidCmd, "The events was not generated with store_rwgt_info=True. Can not evaluate systematics error on this event file."
elif 'use_syst' in self.run_card:
if not self.run_card['use_syst']:
raise self.InvalidCmd, "The events was not generated with use_syst=True. Can not evaluate systematics error on this event file."
elif self.proc_characteristics['ninitial'] ==1:
if '--from_card' in opts:
logger.warning('systematics not available for decay processes. Bypass it')
return
else:
raise self.InvalidCmd, 'systematics not available for decay processes.'
try:
pdfsets_dir = self.get_lhapdf_pdfsetsdir()
except Exception, error:
logger.debug(str(error))
logger.warning('Systematic computation requires lhapdf to run. Bypass Systematics')
return
if '--from_card' in opts:
opts.remove('--from_card')
opts.append('--from_card=internal')
# Check that all pdfset are correctly installed
if 'sys_pdf' in self.run_card:
if '&&' in self.run_card['sys_pdf']:
line = ' '.join(self.run_card['sys_pdf'])
sys_pdf = line.split('&&')
lhaid += [l.split()[0] for l in sys_pdf]
else:
lhaid += [l for l in self.run_card['sys_pdf'].split() if not l.isdigit() or int(l) > 500]
else:
#check that all p
pdf = [a[6:] for a in opts if a.startswith('--pdf=')]
lhaid += [t.split('@')[0] for p in pdf for t in p.split(',')
if t not in ['errorset', 'central']]
# Copy all the relevant PDF sets
try:
[self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid]
except Exception, error:
logger.debug(str(error))
logger.warning('impossible to download all the pdfsets. Bypass systematics')
return
if self.options['run_mode'] ==2:
nb_submit = min(self.options['nb_core'], nb_event//2500)
elif self.options['run_mode'] ==1:
nb_submit = min(self.options['cluster_size'], nb_event//25000)
else:
nb_submit =1
if MADEVENT:
import internal.systematics as systematics
else:
import madgraph.various.systematics as systematics
#one core:
if nb_submit in [0,1]:
systematics.call_systematics([input, output] + opts,
log=lambda x: logger.info(str(x)),
result=result_file
)
elif self.options['run_mode'] in [1,2]:
event_per_job = nb_event // nb_submit
nb_job_with_plus_one = nb_event % nb_submit
start_event, stop_event = 0,0
for i in range(nb_submit):
#computing start/stop event
event_requested = event_per_job
if i < nb_job_with_plus_one:
event_requested += 1
start_event = stop_event
stop_event = start_event + event_requested
prog = sys.executable
input_files = [os.path.basename(input)]
output_files = ['./tmp_%s_%s' % (i, os.path.basename(output)),
'./log_sys_%s.txt' % (i)]
argument = []
if not __debug__:
argument.append('-O')
argument += [pjoin(self.me_dir, 'bin', 'internal', 'systematics.py'),
input_files[0], output_files[0]] + opts +\
['--start_event=%i' % start_event,
'--stop_event=%i' %stop_event,
'--result=./log_sys_%s.txt' %i,
'--lhapdf_config=%s' % self.options['lhapdf']]
required_output = output_files
self.cluster.cluster_submit(prog, argument,
input_files=input_files,
output_files=output_files,
cwd=os.path.dirname(output),
required_output=required_output,
stdout='/dev/null'
)
starttime = time.time()
update_status = lambda idle, run, finish: \
self.update_status((idle, run, finish, 'running systematics'), level=None,
force=False, starttime=starttime)
try:
self.cluster.wait(os.path.dirname(output), update_status, update_first=update_status)
except Exception:
self.cluster.remove()
old_run_mode = self.options['run_mode']
self.options['run_mode'] =0
try:
out = self.do_systematics(line)
finally:
self.options['run_mode'] = old_run_mode
#collect the data
all_cross = []
for i in range(nb_submit):
pos=0
for line in open(pjoin(os.path.dirname(output), 'log_sys_%s.txt'%i)):
if line.startswith('#'):
continue
split = line.split()
if len(split) in [0,1]:
continue
key = tuple(float(x) for x in split[:-1])
cross= float(split[-1])
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['average', 'unity', 'bias']:
cross *= (event_per_job+1 if i <nb_job_with_plus_one else event_per_job)
if len(all_cross) > pos:
all_cross[pos] += cross
else:
all_cross.append(cross)
pos+=1
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['unity']:
all_cross= [cross/nb_event for cross in all_cross]
sys_obj = systematics.call_systematics([input, None] + opts,
log=lambda x: logger.info(str(x)),
result=result_file,
running=False
)
sys_obj.print_cross_sections(all_cross, nb_event, result_file)
#concatenate the output file
subprocess.call(['cat']+\
['./tmp_%s_%s' % (i, os.path.basename(output)) for i in range(nb_submit)],
stdout=open(output,'w'),
cwd=os.path.dirname(output))
for i in range(nb_submit):
os.remove('%s/tmp_%s_%s' %(os.path.dirname(output),i,os.path.basename(output)))
# os.remove('%s/log_sys_%s.txt' % (os.path.dirname(output),i))
self.update_status('End of systematics computation', level='parton', makehtml=False)
############################################################################
    def do_reweight(self, line):
        """ syntax is "reweight RUN_NAME"
        Allow to reweight the events generated with a new choices of model
        parameter. Description of the methods are available here
        cp3.irmp.ucl.ac.be/projects/madgraph/wiki/Reweight

        Unless self.force_run is set, the actual reweighting is delegated to
        a fresh python process (to protect against hard f2py crashes), either
        as a single job or split over multiple cores/cluster jobs.
        """
        #### Utility function
        def check_multicore(self):
            """ determine if the cards are save for multicore use"""
            card = pjoin(self.me_dir, 'Cards', 'reweight_card.dat')
            multicore = True
            if self.options['run_mode'] in [0,1]:
                multicore = False

            lines = [l.strip() for l in open(card) if not l.strip().startswith('#')]
            while lines and not lines[0].startswith('launch'):
                line = lines.pop(0)
                # if not standard output mode forbid multicore mode
                if line.startswith('change') and line[6:].strip().startswith('output'):
                    return False
                if line.startswith('change') and line[6:].strip().startswith('multicore'):
                    split_line = line.split()
                    if len(split_line) > 2:
                        multicore = bool(split_line[2])
            # we have reached the first launch in the card ensure that no output change
            #are done after that point.
            lines = [line[6:].strip() for line in lines if line.startswith('change')]
            for line in lines:
                if line.startswith(('process','model','output', 'rwgt_dir')):
                    return False
                elif line.startswith('multicore'):
                    split_line = line.split()
                    if len(split_line) > 1:
                        multicore = bool(split_line[1])

            return multicore

        # with -from_cards and no reweight card there is nothing to do
        if '-from_cards' in line and not os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
            return

        # option for multicore to avoid that all of them create the same directory
        if '--multicore=create' in line:
            multicore='create'
        elif '--multicore=wait' in line:
            multicore='wait'
        else:
            multicore=False

        # Check that MG5 directory is present .
        if MADEVENT and not self.options['mg5_path']:
            raise self.InvalidCmd, '''The module reweight requires that MG5 is installed on the system.
            You can install it and set its path in ./Cards/me5_configuration.txt'''
        elif MADEVENT:
            sys.path.append(self.options['mg5_path'])
        try:
            import madgraph.interface.reweight_interface as reweight_interface
        except ImportError:
            raise self.ConfigurationError, '''Can\'t load Reweight module.
            The variable mg5_path might not be correctly configured.'''

        # interactive edition of the reweight card (skipped with -from_cards)
        if not '-from_cards' in line:
            self.keep_cards(['reweight_card.dat'], ignore=['*'])
            self.ask_edit_cards(['reweight_card.dat'], 'fixed', plot=False)

        # load the name of the event file
        args = self.split_arg(line)

        if not self.force_run:
            # not force_run: re-launch the command in a separate python
            # process so that a hard f2py crash cannot kill this session.
            # forbid this function to create an empty item in results.
            if self.run_name and self.results.current and self.results.current['cross'] == 0:
                self.results.delete_run(self.run_name, self.run_tag)
            self.results.save()
            # ensure that the run_card is present
            if not hasattr(self, 'run_card'):
                self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'))

            # we want to run this in a separate shell to avoid hard f2py crash
            command = [sys.executable]
            if os.path.exists(pjoin(self.me_dir, 'bin', 'madevent')):
                command.append(pjoin(self.me_dir, 'bin', 'internal','madevent_interface.py'))
            else:
                command.append(pjoin(self.me_dir, 'bin', 'internal', 'amcatnlo_run_interface.py'))
            if not isinstance(self, cmd.CmdShell):
                command.append('--web')
            command.append('reweight')
            ######### START SINGLE CORE MODE ############
            if self.options['nb_core']==1 or self.run_card['nevents'] < 101 or not check_multicore(self):
                if self.run_name:
                    command.append(self.run_name)
                else:
                    command += args
                if '-from_cards' not in command:
                    command.append('-from_cards')
                p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=os.getcwd())
                # mirror the child's log lines, rebranding them as REWEIGHT
                while p.poll() is None:
                    line = p.stdout.readline()
                    if any(t in line for t in ['INFO:', 'WARNING:', 'CRITICAL:', 'ERROR:', 'root:','KEEP:']) and \
                       not '***********' in line:
                        print line[:-1].replace('INFO', 'REWEIGHT').replace('KEEP:','')
                    elif __debug__ and line:
                        logger.debug(line[:-1])
                if p.returncode !=0:
                    logger.error("Reweighting failed")
                    return
                self.results = self.load_results_db()
                # forbid this function to create an empty item in results.
                try:
                    if self.results[self.run_name][-2]['cross']==0:
                        self.results.delete_run(self.run_name,self.results[self.run_name][-2]['tag'])
                except:
                    pass
                try:
                    if self.results.current['cross'] == 0 and self.run_name:
                        self.results.delete_run(self.run_name, self.run_tag)
                except:
                    pass
                # re-define current run
                try:
                    self.results.def_current(self.run_name, self.run_tag)
                except Exception:
                    pass
                return
                ########## END SINGLE CORE HANDLING #############
            else:
                ########## START MULTI-CORE HANDLING #############
                if not isinstance(self.cluster, cluster.MultiCore):
                    mycluster = cluster.MultiCore(nb_core=self.options['nb_core'])
                else:
                    mycluster = self.cluster

                new_args=list(args)
                self.check_decay_events(new_args)
                try:
                    os.remove(pjoin(self.me_dir,'rw_me','rwgt.pkl'))
                except Exception, error:
                    pass
                # prepare multi-core job:
                import madgraph.various.lhe_parser as lhe_parser
                # args now alway content the path to the valid files
                if 'nevt_job' in self.run_card and self.run_card['nevt_job'] !=-1:
                    nevt_job = self.run_card['nevt_job']
                else:
                    nevt_job = max(2500, self.run_card['nevents']/self.options['nb_core'])
                logger.info("split the event file in bunch of %s events" % nevt_job)
                nb_file = lhe_parser.EventFile(new_args[0]).split(nevt_job)
                starttime = time.time()
                update_status = lambda idle, run, finish: \
                    self.update_status((idle, run, finish, 'reweight'), level=None,
                                       force=False, starttime=starttime)

                # submit one child per event-file chunk; the first child
                # creates the rw_me directory, the others wait for it
                all_lhe = []
                devnull= open(os.devnull)
                for i in range(nb_file):
                    new_command = list(command)
                    new_command.append('%s_%s.lhe' % (new_args[0],i))
                    all_lhe.append('%s_%s.lhe' % (new_args[0],i))
                    if '-from_cards' not in command:
                        new_command.append('-from_cards')
                    if i==0:
                        if __debug__:
                            stdout = None
                        else:
                            stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight.log'),'w')
                        new_command.append('--multicore=create')
                    else:
                        stdout = devnull
                        #stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight%s.log' % i),'w')
                        new_command.append('--multicore=wait')
                    mycluster.submit(prog=command[0], argument=new_command[1:], stdout=stdout, cwd=os.getcwd())
                mycluster.wait(self.me_dir,update_status)
                devnull.close()
                logger.info("Collect and combine the various output file.")
                # merge the reweighted chunks back into a single event file
                lhe = lhe_parser.MultiEventFile(all_lhe, parse=False)
                nb_event, cross_sections = lhe.write(new_args[0], get_info=True)
                if any(os.path.exists('%s_%s_debug.log' % (f, self.run_tag)) for f in all_lhe):
                    for f in all_lhe:
                        if os.path.exists('%s_%s_debug.log' % (f, self.run_tag)):
                            raise Exception, "Some of the run failed: Please read %s_%s_debug.log" % (f, self.run_tag)

                if 'event_norm' in self.run_card and self.run_card['event_norm'] in ['average','bias']:
                    for key, value in cross_sections.items():
                        cross_sections[key] = value / (nb_event+1)
                lhe.remove()
                for key in cross_sections:
                    if key == 'orig' or key.isdigit():
                        continue
                    logger.info('%s : %s pb' % (key, cross_sections[key]))
                return
                ########## END MULTI-CORE HANDLING #############

        # force_run path: run the reweighting in the current process
        self.to_store.append('event')
        # forbid this function to create an empty item in results.
        if not self.force_run and self.results.current['cross'] == 0 and self.run_name:
            self.results.delete_run(self.run_name, self.run_tag)

        self.check_decay_events(args)
        # args now alway content the path to the valid files
        reweight_cmd = reweight_interface.ReweightInterface(args[0], mother=self)
        #reweight_cmd.use_rawinput = False
        #reweight_cmd.mother = self
        wgt_names = reweight_cmd.get_weight_names()
        if wgt_names == [''] and reweight_cmd.has_nlo:
            self.update_status('Running Reweighting (LO approximate)', level='madspin')
        else:
            self.update_status('Running Reweighting', level='madspin')

        path = pjoin(self.me_dir, 'Cards', 'reweight_card.dat')
        reweight_cmd.raw_input=False
        reweight_cmd.me_dir = self.me_dir
        reweight_cmd.multicore = multicore #allow the directory creation or not
        print "We are in mode", multicore
        reweight_cmd.import_command_file(path)
        reweight_cmd.do_quit('')

        logger.info("quit rwgt")

        # re-define current run
        try:
            self.results.def_current(self.run_name, self.run_tag)
        except Exception:
            pass
############################################################################
def do_pgs(self, line):
"""launch pgs"""
args = self.split_arg(line)
# Check argument's validity
if '--no_default' in args:
no_default = True
args.remove('--no_default')
else:
no_default = False
if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
logger.info('No pgs_card detected, so not run pgs')
return
# Check all arguments
# This might launch a gunzip in another thread. After the question
# This thread need to be wait for completion. (This allow to have the
# question right away and have the computer working in the same time)
# if lock is define this a locker for the completion of the thread
lock = self.check_pgs(args, no_default=no_default)
# Check that the pgs_card exists. If not copy the default
if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
files.cp(pjoin(self.me_dir, 'Cards', 'pgs_card_default.dat'),
pjoin(self.me_dir, 'Cards', 'pgs_card.dat'))
logger.info('No pgs card found. Take the default one.')
if not (no_default or self.force):
self.ask_edit_cards(['pgs_card.dat'])
self.update_status('prepare PGS run', level=None)
pgsdir = pjoin(self.options['pythia-pgs_path'], 'src')
eradir = self.options['exrootanalysis_path']
madir = self.options['madanalysis_path']
td = self.options['td_path']
# Compile pgs if not there
if not misc.is_executable(pjoin(pgsdir, 'pgs')):
logger.info('No PGS executable -- running make')
misc.compile(cwd=pgsdir)
self.update_status('Running PGS', level='pgs')
tag = self.run_tag
# Update the banner with the pgs card
banner_path = pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, self.run_tag))
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
self.banner.add(pjoin(self.me_dir, 'Cards','pgs_card.dat'))
self.banner.write(banner_path)
else:
open(banner_path, 'w').close()
########################################################################
# now pass the event to a detector simulator and reconstruct objects
########################################################################
if lock:
lock.wait()
# Prepare the output file with the banner
ff = open(pjoin(self.me_dir, 'Events', 'pgs_events.lhco'), 'w')
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
text = open(banner_path).read()
text = '#%s' % text.replace('\n','\n#')
dico = self.results[self.run_name].get_current_info()
text +='\n## Integrated weight (pb) : %.4g' % dico['cross']
text +='\n## Number of Event : %s\n' % dico['nb_event']
ff.writelines(text)
ff.close()
try:
os.remove(pjoin(self.me_dir, 'Events', 'pgs.done'))
except Exception:
pass
pgs_log = pjoin(self.me_dir, 'Events', self.run_name, "%s_pgs.log" % tag)
self.cluster.launch_and_wait('../bin/internal/run_pgs',
argument=[pgsdir], cwd=pjoin(self.me_dir,'Events'),
stdout=pgs_log, stderr=subprocess.STDOUT)
if not os.path.exists(pjoin(self.me_dir, 'Events', 'pgs.done')):
logger.error('Fail to create LHCO events')
return
else:
os.remove(pjoin(self.me_dir, 'Events', 'pgs.done'))
if os.path.getsize(banner_path) == os.path.getsize(pjoin(self.me_dir, 'Events','pgs_events.lhco')):
misc.call(['cat pgs_uncleaned_events.lhco >> pgs_events.lhco'],
cwd=pjoin(self.me_dir, 'Events'))
os.remove(pjoin(self.me_dir, 'Events', 'pgs_uncleaned_events.lhco '))
# Creating Root file
if eradir and misc.is_executable(pjoin(eradir, 'ExRootLHCOlympicsConverter')):
self.update_status('Creating PGS Root File', level='pgs')
try:
misc.call([eradir+'/ExRootLHCOlympicsConverter',
'pgs_events.lhco',pjoin('%s/%s_pgs_events.root' % (self.run_name, tag))],
cwd=pjoin(self.me_dir, 'Events'))
except Exception:
logger.warning('fail to produce Root output [problem with ExRootAnalysis')
if os.path.exists(pjoin(self.me_dir, 'Events', 'pgs_events.lhco')):
# Creating plots
files.mv(pjoin(self.me_dir, 'Events', 'pgs_events.lhco'),
pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % tag))
self.create_plot('PGS')
misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, '%s_pgs_events.lhco' % tag))
self.update_status('finish', level='pgs', makehtml=False)
############################################################################
def do_compute_widths(self, line):
"""Require MG5 directory: Compute automatically the widths of a set
of particles"""
args = self.split_arg(line)
opts = self.check_compute_widths(args)
from madgraph.interface.master_interface import MasterCmd
cmd = MasterCmd()
self.define_child_cmd_interface(cmd, interface=False)
cmd.exec_cmd('set automatic_html_opening False --no_save')
if not opts['path']:
opts['path'] = pjoin(self.me_dir, 'Cards', 'param_card.dat')
if not opts['force'] :
self.ask_edit_cards(['param_card'],[], plot=False)
line = 'compute_widths %s %s' % \
(' '.join([str(i) for i in opts['particles']]),
' '.join('--%s=%s' % (key,value) for (key,value) in opts.items()
if key not in ['model', 'force', 'particles'] and value))
cmd.exec_cmd(line, model=opts['model'])
self.child = None
del cmd
############################################################################
def do_print_results(self, line):
"""Not in help:Print the cross-section/ number of events for a given run"""
args = self.split_arg(line)
options={'path':None, 'mode':'w', 'format':'full'}
for arg in list(args):
if arg.startswith('--') and '=' in arg:
name,value=arg.split('=',1)
name = name [2:]
options[name] = value
args.remove(arg)
if len(args) > 0:
run_name = args[0]
else:
for i, run_name in enumerate(self.results.order):
for j, one_result in enumerate(self.results[run_name]):
if i or j:
options['mode'] = "a"
if options['path']:
self.print_results_in_file(one_result, options['path'], options['mode'], options['format'])
else:
self.print_results_in_shell(one_result)
return
if run_name not in self.results:
raise self.InvalidCmd('%s is not a valid run_name or it doesn\'t have any information' \
% run_name)
if len(args) == 2:
tag = args[1]
if tag.isdigit():
tag = int(tag) - 1
if len(self.results[run_name]) < tag:
raise self.InvalidCmd('Only %s different tag available' % \
len(self.results[run_name]))
data = self.results[run_name][tag]
else:
data = self.results[run_name].return_tag(tag)
else:
data = self.results[run_name].return_tag(None) # return the last
if options['path']:
self.print_results_in_file(data, options['path'], options['mode'], options['format'])
else:
self.print_results_in_shell(data)
def configure_directory(self, *args, **opts):
""" All action require before any type of run. Typically overloaded by
daughters if need be."""
pass
############################################################################
# Start of MadAnalysis5 related function
############################################################################
    @staticmethod
    def runMA5(MA5_interpreter, MA5_cmds, MA5_runtag, logfile_path, advertise_log=True):
        """ Run MA5 in a controlled environnment.

        The 'MA5' logger handlers are temporarily replaced by a single file
        handler writing to logfile_path, and stdout/stderr are silenced while
        the interpreter runs MA5_cmds. Returns True on success, False if the
        MA5 run raised (the traceback is then logged at debug level).
        """
        successfull_MA5_run = True
        try:
            # Predefine MA5_logger as None in case we don't manage to retrieve it.
            MA5_logger = None
            MA5_logger = logging.getLogger('MA5')
            # swap the current handlers for a file handler; they are
            # restored in the finally clause below
            BackUp_MA5_handlers = MA5_logger.handlers
            for handler in BackUp_MA5_handlers:
                MA5_logger.removeHandler(handler)
            file_handler = logging.FileHandler(logfile_path)
            MA5_logger.addHandler(file_handler)
            if advertise_log:
                logger.info("Follow Madanalysis5 run with the following command in a separate terminal:")
                logger.info(' tail -f %s'%logfile_path)
            # Now the magic, finally call MA5.
            with misc.stdchannel_redirected(sys.stdout, os.devnull):
                with misc.stdchannel_redirected(sys.stderr, os.devnull):
                    MA5_interpreter.print_banner()
                    MA5_interpreter.load(MA5_cmds)
        except Exception as e:
            logger.warning("MadAnalysis5 failed to run the commands for task "+
                           "'%s'. Madanalys5 analysis will be skipped."%MA5_runtag)
            error=StringIO.StringIO()
            traceback.print_exc(file=error)
            logger.debug('MadAnalysis5 error was:')
            logger.debug('-'*60)
            logger.debug(error.getvalue()[:-1])
            logger.debug('-'*60)
            successfull_MA5_run = False
        finally:
            # restore the original handler configuration of the MA5 logger
            if not MA5_logger is None:
                for handler in MA5_logger.handlers:
                    MA5_logger.removeHandler(handler)
                for handler in BackUp_MA5_handlers:
                    MA5_logger.addHandler(handler)
        return successfull_MA5_run
#===============================================================================
# Return a Main instance of MadAnlysis5, provided its path
#===============================================================================
    @staticmethod
    def get_MadAnalysis5_interpreter(mg5_path, ma5_path, mg5_interface=None,
        logstream = sys.stdout, loglevel =logging.INFO, forced = True,
        compilation=False):
        """ Makes sure to correctly setup paths and constructs and return an MA5 path

        Returns an MA5Interpreter instance, or None if the MA5 installation
        cannot be found or fails to start. The readline state is saved and
        restored because importing MA5 (which may pull in ROOT) clobbers the
        MG5 tab-completion setup.
        """
        MA5path = os.path.normpath(pjoin(mg5_path,ma5_path))
        # defensive check: also bail out if the ma5 executable is missing
        if MA5path is None or not os.path.isfile(pjoin(MA5path,'bin','ma5')):
            return None
        if MA5path not in sys.path:
            sys.path.insert(0, MA5path)

        try:
            # We must backup the readline module attributes because they get modified
            # when MA5 imports root and that supersedes MG5 autocompletion
            import readline
            old_completer = readline.get_completer()
            old_delims = readline.get_completer_delims()
            old_history = [readline.get_history_item(i) for i in range(1,readline.get_current_history_length()+1)]
        except ImportError:
            old_completer, old_delims, old_history = None, None, None
        try:
            # build the interpreter with all console output silenced
            from madanalysis.interpreter.ma5_interpreter import MA5Interpreter
            with misc.stdchannel_redirected(sys.stdout, os.devnull):
                with misc.stdchannel_redirected(sys.stderr, os.devnull):
                    MA5_interpreter = MA5Interpreter(MA5path, LoggerLevel=loglevel,
                                                     LoggerStream=logstream,forced=forced,
                                                     no_compilation=not compilation)
        except Exception as e:
            logger.warning('MadAnalysis5 failed to start so that MA5 analysis will be skipped.')
            error=StringIO.StringIO()
            traceback.print_exc(file=error)
            logger.debug('MadAnalysis5 error was:')
            logger.debug('-'*60)
            logger.debug(error.getvalue()[:-1])
            logger.debug('-'*60)
            MA5_interpreter = None
        finally:
            # Now restore the readline MG5 state
            if not old_history is None:
                readline.clear_history()
                for line in old_history:
                    readline.add_history(line)
            if not old_completer is None:
                readline.set_completer(old_completer)
            if not old_delims is None:
                readline.set_completer_delims(old_delims)
            # Also restore the completion_display_matches_hook if an mg5 interface
            # is specified as it could also have been potentially modified
            if not mg5_interface is None and any(not elem is None for elem in [old_completer, old_delims, old_history]):
                mg5_interface.set_readline_completion_display_matches_hook()
        return MA5_interpreter
def check_madanalysis5(self, args, mode='parton'):
"""Check the argument for the madanalysis5 command
syntax: madanalysis5_parton [NAME]
"""
MA5_options = {'MA5_stdout_lvl':'default'}
stdout_level_tags = [a for a in args if a.startswith('--MA5_stdout_lvl=')]
for slt in stdout_level_tags:
lvl = slt.split('=')[1].strip()
try:
# It is likely an int
MA5_options['MA5_stdout_lvl']=int(lvl)
except ValueError:
if lvl.startswith('logging.'):
lvl = lvl[8:]
try:
MA5_options['MA5_stdout_lvl'] = getattr(logging, lvl)
except:
raise InvalidCmd("MA5 output level specification"+\
" '%s' is incorrect." % str(lvl))
args.remove(slt)
if mode=='parton':
# We will attempt to run MA5 on the parton level output
# found in the last run if not specified.
MA5_options['inputs'] = '*.lhe'
elif mode=='hadron':
# We will run MA5 on all sources of post-partonic output we
# can find if not specified. PY8 is a keyword indicating shower
# piped to MA5.
MA5_options['inputs'] = ['fromCard']
else:
raise MadGraph5Error('Mode %s not reckognized'%mode+
' in function check_madanalysis5.')
# If not madanalysis5 path
if not self.options['madanalysis5_path']:
logger.info('Now trying to read the configuration file again'+
' to find MadAnalysis5 path')
self.set_configuration()
if not self.options['madanalysis5_path'] or not \
os.path.exists(pjoin(self.options['madanalysis5_path'],'bin','ma5')):
error_msg = 'No valid MadAnalysis5 path set.\n'
error_msg += 'Please use the set command to define the path and retry.\n'
error_msg += 'You can also define it in the configuration file.\n'
error_msg += 'Finally, it can be installed automatically using the'
error_msg += ' install command.\n'
raise self.InvalidCmd(error_msg)
# Now make sure that the corresponding default card exists
if not os.path.isfile(pjoin(self.me_dir,
'Cards','madanalysis5_%s_card.dat'%mode)):
raise self.InvalidCmd('Your installed version of MadAnalysis5 and/or'+\
' MadGraph5_aMCatNLO does not seem to support analysis at'+
'%s level.'%mode)
tag = [a for a in args if a.startswith('--tag=')]
if tag:
args.remove(tag[0])
tag = tag[0][6:]
if len(args) == 0 and not self.run_name:
if self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. '+
'Please add this information.')
if len(args) >= 1:
if mode=='parton' and args[0] != self.run_name and \
not os.path.exists(pjoin(self.me_dir,'Events',args[0],
'unweighted_events.lhe.gz')) and not os.path.exists(
pjoin(self.me_dir,'Events',args[0])):
raise self.InvalidCmd('No events file in the %s run.'%args[0])
self.set_run_name(args[0], tag, level='madanalysis5_%s'%mode)
else:
if tag:
self.run_card['run_tag'] = args[0]
self.set_run_name(self.run_name, tag, level='madanalysis5_%s'%mode)
if mode=='parton':
if any(t for t in args if t.startswith('--input=')):
raise InvalidCmd('The option --input=<input_file> is not'+
' available when running partonic MadAnalysis5 analysis. The'+
' .lhe output of the selected run is used automatically.')
input_file = pjoin(self.me_dir,'Events',self.run_name, 'unweighted_events.lhe')
MA5_options['inputs'] = '%s.gz'%input_file
if not os.path.exists('%s.gz'%input_file):
if os.path.exists(input_file):
misc.gzip(input_file, stdout='%s.gz' % input_file)
else:
logger.warning("LHE event file not found in \n%s\ns"%input_file+
"Parton-level MA5 analysis will be skipped.")
if mode=='hadron':
# Make sure to store current results (like Pythia8 hep files)
# so that can be found here
self.store_result()
hadron_tag = [t for t in args if t.startswith('--input=')]
if hadron_tag and hadron_tag[0][8:]:
hadron_inputs = hadron_tag[0][8:].split(',')
# If not set above, then we must read it from the card
elif MA5_options['inputs'] == ['fromCard']:
hadron_inputs = banner_mod.MadAnalysis5Card(pjoin(self.me_dir,
'Cards','madanalysis5_hadron_card.dat'),mode='hadron')['inputs']
# Make sure the corresponding input files are present and unfold
# potential wildcard while making their path absolute as well.
MA5_options['inputs'] = []
special_source_tags = []
for htag in hadron_inputs:
# Possible pecial tag for MA5 run inputs
if htag in special_source_tags:
# Special check/actions
continue
# Check if the specified file exists and is not a wildcard
if os.path.isfile(htag) or (os.path.exists(htag) and
stat.S_ISFIFO(os.stat(htag).st_mode)):
MA5_options['inputs'].append(htag)
continue
# Now select one source per tag, giving priority to unzipped
# files with 'events' in their name (case-insensitive).
file_candidates = misc.glob(htag, pjoin(self.me_dir,'Events',self.run_name))+\
misc.glob('%s.gz'%htag, pjoin(self.me_dir,'Events',self.run_name))
priority_files = [f for f in file_candidates if
self.run_card['run_tag'] in os.path.basename(f)]
priority_files = [f for f in priority_files if
'EVENTS' in os.path.basename(f).upper()]
# Make sure to always prefer the original partonic event file
for f in file_candidates:
if os.path.basename(f).startswith('unweighted_events.lhe'):
priority_files.append(f)
if priority_files:
MA5_options['inputs'].append(priority_files[-1])
continue
if file_candidates:
MA5_options['inputs'].append(file_candidates[-1])
continue
return MA5_options
def ask_madanalysis5_run_configuration(self, runtype='parton', mode=None):
    """Prompt the user before a MadAnalysis5 run.

    For now this only offers the relevant MA5 card for edition; further
    run-specific questions may be added here later.  The requested run
    type is returned unchanged.
    """
    relevant_cards = ['madanalysis5_%s_card.dat' % runtype]
    self.keep_cards(relevant_cards)
    if self.force:
        return runtype
    # Mirror the 'auto' handling used by ask_pythia_configuration.
    if mode == 'auto':
        self.ask_edit_cards(relevant_cards, mode='auto', plot=False)
    else:
        self.ask_edit_cards(relevant_cards, plot=False)
    # No further information is collected yet: simply echo the run type.
    return runtype
def complete_madanalysis5_hadron(self, text, line, begidx, endidx):
    """Tab-completion for the madanalysis5_hadron command.

    First positional argument completes to existing run names (directories
    under Events/ containing a default hadron-level input file); later
    positions complete command-line options and their values.
    """
    args = self.split_arg(line[0:begidx], error=False)

    if len(args) == 1:
        # return valid run_name: scan Events/*/ for any default hadron-level
        # input (plain or gzipped) and keep the run directory name.
        data = []
        for name in banner_mod.MadAnalysis5Card._default_hadron_inputs:
            data += misc.glob(pjoin('*', '%s' % name), pjoin(self.me_dir, 'Events'))
            data += misc.glob(pjoin('*', '%s.gz' % name), pjoin(self.me_dir, 'Events'))
        # NOTE(review): splits on '/' rather than os.sep — presumably fine on
        # the POSIX platforms MadGraph targets.
        data = [n.rsplit('/', 2)[1] for n in data]
        tmp1 = self.list_completion(text, data)
        if not self.run_name:
            return tmp1
        else:
            # A run is already selected: also offer the generic options.
            tmp2 = self.list_completion(text, ['-f',
                '--MA5_stdout_lvl=', '--input=', '--no_default', '--tag='], line)
            return tmp1 + tmp2
    elif '--MA5_stdout_lvl=' in line and not any(arg.startswith(
                                       '--MA5_stdout_lvl=') for arg in args):
        # Complete the value of --MA5_stdout_lvl (only if not already given).
        return self.list_completion(text,
            ['--MA5_stdout_lvl=%s' % opt for opt in
                ['logging.INFO', 'logging.DEBUG', 'logging.WARNING',
                 'logging.CRITICAL', '90']], line)
    elif '--input=' in line and not any(arg.startswith(
                                       '--input=') for arg in args):
        # Complete the value of --input (only if not already given).
        return self.list_completion(text, ['--input=%s' % opt for opt in
            (banner_mod.MadAnalysis5Card._default_hadron_inputs + ['path'])], line)
    else:
        return self.list_completion(text, ['-f',
            '--MA5_stdout_lvl=', '--input=', '--no_default', '--tag='], line)
def do_madanalysis5_hadron(self, line):
    """launch MadAnalysis5 at the hadron level."""
    # Thin wrapper: the shared driver handles both parton and hadron modes.
    return self.run_madanalysis5(line, mode='hadron')
def run_madanalysis5(self, line, mode='parton'):
    """launch MadAnalysis5 at the parton level or at the hadron level with
    a specific command line.

    Workflow:
      1. parse options and locate/validate input event files,
      2. load the relevant madanalysis5_<mode>_card.dat,
      3. build the list of MA5 command batches from the card,
      4. run each batch through a MadAnalysis5 interpreter, collecting
         PDF reports (or CLs summaries for recasting) and linking any
         reconstructed event files back into the run directory,
      5. refresh the result details and the run banner.
    """
    # Check argument's validity
    args = self.split_arg(line)
    if '--no_default' in args:
        no_default = True
        args.remove('--no_default')
    else:
        no_default = False

    if no_default:
        # Called issued by MG5aMC itself during a generate_event action:
        # silently skip if the corresponding card is absent.
        if mode == 'parton' and not os.path.exists(pjoin(self.me_dir, 'Cards',
                                          'madanalysis5_parton_card.dat')):
            return
        if mode == 'hadron' and not os.path.exists(pjoin(self.me_dir, 'Cards',
                                          'madanalysis5_hadron_card.dat')):
            return
    else:
        # Called issued by the user itself and only MA5 will be run.
        # we must therefore ask wheter the user wants to edit the card
        self.ask_madanalysis5_run_configuration(runtype=mode)

    # MA5 must be installed and at least one MA5 card must exist.
    if not self.options['madanalysis5_path'] or \
            all(not os.path.exists(pjoin(self.me_dir, 'Cards', card)) for card in
            ['madanalysis5_parton_card.dat', 'madanalysis5_hadron_card.dat']):
        if no_default:
            return
        else:
            raise InvalidCmd('You must have MadAnalysis5 available to run' +
                " this command. Consider installing it with the 'install' function.")

    # The order of these two calls depends on whether a run is already
    # selected (check_madanalysis5 may pick/set the run name).
    if not self.run_name:
        MA5_opts = self.check_madanalysis5(args, mode=mode)
        self.configure_directory(html_opening=False)
    else:
        # initialize / remove lhapdf mode
        self.configure_directory(html_opening=False)
        MA5_opts = self.check_madanalysis5(args, mode=mode)

    # Now check that there is at least one input to run
    if MA5_opts['inputs'] == []:
        if no_default:
            logger.warning('No hadron level input found to run MadAnalysis5 on.' +
                           ' Skipping its hadron-level analysis.')
            return
        else:
            raise self.InvalidCmd('\nNo input files specified or availabled for' +
                ' this MadAnalysis5 hadron-level run.\nPlease double-check the options of this' +
                ' MA5 command (or card) and which output files\nare currently in the chosen' +
                " run directory '%s'." % self.run_name)

    MA5_card = banner_mod.MadAnalysis5Card(pjoin(self.me_dir, 'Cards',
                                'madanalysis5_%s_card.dat' % mode), mode=mode)

    # The card itself can request that the analysis be skipped.
    if MA5_card._skip_analysis:
        logger.info('Madanalysis5 %s-level analysis was skipped following user request.' % mode)
        logger.info("To run the analysis, remove or comment the tag '%s skip_analysis' "
                    % banner_mod.MadAnalysis5Card._MG5aMC_escape_tag +
                    "in\n  '%s'." % pjoin(self.me_dir, 'Cards', 'madanalysis5_%s_card.dat' % mode))
        return

    MA5_cmds_list = MA5_card.get_MA5_cmds(MA5_opts['inputs'],
        pjoin(self.me_dir, 'MA5_%s_ANALYSIS' % mode.upper()),
        run_dir_path=pjoin(self.me_dir, 'Events', self.run_name),
        UFO_model_path=pjoin(self.me_dir, 'bin', 'internal', 'ufomodel'),
        run_tag=self.run_tag)

    # Here's how to print the MA5 commands generated by MG5aMC
    # for MA5_runtag, MA5_cmds in MA5_cmds_list:
    #    misc.sprint('****************************************')
    #    misc.sprint('* Commands for MA5 runtag %s:'%MA5_runtag)
    #    misc.sprint('\n'+('\n'.join('* %s'%cmd for cmd in MA5_cmds)))
    #    misc.sprint('****************************************')

    self.update_status('\033[92mRunning MadAnalysis5 [arXiv:1206.1599]\033[0m',
                       level='madanalysis5_%s' % mode)
    if mode == 'hadron':
        logger.info('Hadron input files considered:')
        for input in MA5_opts['inputs']:
            logger.info(' --> %s' % input)
    elif mode == 'parton':
        logger.info('Parton input file considered:')
        logger.info(' --> %s' % MA5_opts['inputs'])

    # Obtain a main MA5 interpreter
    # Ideally we would like to do it all with a single interpreter
    # but we'd need a way to reset it for this.
    if MA5_opts['MA5_stdout_lvl'] == 'default':
        if MA5_card['stdout_lvl'] is None:
            MA5_lvl = self.options['stdout_level']
        else:
            MA5_lvl = MA5_card['stdout_lvl']
    else:
        MA5_lvl = MA5_opts['MA5_stdout_lvl']

    # Bypass initialization information
    MA5_interpreter = CommonRunCmd.get_MadAnalysis5_interpreter(
        self.options['mg5_path'],
        self.options['madanalysis5_path'],
        logstream=sys.stdout,
        loglevel=100,
        forced=True,
        compilation=True)

    # If failed to start MA5, then just leave
    if MA5_interpreter is None:
        return

    # Make sure to only run over one analysis over each fifo.
    used_up_fifos = []
    # Now loop over the different MA5_runs
    for MA5_runtag, MA5_cmds in MA5_cmds_list:

        # Bypass the banner.
        MA5_interpreter.setLogLevel(100)
        # Make sure to properly initialize MA5 interpreter
        if mode == 'hadron':
            MA5_interpreter.init_reco()
        else:
            MA5_interpreter.init_parton()
        MA5_interpreter.setLogLevel(MA5_lvl)

        if MA5_runtag != 'default':
            if MA5_runtag.startswith('_reco_'):
                logger.info("MadAnalysis5 now running the reconstruction '%s'..." %
                            MA5_runtag[6:], '$MG:color:GREEN')
            elif MA5_runtag == 'Recasting':
                logger.info("MadAnalysis5 now running the recasting...",
                            '$MG:color:GREEN')
            else:
                logger.info("MadAnalysis5 now running the '%s' analysis..." %
                            MA5_runtag, '$MG:color:GREEN')

        # Now the magic, let's call MA5
        if not CommonRunCmd.runMA5(MA5_interpreter, MA5_cmds, MA5_runtag,
              pjoin(self.me_dir, 'Events', self.run_name, '%s_MA5_%s.log' % (self.run_tag, MA5_runtag))):
            # Unsuccessful MA5 run, we therefore stop here.
            return

        if MA5_runtag.startswith('_reco_'):
            # When doing a reconstruction we must first link the event file
            # created with MA5 reconstruction and then directly proceed to the
            # next batch of instructions. There can be several output directory
            # if there were several input files.
            links_created = []
            for i, input in enumerate(MA5_opts['inputs']):
                # Make sure it is not an lhco or root input, which would not
                # undergo any reconstruction of course.
                if not banner_mod.MadAnalysis5Card.events_can_be_reconstructed(input):
                    continue
                if input.endswith('.fifo'):
                    if input in used_up_fifos:
                        # Only run once on each fifo
                        continue
                    else:
                        used_up_fifos.append(input)

                reco_output = pjoin(self.me_dir,
                     'MA5_%s_ANALYSIS%s_%d' % (mode.upper(), MA5_runtag, i + 1))
                # Look for either a root or .lhe.gz output
                reco_event_file = misc.glob('*.lhe.gz', pjoin(reco_output, 'Output', '_reco_events')) +\
                                  misc.glob('*.root', pjoin(reco_output, 'Output', '_reco_events'))
                if len(reco_event_file) == 0:
                    raise MadGraph5Error, "MadAnalysis5 failed to produce the " +\
                      "reconstructed event file for reconstruction '%s'." % MA5_runtag[6:]
                reco_event_file = reco_event_file[0]
                # move the reconstruction output to the HTML directory
                shutil.move(reco_output, pjoin(self.me_dir, 'HTML',
                    self.run_name, '%s_MA5_%s_ANALYSIS%s_%d' %
                    (self.run_tag, mode.upper(), MA5_runtag, i + 1)))
                # link the reconstructed event file to the run directory
                links_created.append(os.path.basename(reco_event_file))
                files.ln(pjoin(self.me_dir, 'HTML', self.run_name,
                    '%s_MA5_%s_ANALYSIS%s_%d' % (self.run_tag, mode.upper(),
                    MA5_runtag, i + 1), 'Output', '_reco_events', links_created[-1]),
                    pjoin(self.me_dir, 'Events', self.run_name))

            logger.info("MadAnalysis5 successfully completed the reconstruction " +
              "'%s'. Links to the reconstructed event files are:" % MA5_runtag[6:])
            for link in links_created:
                logger.info(' --> %s' % pjoin(self.me_dir, 'Events', self.run_name, link))
            continue

        # Locate the main product of this run: a CLs summary for recasting,
        # a PDF report otherwise.
        if MA5_runtag.upper() == 'RECASTING':
            target = pjoin(self.me_dir, 'MA5_%s_ANALYSIS_%s'
                        % (mode.upper(), MA5_runtag), 'Output', 'CLs_output_summary.dat')
        else:
            target = pjoin(self.me_dir, 'MA5_%s_ANALYSIS_%s'
                        % (mode.upper(), MA5_runtag), 'PDF', 'main.pdf')
        has_pdf = True
        if not os.path.isfile(target):
            has_pdf = False

        # Copy the PDF report or CLs in the Events/run directory.
        if MA5_runtag.upper() == 'RECASTING':
            carboncopy_name = '%s_MA5_CLs.dat' % (self.run_tag)
        else:
            carboncopy_name = '%s_MA5_%s_analysis_%s.pdf' % (
                               self.run_tag, mode, MA5_runtag)
        if has_pdf:
            shutil.copy(target, pjoin(self.me_dir, 'Events', self.run_name, carboncopy_name))
        else:
            logger.error('MadAnalysis5 failed to create PDF output')
        if MA5_runtag != 'default':
            logger.info("MadAnalysis5 successfully completed the " +
              "%s. Reported results are placed in:" % ("analysis '%s'" % MA5_runtag
                         if MA5_runtag.upper() != 'RECASTING' else "recasting"))
        else:
            logger.info("MadAnalysis5 successfully completed the analysis." +
                        " Reported results are placed in:")
        logger.info(' --> %s' % pjoin(self.me_dir, 'Events', self.run_name, carboncopy_name))

        anal_dir = pjoin(self.me_dir, 'MA5_%s_ANALYSIS_%s' % (mode.upper(), MA5_runtag))
        if not os.path.exists(anal_dir):
            logger.error('MadAnalysis5 failed to completed succesfully')
            return

        # Copy the entire analysis in the HTML directory
        shutil.move(anal_dir, pjoin(self.me_dir, 'HTML', self.run_name,
                '%s_MA5_%s_ANALYSIS_%s' % (self.run_tag, mode.upper(), MA5_runtag)))

    # Set the number of events and cross-section to the last one
    # (maybe do something smarter later)
    new_details = {}
    for detail in ['nb_event', 'cross', 'error']:
        new_details[detail] = \
                      self.results[self.run_name].get_current_info()[detail]
    for detail in new_details:
        self.results.add_detail(detail, new_details[detail])

    self.update_status('Finished MA5 analyses.', level='madanalysis5_%s' % mode,
                       makehtml=False)

    # Update the banner
    self.banner.add(pjoin(self.me_dir, 'Cards',
                          'madanalysis5_%s_card.dat' % mode))
    banner_path = pjoin(self.me_dir, 'Events', self.run_name,
                        '%s_%s_banner.txt' % (self.run_name, self.run_tag))
    self.banner.write(banner_path)

    if not no_default:
        logger.info('Find more information about this run on the HTML local page')
        logger.info(' --> %s' % pjoin(self.me_dir, 'index.html'))
############################################################################
# End of MadAnalysis5 related functions
############################################################################
def do_delphes(self, line):
    """ run delphes and make associate root file/plot

    Steps: validate the arguments, detect Delphes 2 vs 3 (Delphes 2
    installations carry a 'data' directory), make sure the relevant
    cards exist (copying defaults and asking for edition if needed),
    record the cards in the banner, launch the run_delphes(3) wrapper
    through the cluster/multicore handler and finally produce plots and
    gzip the lhco output when present.
    """
    args = self.split_arg(line)
    # Check argument's validity
    if '--no_default' in args:
        no_default = True
        args.remove('--no_default')
    else:
        no_default = False

    if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
        logger.info('No delphes_card detected, so not run Delphes')
        return

    # Check all arguments
    filepath = self.check_delphes(args, nodefault=no_default)
    if no_default and not filepath:
        return  # no output file but nothing to do either.
    self.update_status('prepare delphes run', level=None)

    # A 'data' sub-directory is the signature of a Delphes 2 installation.
    if os.path.exists(pjoin(self.options['delphes_path'], 'data')):
        delphes3 = False
        prog = '../bin/internal/run_delphes'
        if filepath and '.hepmc' in filepath[:-10]:
            raise self.InvalidCmd, 'delphes2 do not support hepmc'
    else:
        delphes3 = True
        prog = '../bin/internal/run_delphes3'

    # Check that the delphes_card exists. If not copy the default and
    # ask for edition of the card.
    if not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
        if no_default:
            logger.info('No delphes_card detected, so not running Delphes')
            return
        files.cp(pjoin(self.me_dir, 'Cards', 'delphes_card_default.dat'),
                 pjoin(self.me_dir, 'Cards', 'delphes_card.dat'))
        logger.info('No delphes card found. Take the default one.')
    # Delphes 2 additionally needs a trigger card.
    if not delphes3 and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat')):
        files.cp(pjoin(self.me_dir, 'Cards', 'delphes_trigger_default.dat'),
                 pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat'))
    if not (no_default or self.force):
        if delphes3:
            self.ask_edit_cards(['delphes_card.dat'], args)
        else:
            self.ask_edit_cards(['delphes_card.dat', 'delphes_trigger.dat'], args)

    self.update_status('Running Delphes', level=None)

    delphes_dir = self.options['delphes_path']
    tag = self.run_tag
    # Record the cards used in the run banner (when a banner is in use).
    if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
        self.banner.add(pjoin(self.me_dir, 'Cards', 'delphes_card.dat'))
        if not delphes3:
            self.banner.add(pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat'))
        self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag)))

    cross = self.results[self.run_name].get_current_info()['cross']

    delphes_log = pjoin(self.me_dir, 'Events', self.run_name, "%s_delphes.log" % tag)
    # Fall back to the single-core launcher when no cluster is configured.
    if not self.cluster:
        clus = cluster.onecore
    else:
        clus = self.cluster
    clus.launch_and_wait(prog,
        argument=[delphes_dir, self.run_name, tag, str(cross), filepath],
        stdout=delphes_log, stderr=subprocess.STDOUT,
        cwd=pjoin(self.me_dir, 'Events'))

    if not os.path.exists(pjoin(self.me_dir, 'Events',
                                self.run_name, '%s_delphes_events.lhco.gz' % tag))\
      and not os.path.exists(pjoin(self.me_dir, 'Events',
                                self.run_name, '%s_delphes_events.lhco' % tag)):
        logger.info('If you are interested in lhco output. please run root2lhco converter.')
        logger.info(' or edit bin/internal/run_delphes3 to run the converter automatically.')

    #eradir = self.options['exrootanalysis_path']
    madir = self.options['madanalysis_path']
    td = self.options['td_path']

    if os.path.exists(pjoin(self.me_dir, 'Events',
                            self.run_name, '%s_delphes_events.lhco' % tag)):
        # Creating plots
        self.create_plot('Delphes')

    # Gzip the lhco file (re-checked: create_plot may have consumed it).
    if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag)):
        misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag))

    self.update_status('delphes done', level='delphes', makehtml=False)
############################################################################
def get_pid_final_initial_states(self):
    """Find the pid of all particles in the final and initial states.

    Reads the subprocess list from SubProcesses/subproc.mg and extracts,
    for each subprocess directory, the PDG codes listed in the IDUP DATA
    statements of born_leshouche.inc.  Returns a set of PDG ids as
    strings, exactly as they appear in the Fortran source.
    """
    pids = set()
    # One subprocess directory name per line of subproc.mg.
    # (Fix: use context managers so the file handles are always closed;
    # also dropped the unused local read of self.ninitial.)
    with open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')) as fsock:
        subproc = [l.strip() for l in fsock]

    # Matches e.g. "DATA (IDUP(I,1),I=1,4)/21,21,6,-6/" and captures the
    # comma-separated list of PDG codes between the slashes.
    pat = re.compile(r'''DATA \(IDUP\(I,\d+\),I=1,\d+\)/([\+\-\d,\s]*)/''', re.I)

    for Pdir in subproc:
        with open(pjoin(self.me_dir, 'SubProcesses', Pdir, 'born_leshouche.inc')) as fsock:
            text = fsock.read()
        for particles in pat.findall(text):
            pids.update(particles.split(','))

    return pids
############################################################################
def get_pdf_input_filename(self):
    """return the name of the file which is used by the pdfset

    Returns '' when the PDF is available in the cluster-local path (no
    transfer needed), ' ' as a placeholder when no file must be shipped,
    or the absolute path of the PDF data file / PDFsets directory.
    The result is cached on self.pdffile.
    """

    if self.options["cluster_local_path"] and \
            os.path.exists(self.options["cluster_local_path"]) and \
            self.options['run_mode'] == 1:
        # no need to transfer the pdf.
        return ''

    def check_cluster(path):
        # Return ' ' instead of the path when the same PDF file already
        # exists in the cluster-local storage (so no transfer is needed).
        if not self.options["cluster_local_path"] or \
                os.path.exists(self.options["cluster_local_path"]) or\
                self.options['run_mode'] != 1:
            return path
        main = self.options["cluster_local_path"]
        if os.path.isfile(path):
            filename = os.path.basename(path)
            possible_path = [pjoin(main, filename),
                             pjoin(main, "lhadpf", filename),
                             pjoin(main, "Pdfdata", filename)]
            if any(os.path.exists(p) for p in possible_path):
                return " "
            # NOTE(review): a regular file not found in the cluster-local
            # storage falls through and implicitly returns None — confirm
            # this is intended rather than returning `path`.
        else:
            return path

    if hasattr(self, 'pdffile') and self.pdffile:
        return self.pdffile
    else:
        # Look the pdlabel of the run up in the internal PDF list.
        for line in open(pjoin(self.me_dir, 'Source', 'PDF', 'pdf_list.txt')):
            data = line.split()
            if len(data) < 4:
                continue
            if data[1].lower() == self.run_card['pdlabel'].lower():
                self.pdffile = check_cluster(pjoin(self.me_dir, 'lib', 'Pdfdata', data[2]))
                return self.pdffile
        else:
            # possible when using lhapdf
            path = pjoin(self.me_dir, 'lib', 'PDFsets')
            if os.path.exists(path):
                self.pdffile = path
            else:
                self.pdffile = " "
            return self.pdffile
############################################################################
def do_open(self, line):
    """Open a text file/ eps file / html file"""
    # Validate the argument and normalise it into a real path before
    # handing it over to the platform-specific opener.
    args = self.split_arg(line)
    self.check_open(args)
    misc.open_file(args[0])
############################################################################
def do_set(self, line, log=True):
    """Set an option, which will be default for coming generations/outputs

    Dispatches on the option name: logging level, compilers, run mode,
    cluster settings, core count, timeouts, notification center and,
    as a fallback, any other known option (with path validation for
    *_path options).
    """
    # cmd calls automaticaly post_set after this command.

    args = self.split_arg(line)
    # Check the validity of the arguments
    self.check_set(args)
    # Check if we need to save this in the option file
    if args[0] in self.options_configuration and '--no_save' not in args:
        self.do_save('options --auto')

    if args[0] == "stdout_level":
        # Accept either a numeric level or a logging level name.
        if args[1].isdigit():
            logging.root.setLevel(int(args[1]))
            logging.getLogger('madgraph').setLevel(int(args[1]))
        else:
            logging.root.setLevel(eval('logging.' + args[1]))
            logging.getLogger('madgraph').setLevel(eval('logging.' + args[1]))
        if log: logger.info('set output information to level: %s' % args[1])
    elif args[0] == "fortran_compiler":
        if args[1] == 'None':
            args[1] = None
        self.options['fortran_compiler'] = args[1]
        # Rewrite make_opts only when the compiler actually changes.
        current = misc.detect_current_compiler(pjoin(self.me_dir, 'Source', 'make_opts'), 'fortran')
        if current != args[1] and args[1] != None:
            misc.mod_compilator(self.me_dir, args[1], current, 'gfortran')
    elif args[0] == "cpp_compiler":
        if args[1] == 'None':
            args[1] = None
        self.options['cpp_compiler'] = args[1]
        current = misc.detect_current_compiler(pjoin(self.me_dir, 'Source', 'make_opts'), 'cpp')
        if current != args[1] and args[1] != None:
            misc.mod_compilator(self.me_dir, args[1], current, 'cpp')
    elif args[0] == "run_mode":
        # 0: single core, 1: cluster, 2: multicore.
        if not args[1] in [0, 1, 2, '0', '1', '2']:
            raise self.InvalidCmd, 'run_mode should be 0, 1 or 2.'
        self.cluster_mode = int(args[1])
        self.options['run_mode'] = self.cluster_mode
    elif args[0] in ['cluster_type', 'cluster_queue', 'cluster_temp_path']:
        if args[1] == 'None':
            args[1] = None
        self.options[args[0]] = args[1]
        # cluster (re)-initialization done later
        # self.cluster update at the end of the routine
    elif args[0] in ['cluster_nb_retry', 'cluster_retry_wait', 'cluster_size']:
        self.options[args[0]] = int(args[1])
        # self.cluster update at the end of the routine
    elif args[0] == 'nb_core':
        if args[1] == 'None':
            # 'None' means: autodetect the number of cores.
            import multiprocessing
            self.nb_core = multiprocessing.cpu_count()
            self.options['nb_core'] = self.nb_core
            return
        if not args[1].isdigit():
            raise self.InvalidCmd('nb_core should be a positive number')
        self.nb_core = int(args[1])
        self.options['nb_core'] = self.nb_core
    elif args[0] == 'timeout':
        self.options[args[0]] = int(args[1])
    elif args[0] == 'cluster_status_update':
        # Accept either the "(first, second)" tuple syntax or two values.
        if '(' in args[1]:
            data = ' '.join([a for a in args[1:] if not a.startswith('-')])
            data = data.replace('(', '').replace(')', '').replace(',', ' ').split()
            first, second = data[:2]
        else:
            first, second = args[1:3]
        self.options[args[0]] = (int(first), int(second))
    elif args[0] == 'notification_center':
        if args[1] in ['None', 'True', 'False']:
            self.allow_notification_center = eval(args[1])
            self.options[args[0]] = eval(args[1])
        else:
            raise self.InvalidCmd('Not a valid value for notification_center')
    # True/False formatting
    elif args[0] in ['crash_on_error']:
        tmp = banner_mod.ConfigFile.format_variable(args[1], bool, 'crash_on_error')
        self.options[args[0]] = tmp
    elif args[0] in self.options:
        # Generic fallback: literal None/True/False, validated paths, or
        # the raw string value.
        if args[1] in ['None', 'True', 'False']:
            self.options[args[0]] = ast.literal_eval(args[1])
        elif args[0].endswith('path'):
            if os.path.exists(args[1]):
                self.options[args[0]] = args[1]
            elif os.path.exists(pjoin(self.me_dir, args[1])):
                self.options[args[0]] = pjoin(self.me_dir, args[1])
            else:
                raise self.InvalidCmd('Not a valid path: keep previous value: \'%s\'' % self.options[args[0]])
        else:
            self.options[args[0]] = args[1]
def post_set(self, stop, line):
    """Check if we need to save this in the option file

    Hook run automatically by cmd after do_set: reconfigures the cluster
    when a cluster-related option changed and persists configuration
    options (unless --no_save was given).  Always returns `stop`, even
    when the arguments were invalid (do_set already reported the error).
    """
    try:
        args = self.split_arg(line)
        # Any cluster option or a run_mode change requires rebuilding the
        # cluster/multicore handler.
        if 'cluster' in args[0] or args[0] == 'run_mode':
            self.configure_run_mode(self.options['run_mode'])

        # Check the validity of the arguments
        self.check_set(args)

        if args[0] in self.options_configuration and '--no_save' not in args:
            self.exec_cmd('save options %s --auto' % args[0])
        elif args[0] in self.options_madevent:
            logger.info('This option will be the default in any output that you are going to create in this session.')
            logger.info('In order to keep this changes permanent please run \'save options\'')
        return stop
    except self.InvalidCmd:
        return stop
def configure_run_mode(self, run_mode):
    """change the way to submit job 0: single core, 1: cluster, 2: multicore

    Instantiates self.cluster accordingly: a MultiCore handler for modes
    0 and 2 (with nb_core forced to 1 for mode 0), or a cluster handler
    for mode 1 — either a built-in one from cluster.from_name or one
    provided by a PLUGIN directory.
    """

    self.cluster_mode = run_mode
    self.options['run_mode'] = run_mode

    if run_mode == 2:
        if not self.options['nb_core']:
            # Autodetect the core count when it was never configured.
            import multiprocessing
            self.options['nb_core'] = multiprocessing.cpu_count()
        nb_core = self.options['nb_core']
    elif run_mode == 0:
        nb_core = 1

    if run_mode in [0, 2]:
        self.cluster = cluster.MultiCore(
                             **self.options)
        self.cluster.nb_core = nb_core
    #cluster_temp_path=self.options['cluster_temp_path'],

    if self.cluster_mode == 1:
        opt = self.options
        cluster_name = opt['cluster_type']
        if cluster_name in cluster.from_name:
            # Native cluster type: instantiate it directly.
            self.cluster = cluster.from_name[cluster_name](**opt)
        else:
            # Unknown type: look for a PLUGIN that provides it.
            if MADEVENT and ('mg5_path' not in self.options or not self.options['mg5_path']):
                if not self.plugin_path:
                    raise self.InvalidCmd('%s not native cluster type and no PLUGIN directory available')
            elif MADEVENT:
                mg5dir = self.options['mg5_path']
                if mg5dir not in sys.path:
                    sys.path.append(mg5dir)
                newpath = pjoin(mg5dir, 'PLUGIN')
                if newpath not in self.plugin_path:
                    self.plugin_path.append(newpath)
            else:
                mg5dir = MG5DIR
            # Check if a plugin define this type of cluster
            # check for PLUGIN format
            for plugpath in self.plugin_path:
                plugindirname = os.path.basename(plugpath)
                for plug in os.listdir(plugpath):
                    if os.path.exists(pjoin(plugpath, plug, '__init__.py')):
                        try:
                            __import__('%s.%s' % (plugindirname, plug))
                        except Exception:
                            logger.critical('plugin directory %s/%s fail to be loaded. Please check it', plugindirname, plug)
                            continue
                        plugin = sys.modules['%s.%s' % (plugindirname, plug)]
                        if not hasattr(plugin, 'new_cluster'):
                            continue
                        if not misc.is_plugin_supported(plugin):
                            continue
                        if cluster_name in plugin.new_cluster:
                            logger.info("cluster handling will be done with PLUGIN: %s" % plug, '$MG:color:BLACK')
                            self.cluster = plugin.new_cluster[cluster_name](**opt)
                            break
                else:
                    # Inner loop exhausted without a match: try next path.
                    continue
                # Inner loop hit 'break' (plugin found): stop searching.
                break
            else:
                raise self.InvalidCmd, "%s is not recognized as a supported cluster format." % cluster_name
def check_param_card(self, path, run=True, dependent=False):
"""
1) Check that no scan parameter are present
2) Check that all the width are define in the param_card.
- If a scan parameter is define. create the iterator and recall this fonction
on the first element.
- If some width are set on 'Auto', call the computation tools.
- Check that no width are too small (raise a warning if this is the case)
3) if dependent is on True check for dependent parameter (automatic for scan)"""
pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
text = open(path).read()
if pattern_scan.search(text):
if not isinstance(self, cmd.CmdShell):
# we are in web mode => forbid scan due to security risk
raise Exception, "Scan are not allowed in web mode"
# at least one scan parameter found. create an iterator to go trough the cards
main_card = check_param_card.ParamCardIterator(text)
self.param_card_iterator = main_card
first_card = main_card.next(autostart=True)
first_card.write(path)
return self.check_param_card(path, run, dependent=True)
pdg_info = pattern_width.findall(text)
if pdg_info:
if run:
logger.info('Computing the width set on auto in the param_card.dat')
has_nlo = any(nlo.lower()=="@nlo" for _,nlo in pdg_info)
pdg = [pdg for pdg,nlo in pdg_info]
if not has_nlo:
self.do_compute_widths('%s %s' % (' '.join(pdg), path))
else:
self.do_compute_widths('%s %s --nlo' % (' '.join(pdg), path))
else:
logger.info('''Some width are on Auto in the card.
Those will be computed as soon as you have finish the edition of the cards.
If you want to force the computation right now and being able to re-edit
the cards afterwards, you can type \"compute_wdiths\".''')
card = check_param_card.ParamCard(path)
if dependent:
AskforEditCard.update_dependent(self, self.me_dir, card, path, timer=20)
for param in card['decay']:
width = param.value
if width == 0:
continue
try:
mass = card['mass'].get(param.lhacode).value
except Exception:
logger.warning('Missing mass in the lhef file (%s) . Please fix this (use the "update missing" command if needed)', param.lhacode[0])
continue
if mass and width/mass < 1e-12:
logger.error('The width of particle %s is too small for an s-channel resonance (%s). If you have this particle in an s-channel, this is likely to create numerical instabilities .', param.lhacode[0], width)
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
elif not mass and width:
logger.error('The width of particle %s is different of zero for a massless particle.', param.lhacode[0])
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
return
def add_error_log_in_html(self, errortype=None):
    """If a ME run is currently running add a link in the html output"""

    # Be very carefull to not raise any error here (the traceback
    # will be modify in that case.)
    if hasattr(self, 'results') and hasattr(self.results, 'current') and\
          self.results.current and 'run_name' in self.results.current and \
          hasattr(self, 'me_dir'):
        name = self.results.current['run_name']
        tag = self.results.current['tag']
        # Point the debug output to a run/tag specific log file.
        self.debug_output = pjoin(self.me_dir, '%s_%s_debug.log' % (name, tag))
        if errortype:
            self.results.current.debug = errortype
        else:
            self.results.current.debug = self.debug_output
    else:
        # Force class default
        self.debug_output = CommonRunCmd.debug_output

    # Refresh the ME5_debug symlink so it always points to the current
    # debug file (skip when the debug output *is* ME5_debug itself).
    if os.path.exists('ME5_debug') and not 'ME5_debug' in self.debug_output:
        os.remove('ME5_debug')
    if not 'ME5_debug' in self.debug_output:
        os.system('ln -s %s ME5_debug &> /dev/null' % self.debug_output)
def do_quit(self, line):
    """Not in help: exit """
    if not self.force_run:
        # Best-effort cleanup: none of these steps may abort the exit.
        try:
            os.remove(pjoin(self.me_dir, 'RunWeb'))
        except Exception:
            pass
        try:
            self.store_result()
        except Exception:
            # nothing ran, so there is no result to store
            pass
        try:
            self.update_status('', level=None)
        except Exception:
            pass
    self.gen_card_html()
    return super(CommonRunCmd, self).do_quit(line)

# Aliases
do_EOF = do_quit
do_exit = do_quit
def update_status(self, status, level, makehtml=True, force=True,
                  error=False, starttime=None, update_results=True,
                  print_log=True):
    """ update the index status

    `status` is either a plain string or a (idle, running, completed)
    tuple.  When makehtml is requested without force, updates are
    throttled to at most one every 3 seconds.
    """

    if makehtml and not force:
        # Throttle html regeneration.
        if hasattr(self, 'next_update') and time.time() < self.next_update:
            return
        else:
            self.next_update = time.time() + 3

    if print_log:
        if isinstance(status, str):
            if '<br>' not in status:
                logger.info(status)
        elif starttime:
            running_time = misc.format_timer(time.time() - starttime)
            logger.info(' Idle: %s, Running: %s, Completed: %s [ %s ]' % \
                       (status[0], status[1], status[2], running_time))
        else:
            logger.info(' Idle: %s, Running: %s, Completed: %s' % status[:3])

    # Strip a leading ANSI color escape (and its trailing reset) before
    # the status string is stored for the html page.
    if isinstance(status, str) and status.startswith('\x1b['):
        status = status[status.index('m') + 1:-7]
    # Drop any trailing arXiv citation from the displayed status.
    if 'arXiv' in status:
        if '[' in status:
            status = status.split('[', 1)[0]
        else:
            status = status.split('arXiv', 1)[0]

    if update_results:
        self.results.update(status, level, makehtml=makehtml, error=error)
############################################################################
def keep_cards(self, need_card=[], ignore=[]):
"""Ask the question when launching generate_events/multi_run"""
check_card = ['pythia_card.dat', 'pgs_card.dat','delphes_card.dat',
'delphes_trigger.dat', 'madspin_card.dat', 'shower_card.dat',
'reweight_card.dat','pythia8_card.dat',
'madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat',
'plot_card.dat']
cards_path = pjoin(self.me_dir,'Cards')
for card in check_card:
if card in ignore or (ignore == ['*'] and card not in need_card):
continue
if card not in need_card:
if os.path.exists(pjoin(cards_path, card)):
files.mv(pjoin(cards_path, card), pjoin(cards_path, '.%s' % card))
else:
if not os.path.exists(pjoin(cards_path, card)):
if os.path.exists(pjoin(cards_path, '.%s' % card)):
files.mv(pjoin(cards_path, '.%s' % card), pjoin(cards_path, card))
else:
default = card.replace('.dat', '_default.dat')
files.cp(pjoin(cards_path, default),pjoin(cards_path, card))
############################################################################
def set_configuration(self, config_path=None, final=True, initdir=None, amcatnlo=False):
""" assign all configuration variable from file
./Cards/mg5_configuration.txt. assign to default if not define """
if not hasattr(self, 'options') or not self.options:
self.options = dict(self.options_configuration)
self.options.update(self.options_madgraph)
self.options.update(self.options_madevent)
if not config_path:
if os.environ.has_key('MADGRAPH_BASE'):
config_path = pjoin(os.environ['MADGRAPH_BASE'],'mg5_configuration.txt')
self.set_configuration(config_path=config_path, final=False)
if 'HOME' in os.environ:
config_path = pjoin(os.environ['HOME'],'.mg5',
'mg5_configuration.txt')
if os.path.exists(config_path):
self.set_configuration(config_path=config_path, final=False)
if amcatnlo:
me5_config = pjoin(self.me_dir, 'Cards', 'amcatnlo_configuration.txt')
else:
me5_config = pjoin(self.me_dir, 'Cards', 'me5_configuration.txt')
self.set_configuration(config_path=me5_config, final=False, initdir=self.me_dir)
if self.options.has_key('mg5_path') and self.options['mg5_path']:
MG5DIR = self.options['mg5_path']
config_file = pjoin(MG5DIR, 'input', 'mg5_configuration.txt')
self.set_configuration(config_path=config_file, final=False,initdir=MG5DIR)
else:
self.options['mg5_path'] = None
return self.set_configuration(config_path=me5_config, final=final,initdir=self.me_dir)
config_file = open(config_path)
# read the file and extract information
logger.info('load configuration from %s ' % config_file.name)
for line in config_file:
if '#' in line:
line = line.split('#',1)[0]
line = line.replace('\n','').replace('\r\n','')
try:
name, value = line.split('=')
except ValueError:
pass
else:
name = name.strip()
value = value.strip()
if name.endswith('_path') and not name.startswith('cluster'):
path = value
if os.path.isdir(path):
self.options[name] = os.path.realpath(path)
continue
if not initdir:
continue
path = pjoin(initdir, value)
if os.path.isdir(path):
self.options[name] = os.path.realpath(path)
continue
else:
self.options[name] = value
if value.lower() == "none":
self.options[name] = None
if not final:
return self.options # the return is usefull for unittest
# Treat each expected input
# delphes/pythia/... path
for key in self.options:
# Final cross check for the path
if key.endswith('path') and not key.startswith("cluster"):
path = self.options[key]
if path is None:
continue
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
path = pjoin(self.me_dir, self.options[key])
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
elif self.options.has_key('mg5_path') and self.options['mg5_path']:
path = pjoin(self.options['mg5_path'], self.options[key])
if os.path.isdir(path):
self.options[key] = os.path.realpath(path)
continue
self.options[key] = None
elif key.startswith('cluster') and key != 'cluster_status_update':
if key in ('cluster_nb_retry','cluster_wait_retry'):
self.options[key] = int(self.options[key])
if hasattr(self,'cluster'):
del self.cluster
pass
elif key == 'automatic_html_opening':
if self.options[key] in ['False', 'True']:
self.options[key] =ast.literal_eval(self.options[key])
elif key == "notification_center":
if self.options[key] in ['False', 'True']:
self.allow_notification_center =ast.literal_eval(self.options[key])
self.options[key] =ast.literal_eval(self.options[key])
elif key not in ['text_editor','eps_viewer','web_browser','stdout_level',
'complex_mass_scheme', 'gauge', 'group_subprocesses']:
# Default: try to set parameter
try:
self.do_set("%s %s --no_save" % (key, self.options[key]), log=False)
except self.InvalidCmd:
logger.warning("Option %s from config file not understood" \
% key)
# Configure the way to open a file:
misc.open_file.configure(self.options)
self.configure_run_mode(self.options['run_mode'])
return self.options
@staticmethod
def find_available_run_name(me_dir):
""" find a valid run_name for the current job """
name = 'run_%02d'
data = [int(s[4:j]) for s in os.listdir(pjoin(me_dir,'Events')) for
j in range(4,len(s)+1) if \
s.startswith('run_') and s[4:j].isdigit()]
return name % (max(data+[0])+1)
    ############################################################################
    def do_decay_events(self,line):
        """Require MG5 directory: decay events with spin correlations

        Runs the MadSpin module on the event file given on the command line,
        moves the decayed file into a new <run_name>_decayed_<i> directory and
        registers the new run (cross-section rescaled by the MadSpin branching
        ratio) in the results database.
        """

        # in -from_cards mode there is nothing to do without a madspin card
        if '-from_cards' in line and not os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
            return

        # First need to load MadSpin
        # Check that MG5 directory is present .
        if MADEVENT and not self.options['mg5_path']:
            raise self.InvalidCmd, '''The module decay_events requires that MG5 is installed on the system.
            You can install it and set its path in ./Cards/me5_configuration.txt'''
        elif MADEVENT:
            sys.path.append(self.options['mg5_path'])
        try:
            import MadSpin.decay as decay
            import MadSpin.interface_madspin as interface_madspin
        except ImportError:
            if __debug__:
                raise
            else:
                raise self.ConfigurationError, '''Can\'t load MadSpin
            The variable mg5_path might not be correctly configured.'''

        self.update_status('Running MadSpin', level='madspin')
        # interactive mode: let the user edit the madspin card first
        if not '-from_cards' in line and '-f' not in line:
            self.keep_cards(['madspin_card.dat'], ignore=['*'])
            self.ask_edit_cards(['madspin_card.dat'], 'fixed', plot=False)
        self.help_decay_events(skip_syntax=True)

        # load the name of the event file
        args = self.split_arg(line)
        self.check_decay_events(args)
        # args now alway content the path to the valid files
        madspin_cmd = interface_madspin.MadSpinInterface(args[0])
        # pass current options to the interface
        madspin_cmd.mg5cmd.options.update(self.options)
        madspin_cmd.cluster = self.cluster
        # route MadSpin status messages through our own status display
        madspin_cmd.update_status = lambda *x,**opt: self.update_status(*x, level='madspin',**opt)

        path = pjoin(self.me_dir, 'Cards', 'madspin_card.dat')

        madspin_cmd.import_command_file(path)

        # create a new run_name directory for this output
        i = 1
        while os.path.exists(pjoin(self.me_dir,'Events', '%s_decayed_%i' % (self.run_name,i))):
            i+=1
        new_run = '%s_decayed_%i' % (self.run_name,i)
        evt_dir = pjoin(self.me_dir, 'Events')

        os.mkdir(pjoin(evt_dir, new_run))
        current_file = args[0].replace('.lhe', '_decayed.lhe')
        new_file = pjoin(evt_dir, new_run, os.path.basename(args[0]))
        # MadSpin may have written the output gzipped (or not): try both forms
        if not os.path.exists(current_file):
            if os.path.exists(current_file+'.gz'):
                current_file += '.gz'
                new_file += '.gz'
            elif current_file.endswith('.gz') and os.path.exists(current_file[:-3]):
                current_file = current_file[:-3]
                new_file = new_file[:-3]
            else:
                logger.error('MadSpin fails to create any decayed file.')
                return

        files.mv(current_file, new_file)
        logger.info("The decayed event file has been moved to the following location: ")
        logger.info(new_file)

        # register the decayed run: event count scaled by MadSpin efficiency,
        # cross-section/error taken from the MadSpin interface
        if hasattr(self, 'results'):
            current = self.results.current
            nb_event = self.results.current['nb_event']
            if not nb_event:
                # fall back on the first entry of the current run
                current = self.results[self.run_name][0]
                nb_event = current['nb_event']

            cross = current['cross']
            error = current['error']
            self.results.add_run( new_run, self.run_card)
            self.results.add_detail('nb_event', int(nb_event*madspin_cmd.efficiency))
            self.results.add_detail('cross', madspin_cmd.cross)#cross * madspin_cmd.branching_ratio)
            self.results.add_detail('error', madspin_cmd.error+ cross * madspin_cmd.err_branching_ratio)
            self.results.add_detail('run_mode', current['run_mode'])

        self.run_name = new_run
        self.banner = madspin_cmd.banner
        self.banner.add(path)
        self.banner.write(pjoin(self.me_dir,'Events',self.run_name, '%s_%s_banner.txt' %
                                (self.run_name, self.run_tag)))
        self.update_status('MadSpin Done', level='parton', makehtml=False)

        if 'unweighted' in os.path.basename(args[0]):
            self.create_plot('parton')
def complete_decay_events(self, text, line, begidx, endidx):
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
return self.complete_plot(text, line, begidx, endidx)
else:
return
def complete_print_results(self,text, line, begidx, endidx):
"Complete the print results command"
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
#return valid run_name
data = misc.glob(pjoin('*','unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
tmp1 = self.list_completion(text, data)
return tmp1
else:
data = misc.glob('*_pythia_events.hep.gz', pjoin(self.me_dir, 'Events', args[0]))
data = [os.path.basename(p).rsplit('_',1)[0] for p in data]
data += ["--mode=a", "--mode=w", "--path=", "--format=short"]
tmp1 = self.list_completion(text, data)
return tmp1
def help_print_result(self):
logger.info("syntax: print_result [RUN] [TAG] [options]")
logger.info("-- show in text format the status of the run (cross-section/nb-event/...)")
logger.info("--path= defines the path of the output file.")
logger.info("--mode=a allow to add the information at the end of the file.")
logger.info("--format=short (only if --path is define)")
logger.info(" allows to have a multi-column output easy to parse")
############################################################################
def do_check_events(self, line):
""" Run some sanity check on the generated events."""
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The module reweight requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import madgraph.interface.reweight_interface as reweight_interface
except ImportError:
raise self.ConfigurationError, '''Can\'t load Reweight module.
The variable mg5_path might not be correctly configured.'''
# load the name of the event file
args = self.split_arg(line)
self.check_check_events(args)
# args now alway content the path to the valid files
reweight_cmd = reweight_interface.ReweightInterface(args[0], allow_madspin=True)
reweight_cmd.mother = self
self.update_status('Running check on events', level='check')
reweight_cmd.check_events()
############################################################################
def complete_check_events(self, text, line, begidx, endidx):
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1 and os.path.sep not in text:
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
return self.list_completion(text, data, line)
else:
return self.path_completion(text,
os.path.join('.',*[a for a in args \
if a.endswith(os.path.sep)]))
def complete_reweight(self,text, line, begidx, endidx):
"Complete the pythia command"
args = self.split_arg(line[0:begidx], error=False)
#return valid run_name
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = list(set([n.rsplit('/',2)[1] for n in data]))
if not '-f' in args:
data.append('-f')
tmp1 = self.list_completion(text, data)
return tmp1
def complete_compute_widths(self, text, line, begidx, endidx, formatting=True):
"Complete the compute_widths command"
args = self.split_arg(line[0:begidx])
if args[-1] in ['--path=', '--output=']:
completion = {'path': self.path_completion(text)}
elif line[begidx-1] == os.path.sep:
current_dir = pjoin(*[a for a in args if a.endswith(os.path.sep)])
if current_dir.startswith('--path='):
current_dir = current_dir[7:]
if current_dir.startswith('--output='):
current_dir = current_dir[9:]
completion = {'path': self.path_completion(text, current_dir)}
else:
completion = {}
completion['options'] = self.list_completion(text,
['--path=', '--output=', '--min_br=0.\$', '--nlo',
'--precision_channel=0.\$', '--body_decay='])
return self.deal_multiple_categories(completion, formatting)
def update_make_opts(self):
"""update the make_opts file writing the environmental variables
stored in make_opts_var"""
make_opts = os.path.join(self.me_dir, 'Source', 'make_opts')
# Set some environment variables common to all interfaces
if not hasattr(self,'options') or not 'pythia8_path' in self.options or \
not self.options['pythia8_path'] or \
not os.path.isfile(pjoin(self.options['pythia8_path'],'bin','pythia8-config')):
self.make_opts_var['PYTHIA8_PATH']='NotInstalled'
else:
self.make_opts_var['PYTHIA8_PATH']=self.options['pythia8_path']
self.make_opts_var['MG5AMC_VERSION'] = misc.get_pkg_info()['version']
return self.update_make_opts_full(make_opts, self.make_opts_var)
    @staticmethod
    def update_make_opts_full(path, def_variables, keep_old=True):
        """update the make_opts file writing the environmental variables
        of def_variables.
        if a value of the dictionary is None then it is not written.
        The file starts with an edition area (NAME=value lines up to the
        #end_of_make_opts_variables tag) which is fully rebuilt; the rest
        of the file is kept verbatim.  The file is rewritten only when at
        least one value actually changed (or a new one was added).
        """
        make_opts = path
        # a variable line of the edition area looks like 'NAME = value'
        pattern = re.compile(r'^(\w+)\s*=\s*(.*)$',re.DOTALL)
        diff = False # set to True if one variable needs to be updated
                     # if it stays False the file is not modified
        tag = '#end_of_make_opts_variables\n'
        make_opts_variable = True # flag to say if we are in edition area or not
        content = []
        variables = dict(def_variables)
        # NOTE(review): python2 semantics -- keys() must be a list for the
        # .remove() call below; this breaks under python3
        need_keys = variables.keys()
        for line in open(make_opts):
            line = line.strip()
            if make_opts_variable:
                if line.startswith('#') or not line:
                    # comments/blank lines of the edition area are dropped;
                    # the closing tag ends the edition area
                    if line.startswith('#end_of_make_opts_variables'):
                        make_opts_variable = False
                    continue
                elif pattern.search(line):
                    key, value = pattern.search(line).groups()
                    if key not in variables:
                        # keep a pre-existing variable we do not manage
                        variables[key] = value
                    elif value != variables[key]:
                        diff=True
                    else:
                        need_keys.remove(key)
                else:
                    # first non-variable line: rest of file kept verbatim
                    make_opts_variable = False
                    content.append(line)
            else:
                content.append(line)
        if need_keys:
            diff=True # This means that new definitions are added to the file.
        # rebuild the edition area (None values are skipped on purpose)
        content_variables = '\n'.join('%s=%s' % (k,v) for k, v in variables.items() if v is not None)
        content_variables += '\n%s' % tag
        if diff:
            with open(make_opts, 'w') as fsock:
                fsock.write(content_variables + '\n'.join(content))
        return
    # lhapdf-related functions
    def link_lhapdf(self, libdir, extra_dirs = []):
        """links lhapdf into libdir

        Queries lhapdf-config for the library/data locations, symlinks the
        static library into *libdir*, prepares the lib/PDFsets directory and
        fills self.make_opts_var so make_opts picks up the lhapdf settings.
        """
        lhapdf_version = self.get_lhapdf_version()
        logger.info('Using LHAPDF v%s interface for PDFs' % lhapdf_version)
        lhalibdir = subprocess.Popen([self.options['lhapdf'], '--libdir'],
                 stdout = subprocess.PIPE).stdout.read().strip()
        # v5 and v6 expose the pdf data directory under different flags
        if lhapdf_version.startswith('5.'):
            pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
                 stdout = subprocess.PIPE).stdout.read().strip()
        else:
            pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
                 stdout = subprocess.PIPE).stdout.read().strip()
        self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsetsdir)
        # link the static library in lib
        lhalib = 'libLHAPDF.a'
        if os.path.exists(pjoin(libdir, lhalib)):
            files.rm(pjoin(libdir, lhalib))
        files.ln(pjoin(lhalibdir, lhalib), libdir)
        # just create the PDFsets dir, the needed PDF set will be copied at run time
        if not os.path.isdir(pjoin(libdir, 'PDFsets')):
            os.mkdir(pjoin(libdir, 'PDFsets'))
        self.make_opts_var['lhapdf'] = self.options['lhapdf']
        self.make_opts_var['lhapdfversion'] = lhapdf_version[0]
        self.make_opts_var['lhapdfsubversion'] = lhapdf_version.split('.',2)[1]
        self.make_opts_var['lhapdf_config'] = self.options['lhapdf']
def get_characteristics(self, path=None):
"""reads the proc_characteristics file and initialises the correspondant
dictionary"""
if not path:
path = os.path.join(self.me_dir, 'SubProcesses', 'proc_characteristics')
self.proc_characteristics = banner_mod.ProcCharacteristic(path)
return self.proc_characteristics
    def copy_lhapdf_set(self, lhaid_list, pdfsets_dir):
        """copy (if needed) the lhapdf set corresponding to the lhaid in lhaid_list
        into lib/PDFsets

        Numeric entries of lhaid_list are resolved through the pdfsets index;
        string entries are taken as set names directly.  Missing sets are
        downloaded via install_lhapdf_pdfset.  On clusters with a local copy
        of the pdf data, LHAPATH is pointed there instead of copying.
        """
        if not hasattr(self, 'lhapdf_pdfsets'):
            self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsets_dir)

        # resolve each lhaid to the pdf set (file or directory) name
        pdfsetname=set()
        for lhaid in lhaid_list:
            if isinstance(lhaid, str) and lhaid.isdigit():
                lhaid = int(lhaid)
            if isinstance(lhaid, (int,float)):
                try:
                    if lhaid in self.lhapdf_pdfsets:
                        pdfsetname.add(self.lhapdf_pdfsets[lhaid]['filename'])
                    else:
                        raise MadGraph5Error('lhaid %s not valid input number for the current lhapdf' % lhaid )
                except KeyError:
                    if self.lhapdf_version.startswith('5'):
                        raise MadGraph5Error(\
                            ('invalid lhaid set in th run_card: %d .\nPlease note that some sets' % lhaid) + \
                             '(eg MSTW 90%CL error sets) \nare not available in aMC@NLO + LHAPDF 5.x.x')
                    else:
                        logger.debug('%d not found in pdfsets.index' % lhaid)
            else:
                # a non-numeric entry is already a set name
                pdfsetname.add(lhaid)

        # check if the file exists, otherwise install it:
        # also check that the PDFsets dir exists, otherwise create it.
        # if fails, install the lhapdfset into lib/PDFsets
        if not os.path.isdir(pdfsets_dir):
            try:
                os.mkdir(pdfsets_dir)
            except OSError:
                pdfsets_dir = pjoin(self.me_dir, 'lib', 'PDFsets')
        elif os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets')):
            #clean previous set of pdf used
            for name in os.listdir(pjoin(self.me_dir, 'lib', 'PDFsets')):
                if name not in pdfsetname:
                    try:
                        if os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', name)):
                            shutil.rmtree(pjoin(self.me_dir, 'lib', 'PDFsets', name))
                        else:
                            os.remove(pjoin(self.me_dir, 'lib', 'PDFsets', name))
                    except Exception, error:
                        logger.debug('%s', error)

        # candidate locations for a cluster-local copy of the pdf data
        if self.options["cluster_local_path"]:
            lhapdf_cluster_possibilities = [self.options["cluster_local_path"],
                                      pjoin(self.options["cluster_local_path"], "lhapdf"),
                                      pjoin(self.options["cluster_local_path"], "lhapdf", "pdfsets"),
                                      pjoin(self.options["cluster_local_path"], "..", "lhapdf"),
                                      pjoin(self.options["cluster_local_path"], "..", "lhapdf", "pdfsets"),
                                      pjoin(self.options["cluster_local_path"], "..", "lhapdf","pdfsets", "6.1")
                                      ]
        else:
            lhapdf_cluster_possibilities = []

        for pdfset in pdfsetname:
            # Check if we need to copy the pdf
            if self.options["cluster_local_path"] and self.options["run_mode"] == 1 and \
                any((os.path.exists(pjoin(d, pdfset)) for d in lhapdf_cluster_possibilities)):

                os.environ["LHAPATH"] = [d for d in lhapdf_cluster_possibilities if os.path.exists(pjoin(d, pdfset))][0]
                os.environ["CLUSTER_LHAPATH"] = os.environ["LHAPATH"]
                # no need to copy it
                if os.path.exists(pjoin(pdfsets_dir, pdfset)):
                    try:
                        # NOTE(review): 'name' below leaks from the cleanup
                        # loop above (and is undefined when that loop did not
                        # run); 'pdfset' looks intended -- confirm upstream
                        # before changing.
                        if os.path.isdir(pjoin(pdfsets_dir, name)):
                            shutil.rmtree(pjoin(pdfsets_dir, name))
                        else:
                            os.remove(pjoin(pdfsets_dir, name))
                    except Exception, error:
                        logger.debug('%s', error)

            #check that the pdfset is not already there
            elif not os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)) and \
               not os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)):

                if pdfset and not os.path.exists(pjoin(pdfsets_dir, pdfset)):
                    self.install_lhapdf_pdfset(pdfsets_dir, pdfset)

                if os.path.exists(pjoin(pdfsets_dir, pdfset)):
                    files.cp(pjoin(pdfsets_dir, pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
                elif os.path.exists(pjoin(os.path.dirname(pdfsets_dir), pdfset)):
                    files.cp(pjoin(os.path.dirname(pdfsets_dir), pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
def install_lhapdf_pdfset(self, pdfsets_dir, filename):
"""idownloads and install the pdfset filename in the pdfsets_dir"""
lhapdf_version = self.get_lhapdf_version()
local_path = pjoin(self.me_dir, 'lib', 'PDFsets')
return self.install_lhapdf_pdfset_static(self.options['lhapdf'],
pdfsets_dir, filename,
lhapdf_version=lhapdf_version,
alternate_path=local_path)
    @staticmethod
    def install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir, filename,
                                     lhapdf_version=None, alternate_path=None):
        """downloads and installs the pdfset filename in the pdfsets_dir.
        Version which can be used independently of the class.
        local path (alternate_path) is used if the global installation fails.
        """
        # determine the version / data dir when not given by the caller
        if not lhapdf_version:
            lhapdf_version = subprocess.Popen([lhapdf_config, '--version'],
                        stdout = subprocess.PIPE).stdout.read().strip()

        if not pdfsets_dir:
            pdfsets_dir = subprocess.Popen([lhapdf_config, '--datadir'],
                         stdout = subprocess.PIPE).stdout.read().strip()

        # an integer argument is an lhaid: translate it to the set name first
        if isinstance(filename, int):
            pdf_info = CommonRunCmd.get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version)
            filename = pdf_info[filename]['filename']

        if os.path.exists(pjoin(pdfsets_dir, filename)):
            logger.debug('%s is already present in %s', filename, pdfsets_dir)
            return

        logger.info('Trying to download %s' % filename)

        if lhapdf_version.startswith('5.'):
            # use the lhapdf-getdata command, which is in the same path as
            # lhapdf-config
            getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf-getdata'))
            misc.call([getdata, filename], cwd = pdfsets_dir)

        elif lhapdf_version.startswith('6.'):
            # use the "lhapdf install xxx" command, which is in the same path as
            # lhapdf-config
            getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf'))
            misc.call([getdata, 'install', filename], cwd = pdfsets_dir)

        else:
            raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)

        # check that the file has been installed in the global dir
        if os.path.exists(pjoin(pdfsets_dir, filename)) or \
           os.path.isdir(pjoin(pdfsets_dir, filename)):
            logger.info('%s successfully downloaded and stored in %s' \
                    % (filename, pdfsets_dir))

        #otherwise (if v5) save it locally
        elif lhapdf_version.startswith('5.'):
            logger.warning('Could not download %s into %s. Trying to save it locally' \
                    % (filename, pdfsets_dir))
            CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, alternate_path, filename,
                                                      lhapdf_version=lhapdf_version)

        # v6: a .LHgrid name may simply need the extension stripped
        elif lhapdf_version.startswith('6.') and '.LHgrid' in filename:
            logger.info('Could not download %s: Try %s', filename, filename.replace('.LHgrid',''))
            return CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir,
                                                             filename.replace('.LHgrid',''),
                                                             lhapdf_version, alternate_path)
        else:
            raise MadGraph5Error, \
                'Could not download %s into %s. Please try to install it manually.' \
                    % (filename, pdfsets_dir)
def get_lhapdf_pdfsets_list(self, pdfsets_dir):
"""read the PDFsets.index file, which should be located in the same
place as pdfsets_dir, and return a list of dictionaries with the information
about each pdf set"""
lhapdf_version = self.get_lhapdf_version()
return self.get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version)
@staticmethod
def get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version):
if lhapdf_version.startswith('5.'):
if os.path.exists('%s.index' % pdfsets_dir):
indexfile = '%s.index' % pdfsets_dir
else:
raise MadGraph5Error, 'index of lhapdf file not found'
pdfsets_lines = \
[l for l in open(indexfile).read().split('\n') if l.strip() and \
not '90cl' in l]
lhapdf_pdfsets = dict( (int(l.split()[0]), {'lhaid': int(l.split()[0]),
'pdflib_ntype': int(l.split()[1]),
'pdflib_ngroup': int(l.split()[2]),
'pdflib_nset': int(l.split()[3]),
'filename': l.split()[4],
'lhapdf_nmem': int(l.split()[5]),
'q2min': float(l.split()[6]),
'q2max': float(l.split()[7]),
'xmin': float(l.split()[8]),
'xmax': float(l.split()[9]),
'description': l.split()[10]}) \
for l in pdfsets_lines)
elif lhapdf_version.startswith('6.'):
pdfsets_lines = \
[l for l in open(pjoin(pdfsets_dir, 'pdfsets.index')).read().split('\n') if l.strip()]
lhapdf_pdfsets = dict( (int(l.split()[0]),
{'lhaid': int(l.split()[0]),
'filename': l.split()[1]}) \
for l in pdfsets_lines)
else:
raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)
return lhapdf_pdfsets
def get_lhapdf_version(self):
"""returns the lhapdf version number"""
if not hasattr(self, 'lhapdfversion'):
try:
self.lhapdf_version = \
subprocess.Popen([self.options['lhapdf'], '--version'],
stdout = subprocess.PIPE).stdout.read().strip()
except OSError, error:
if error.errno == 2:
raise Exception, 'lhapdf executable (%s) is not found on your system. Please install it and/or indicate the path to the correct executable in input/mg5_configuration.txt' % self.options['lhapdf']
else:
raise
# this will be removed once some issues in lhapdf6 will be fixed
if self.lhapdf_version.startswith('6.0'):
raise MadGraph5Error('LHAPDF 6.0.x not supported. Please use v6.1 or later')
if self.lhapdf_version.startswith('6.2'):
logger.warning('Support of LHAPDF 6.2.x is still in beta phase. Consider to use LHAPDF 6.1.x in case of problem.')
return self.lhapdf_version
def get_lhapdf_pdfsetsdir(self):
lhapdf_version = self.get_lhapdf_version()
# check if the LHAPDF_DATA_PATH variable is defined
if 'LHAPDF_DATA_PATH' in os.environ.keys() and os.environ['LHAPDF_DATA_PATH']:
datadir = os.environ['LHAPDF_DATA_PATH']
elif lhapdf_version.startswith('5.'):
datadir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
stdout = subprocess.PIPE).stdout.read().strip()
elif lhapdf_version.startswith('6.'):
datadir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
stdout = subprocess.PIPE).stdout.read().strip()
return datadir
def get_lhapdf_libdir(self):
lhapdf_version = self.get_lhapdf_version()
if lhapdf_version.startswith('5.'):
libdir = subprocess.Popen([self.options['lhapdf-config'], '--libdir'],
stdout = subprocess.PIPE).stdout.read().strip()
elif lhapdf_version.startswith('6.'):
libdir = subprocess.Popen([self.options['lhapdf'], '--libs'],
stdout = subprocess.PIPE).stdout.read().strip()
return libdir
class AskforEditCard(cmd.OneLinePathCompletion):
    """A class for asking a question where in addition you can have the
    set command define and modifying the param_card/run_card correctly"""

    # cards whose name can be given without the _card/.dat suffix in 'set'
    all_card_name = ['param_card', 'run_card', 'pythia_card', 'pythia8_card',
                     'madweight_card', 'MadLoopParams', 'shower_card']
    # shortcut -> ([expected argument types],
    #              [set commands to run, %(0)s replaced by the first argument])
    special_shortcut = {'ebeam':([float],['run_card ebeam1 %(0)s', 'run_card ebeam2 %(0)s']),
                        'lpp': ([int],['run_card lpp1 %(0)s', 'run_card lpp2 %(0)s' ]),
                        'lhc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
                        'lep': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
                        'ilc': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
                        'lcc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
                        'fixed_scale': ([float],['run_card fixed_fac_scale T', 'run_card fixed_ren_scale T', 'run_card scale %(0)s', 'run_card dsqrt_q2fact1 %(0)s' ,'run_card dsqrt_q2fact2 %(0)s']),
                        'simplepy8':([],['pythia8_card hadronlevel:all False',
                                         'pythia8_card partonlevel:mpi False',
                                         'pythia8_card BeamRemnants:primordialKT False',
                                         'pythia8_card PartonLevel:Remnants False',
                                         'pythia8_card Check:event False',
                                         'pythia8_card TimeShower:QEDshowerByQ False',
                                         'pythia8_card TimeShower:QEDshowerByL False',
                                         'pythia8_card SpaceShower:QEDshowerByQ False',
                                         'pythia8_card SpaceShower:QEDshowerByL False',
                                         'pythia8_card PartonLevel:FSRinResonances False',
                                         'pythia8_card ProcessLevel:resonanceDecays False',
                                         ]),
                        'mpi':([bool],['pythia8_card partonlevel:mpi %(0)s']),
                        'no_parton_cut':([],['run_card nocut T'])
                        }
    # free-text help displayed by do_help for each shortcut above
    special_shortcut_help = {
        'ebeam' : 'syntax: set ebeam VALUE:\n This parameter sets the energy to both beam to the value in GeV',
        'lpp'   : 'syntax: set ebeam VALUE:\n'+\
                  '   Set the type of beam to a given value for both beam\n'+\
                  '   0 : means no PDF\n'+\
                  '   1 : means proton PDF\n'+\
                  '  -1 : means antiproton PDF\n'+\
                  '   2 : means PDF for elastic photon emited from a proton\n'+\
                  '   3 : means PDF for elastic photon emited from an electron',
        'lhc'   : 'syntax: set lhc VALUE:\n    Set for a proton-proton collision with that given center of mass energy (in TeV)',
        'lep'   : 'syntax: set lep VALUE:\n    Set for a electron-positron collision with that given center of mass energy (in GeV)',
        'fixed_scale' : 'syntax: set fixed_scale VALUE:\n    Set all scales to the give value (in GeV)',
        'simplepy8' : 'Turn off non-perturbative slow features of Pythia8.',
        'mpi' : 'syntax: set mpi value: allow to turn mpi in Pythia8 on/off'
        }
def load_default(self):
""" define all default variable. No load of card here.
This allow to subclass this class and just change init and still have
all variables defined."""
self.me_dir = None
self.param_card = None
self.run_card = {}
self.pname2block = {}
self.conflict = []
self.restricted_value = {}
self.mode = ''
self.cards = []
self.run_set = []
self.has_mw = False
self.has_ml = False
self.has_shower = False
self.has_PY8 = False
self.paths = {}
def define_paths(self, **opt):
# Initiation
if 'pwd' in opt:
self.me_dir = opt['pwd']
elif 'mother_interface' in opt:
self.mother_interface = opt['mother_interface']
if not hasattr(self, 'me_dir') or not self.me_dir:
self.me_dir = self.mother_interface.me_dir
#define paths
self.paths['param'] = pjoin(self.me_dir,'Cards','param_card.dat')
self.paths['param_default'] = pjoin(self.me_dir,'Cards','param_card_default.dat')
self.paths['run'] = pjoin(self.me_dir,'Cards','run_card.dat')
self.paths['run_default'] = pjoin(self.me_dir,'Cards','run_card_default.dat')
self.paths['transfer'] =pjoin(self.me_dir,'Cards','transfer_card.dat')
self.paths['MadWeight'] =pjoin(self.me_dir,'Cards','MadWeight_card.dat')
self.paths['MadWeight_default'] =pjoin(self.me_dir,'Cards','MadWeight_card_default.dat')
self.paths['ML'] =pjoin(self.me_dir,'Cards','MadLoopParams.dat')
self.paths['shower'] = pjoin(self.me_dir,'Cards','shower_card.dat')
self.paths['shower_default'] = pjoin(self.me_dir,'Cards','shower_card_default.dat')
self.paths['FO_analyse'] = pjoin(self.me_dir,'Cards','FO_analyse_card.dat')
self.paths['FO_analyse_default'] = pjoin(self.me_dir,'Cards','FO_analyse_card_default.dat')
self.paths['pythia'] =pjoin(self.me_dir, 'Cards','pythia_card.dat')
self.paths['pythia8'] = pjoin(self.me_dir, 'Cards','pythia8_card.dat')
self.paths['pythia8_default'] = pjoin(self.me_dir, 'Cards','pythia8_card_default.dat')
self.paths['madspin_default'] = pjoin(self.me_dir,'Cards/madspin_card_default.dat')
self.paths['madspin'] = pjoin(self.me_dir,'Cards/madspin_card.dat')
self.paths['reweight'] = pjoin(self.me_dir,'Cards','reweight_card.dat')
self.paths['delphes'] = pjoin(self.me_dir,'Cards','delphes_card.dat')
self.paths['plot'] = pjoin(self.me_dir,'Cards','plot_card.dat')
self.paths['plot_default'] = pjoin(self.me_dir,'Cards','plot_card_default.dat')
self.paths['madanalysis5_parton'] = pjoin(self.me_dir,'Cards','madanalysis5_parton_card.dat')
self.paths['madanalysis5_hadron'] = pjoin(self.me_dir,'Cards','madanalysis5_hadron_card.dat')
self.paths['madanalysis5_parton_default'] = pjoin(self.me_dir,'Cards','madanalysis5_parton_card_default.dat')
self.paths['madanalysis5_hadron_default'] = pjoin(self.me_dir,'Cards','madanalysis5_hadron_card_default.dat')
self.paths['FO_analyse'] = pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat')
    def __init__(self, question, cards=[], mode='auto', *args, **opt):
        """Load every card listed in *cards*, collect the valid parameter
        names of each one and record the names that clash between cards
        (self.conflict).

        NOTE(review): *cards* has a mutable default; it is only read here
        (membership tests and storage), never mutated, so the shared default
        list is currently harmless.
        """
        self.load_default()
        self.define_paths(**opt)
        cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)

        # param_card: fall back on the default card when the current is broken
        try:
            self.param_card = check_param_card.ParamCard(self.paths['param'])
        except (check_param_card.InvalidParamCard, ValueError) as e:
            logger.error('Current param_card is not valid. We are going to use the default one.')
            logger.error('problem detected: %s' % e)
            files.cp(self.paths['param_default'], self.paths['param'])
            self.param_card = check_param_card.ParamCard(self.paths['param'])
        default_param = check_param_card.ParamCard(self.paths['param_default'])
        self.param_card_default = default_param

        # run_card (a missing file leaves an empty dict)
        try:
            self.run_card = banner_mod.RunCard(self.paths['run'], consistency='warning')
        except IOError:
            self.run_card = {}
        try:
            run_card_def = banner_mod.RunCard(self.paths['run_default'])
        except IOError:
            run_card_def = {}

        self.pname2block = {}
        self.conflict = []
        self.restricted_value = {}
        self.mode = mode
        self.cards = cards

        # Read the comment of the param_card_default to find name variable for
        # the param_card also check which value seems to be constrained in the
        # model.
        self.pname2block, self.restricted_value = \
                              default_param.analyze_param_card()

        if run_card_def:
            self.run_set = run_card_def.keys() + self.run_card.hidden_param
        elif self.run_card:
            self.run_set = self.run_card.keys()
        else:
            self.run_set = []
        # check for conflict with run_card
        for var in self.pname2block:
            if var in self.run_set:
                self.conflict.append(var)

        self.has_delphes = False
        if 'delphes_card.dat' in cards:
            self.has_delphes = True

        #check if Madweight_card is present:
        self.has_mw = False
        if 'madweight_card.dat' in cards:

            # expose the transfer-function commands of the mother interface
            self.do_change_tf = self.mother_interface.do_define_transfer_fct
            self.complete_change_tf = self.mother_interface.complete_define_transfer_fct
            self.help_change_tf = self.mother_interface.help_define_transfer_fct
            if not os.path.exists(self.paths['transfer']):
                logger.warning('No transfer function currently define. Please use the change_tf command to define one.')

            self.has_mw = True
            try:
                import madgraph.madweight.Cards as mwcards
            except:
                # NOTE(review): bare except -- intended as a fallback on the
                # 'internal' package but it also hides unrelated errors
                import internal.madweight.Cards as mwcards
            self.mw_card = mwcards.Card(self.paths['MadWeight'])
            self.mw_card = self.mw_card.info
            self.mw_vars = []
            for key in self.mw_card:
                if key == 'comment':
                    continue
                for key2 in self.mw_card.info[key]:
                    if isinstance(key2, str) and not key2.isdigit():
                        self.mw_vars.append(key2)

            # check for conflict with run_card/param_card
            for var in self.pname2block:
                if var in self.mw_vars:
                    self.conflict.append(var)
            for var in self.mw_vars:
                if var in self.run_card:
                    self.conflict.append(var)

        #check if MadLoopParams.dat is present:
        self.has_ml = False
        if os.path.isfile(self.paths['ML']):
            self.has_ml = True
            self.MLcard = banner_mod.MadLoopParam(self.paths['ML'])
            self.MLcardDefault = banner_mod.MadLoopParam()

            self.ml_vars = [k.lower() for k in self.MLcard.keys()]
            # check for conflict
            for var in self.ml_vars:
                if var in self.run_card:
                    self.conflict.append(var)
                if var in self.pname2block:
                    self.conflict.append(var)
                if self.has_mw and var in self.mw_vars:
                    self.conflict.append(var)

        #check if shower_card is present:
        self.has_shower = False
        if 'shower_card.dat' in cards:
            self.has_shower = True
            self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
            self.shower_vars = self.shower_card.keys()

            # check for conflict with run_card/param_card
            for var in self.pname2block:
                if var in self.shower_vars:
                    self.conflict.append(var)
            for var in self.shower_vars:
                if var in self.run_card:
                    self.conflict.append(var)

        #check if pythia8_card.dat is present:
        self.has_PY8 = False
        if 'pythia8_card.dat' in cards:
            self.has_PY8 = True
            self.PY8Card = banner_mod.PY8Card(self.paths['pythia8'])
            self.PY8CardDefault = banner_mod.PY8Card()

            self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
            # check for conflict
            for var in self.py8_vars:
                if var in self.run_card:
                    self.conflict.append(var)
                if var in self.pname2block:
                    self.conflict.append(var)
                if self.has_mw and var in self.mw_vars:
                    self.conflict.append(var)
                if self.has_ml and var in self.ml_vars:
                    self.conflict.append(var)
    def do_help(self, line, conflict_raise=False, banner=True):
        """Print contextual help for the card-editing interface.

        Depending on *line*, this prints help for:
          - a plain command (delegates to cmd.BasicCmd.do_help),
          - a special shortcut (entry of self.special_shortcut),
          - a whole card (run_card, param_card, madanalysis5_*, ...),
          - a single parameter of one of the cards (run/param/MadLoop/Pythia8).

        conflict_raise: set to True by recursive calls to avoid printing the
            "AMBIGUOUS NAME" warning more than once for the same parameter.
        banner: when True, frame the output with HELP begin/end banners
            (recursive calls pass False to suppress the inner banners).
        """
        if banner:
            logger.info('*** HELP MESSAGE ***', '$MG:color:BLACK')
        args = self.split_arg(line)
        # handle comand related help
        if len(args)==0 or (len(args) == 1 and hasattr(self, 'do_%s' % args[0])):
            out = cmd.BasicCmd.do_help(self, line)
            if len(args)==0:
                print 'Allowed Argument'
                print '================'
                print '\t'.join(self.allow_arg)
                print
                print 'Special shortcut: (type help <name>)'
                print '===================================='
                print ' syntax: set <name> <value>'
                print '\t'.join(self.special_shortcut)
                print
            if banner:
                logger.info('*** END HELP ***', '$MG:color:BLACK')
            return out
        # check for special shortcut.
        # special shortcut:
        if args[0] in self.special_shortcut:
            if args[0] in self.special_shortcut_help:
                print self.special_shortcut_help[args[0]]
            if banner:
                logger.info('*** END HELP ***', '$MG:color:BLACK')
            return
        # Normalise the first argument to a full card name so that e.g.
        # "help run", "help run_card" and "help run_card.dat" all match.
        start = 0
        card = ''
        if args[0]+'_card' in self.all_card_name+ self.cards:
            args[0] += '_card'
        elif args[0]+'.dat' in self.all_card_name+ self.cards:
            args[0] += '.dat'
        elif args[0]+'_card.dat' in self.all_card_name+ self.cards:
            args[0] += '_card.dat'
        if args[0] in self.all_card_name + self.cards:
            start += 1
            card = args[0]
            if len(args) == 1:
                if args[0] == 'pythia8_card':
                    args[0] = 'PY8Card'
                if args[0] == 'param_card':
                    logger.info("Param_card information: ", '$MG:color:BLUE')
                    print "File to define the various model parameter"
                    logger.info("List of the Block defined:",'$MG:color:BLUE')
                    print "\t".join(self.param_card.keys())
                elif args[0].startswith('madanalysis5'):
                    print 'This card allow to make plot with the madanalysis5 package'
                    print 'An example card is provided. For more information about the '
                    print 'syntax please refer to: https://madanalysis.irmp.ucl.ac.be/'
                    print 'or to the user manual [arXiv:1206.1599]'
                    if args[0].startswith('madanalysis5_hadron'):
                        print
                        print 'This card also allow to make recasting analysis'
                        print 'For more detail, see: arXiv:1407.3278'
                elif hasattr(self, args[0]):
                    # NOTE(review): eval on a known attribute name only; args[0]
                    # was validated against self.all_card_name + self.cards above.
                    logger.info("%s information: " % args[0], '$MG:color:BLUE')
                    print(eval('self.%s' % args[0]).__doc__)
                    logger.info("List of parameter associated", '$MG:color:BLUE')
                    print "\t".join(eval('self.%s' % args[0]).keys())
                if banner:
                    logger.info('*** END HELP ***', '$MG:color:BLACK')
                return
        #### RUN CARD
        if args[start] in [l.lower() for l in self.run_card.keys()] and card in ['', 'run_card']:
            if args[start] not in self.run_set:
                args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
            if args[start] in self.conflict and not conflict_raise:
                conflict_raise = True
                logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
                if card == '':
                    logger.info('** If not explicitely speficy this parameter will modif the run_card file', '$MG:color:BLACK')
            self.run_card.do_help(args[start])
        ### PARAM_CARD WITH BLOCK NAME -----------------------------------------
        elif (args[start] in self.param_card or args[start] == 'width') \
                                                  and card in ['','param_card']:
            if args[start] in self.conflict and not conflict_raise:
                conflict_raise = True
                logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
                if card == '':
                    logger.info('** If not explicitely speficy this parameter will modif the param_card file', '$MG:color:BLACK')
            # 'width' is an alias for the 'decay' block of the param_card
            if args[start] == 'width':
                args[start] = 'decay'
            if len(args) == start+1:
                self.param_card.do_help(args[start], tuple())
                key = None
            elif args[start+1] in self.pname2block:
                all_var = self.pname2block[args[start+1]]
                key = None
                # for/else: warn only when no block name matched (no break)
                for bname, lhaid in all_var:
                    if bname == args[start]:
                        key = lhaid
                        break
                else:
                    logger.warning('%s is not part of block "%s" but "%s". please correct.' %
                                    (args[start+1], args[start], bname))
            else:
                try:
                    key = tuple([int(i) for i in args[start+1:]])
                except ValueError:
                    logger.warning('Failed to identify LHA information')
                    return
            if key in self.param_card[args[start]].param_dict:
                self.param_card.do_help(args[start], key, default=self.param_card_default)
            elif key:
                logger.warning('invalid information: %s not defined in the param_card' % (key,))
        # PARAM_CARD NO BLOCK NAME ---------------------------------------------
        elif args[start] in self.pname2block and card in ['','param_card']:
            if args[start] in self.conflict and not conflict_raise:
                conflict_raise = True
                logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
                if card == '':
                    logger.info('** If not explicitely speficy this parameter will modif the param_card file', '$MG:color:BLACK')
            # resolve the bare parameter name to (block, lhaid) and recurse
            all_var = self.pname2block[args[start]]
            for bname, lhaid in all_var:
                new_line = 'param_card %s %s %s' % (bname,
                   ' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
                self.do_help(new_line, conflict_raise=True, banner=False)
        # MadLoop Parameter  ---------------------------------------------------
        elif self.has_ml and args[start] in self.ml_vars \
                                               and card in ['', 'MadLoop_card']:
            if args[start] in self.conflict and not conflict_raise:
                conflict_raise = True
                logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
                if card == '':
                    logger.info('** If not explicitely speficy this parameter will modif the madloop_card file', '$MG:color:BLACK')
            self.MLcard.do_help(args[start])
        # Pythia8 Parameter  ---------------------------------------------------
        elif self.has_PY8 and args[start] in self.PY8Card:
            if args[start] in self.conflict and not conflict_raise:
                conflict_raise = True
                logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
                if card == '':
                    logger.info('** If not explicitely speficy this parameter will modif the pythia8_card file', '$MG:color:BLACK')
            self.PY8Card.do_help(args[start])
        elif card.startswith('madanalysis5'):
            print 'MA5'
        else:
            print "no help available"
        if banner:
            logger.info('*** END HELP ***', '$MG:color:BLACK')
        #raw_input('press enter to quit the help')
        return
def complete_help(self, text, line, begidx, endidx):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
# try:
possibilities = self.complete_set(text, line, begidx, endidx,formatting=False)
if line[:begidx].strip() == 'help':
possibilities['Defined command'] = cmd.BasicCmd.completenames(self, text, line)#, begidx, endidx)
possibilities.update(self.complete_add(text, line, begidx, endidx,formatting=False))
return self.deal_multiple_categories(possibilities)
# except Exception, error:
# import traceback
# traceback.print_exc()
# print error
def complete_update(self, text, line, begidx, endidx):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
arg = line[:begidx].split()
if len(arg) <=1:
return self.list_completion(text, ['dependent', 'missing', 'to_slha1', 'to_slha2'], line)
    def complete_set(self, text, line, begidx, endidx, formatting=True):
        """ Complete the set command

        Two-stage completion:
          1. Parse the words before the cursor and decide which categories of
             candidates are 'allowed' (card names, block names, LHA ids,
             parameter names, special values ...).
          2. Build one candidate list per allowed category and hand them to
             deal_multiple_categories for display.
        """
        prev_timer = signal.alarm(0) # avoid timer if any
        if prev_timer:
            nb_back = len(line)
            self.stdout.write('\b'*nb_back + '[timer stopped]\n')
            self.stdout.write(line)
            self.stdout.flush()
        possibilities = {}
        allowed = {}
        args = self.split_arg(line[0:begidx])
        # nothing to complete after an explicit special value
        if args[-1] in ['Auto', 'default']:
            return
        # --- stage 1: decide which categories apply at this word position ---
        if len(args) == 1:
            allowed = {'category':'', 'run_card':'', 'block':'all', 'param_card':'','shortcut':''}
            if self.has_mw:
                allowed['madweight_card'] = ''
                allowed['mw_block'] = 'all'
            if self.has_shower:
                allowed['shower_card'] = ''
            if self.has_ml:
                allowed['madloop_card'] = ''
            if self.has_PY8:
                allowed['pythia8_card'] = ''
            if self.has_delphes:
                allowed['delphes_card'] = ''
        elif len(args) == 2:
            if args[1] == 'run_card':
                allowed = {'run_card':'default'}
            elif args[1] == 'param_card':
                allowed = {'block':'all', 'param_card':'default'}
            elif args[1] in self.param_card.keys():
                allowed = {'block':args[1]}
            elif args[1] == 'width':
                allowed = {'block': 'decay'}
            elif args[1] == 'MadWeight_card':
                allowed = {'madweight_card':'default', 'mw_block': 'all'}
            elif args[1] == 'MadLoop_card':
                allowed = {'madloop_card':'default'}
            elif args[1] == 'pythia8_card':
                allowed = {'pythia8_card':'default'}
            elif self.has_mw and args[1] in self.mw_card.keys():
                allowed = {'mw_block':args[1]}
            elif args[1] == 'shower_card':
                allowed = {'shower_card':'default'}
            elif args[1] == 'delphes_card':
                allowed = {'delphes_card':'default'}
            else:
                allowed = {'value':''}
        else:
            # skip the card name, if one was given, when locating the parameter
            start = 1
            if args[1] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
                           'MadLoop_card','pythia8_card','delphes_card','plot_card',
                           'madanalysis5_parton_card','madanalysis5_hadron_card']:
                start = 2
            if args[-1] in self.pname2block.keys():
                allowed['value'] = 'default'
            elif args[start] in self.param_card.keys() or args[start] == 'width':
                if args[start] == 'width':
                    args[start] = 'decay'
                if args[start+1:]:
                    allowed = {'block':(args[start], args[start+1:])}
                else:
                    allowed = {'block':args[start]}
            elif self.has_mw and args[start] in self.mw_card.keys():
                if args[start+1:]:
                    allowed = {'mw_block':(args[start], args[start+1:])}
                else:
                    allowed = {'mw_block':args[start]}
            #elif len(args) == start +1:
            #        allowed['value'] = ''
            else:
                allowed['value'] = ''
        # --- stage 2: build one completion list per allowed category ---
        if 'category' in allowed.keys():
            categories = ['run_card', 'param_card']
            if self.has_mw:
                categories.append('MadWeight_card')
            if self.has_shower:
                categories.append('shower_card')
            if self.has_ml:
                categories.append('MadLoop_card')
            if self.has_PY8:
                categories.append('pythia8_card')
            if self.has_delphes:
                categories.append('delphes_card')
            possibilities['category of parameter (optional)'] = \
                          self.list_completion(text, categories)
        if 'shortcut' in allowed.keys():
            possibilities['special values'] = self.list_completion(text, self.special_shortcut.keys()+['qcut', 'showerkt'])
        if 'run_card' in allowed.keys():
            opts = self.run_set
            if allowed['run_card'] == 'default':
                opts.append('default')
            possibilities['Run Card'] = self.list_completion(text, opts)
        if 'param_card' in allowed.keys():
            opts = self.pname2block.keys()
            if allowed['param_card'] == 'default':
                opts.append('default')
            possibilities['Param Card'] = self.list_completion(text, opts)
        if 'madweight_card' in allowed.keys():
            opts = self.mw_vars + [k for k in self.mw_card.keys() if k !='comment']
            if allowed['madweight_card'] == 'default':
                opts.append('default')
            possibilities['MadWeight Card'] = self.list_completion(text, opts)
        if 'madloop_card' in allowed.keys():
            opts = self.ml_vars
            if allowed['madloop_card'] == 'default':
                opts.append('default')
            possibilities['MadLoop Parameter'] = self.list_completion(text, opts)
        if 'pythia8_card' in allowed.keys():
            opts = self.py8_vars
            if allowed['pythia8_card'] == 'default':
                opts.append('default')
            possibilities['Pythia8 Parameter'] = self.list_completion(text, opts)
        if 'shower_card' in allowed.keys():
            opts = self.shower_vars + [k for k in self.shower_card.keys() if k !='comment']
            if allowed['shower_card'] == 'default':
                opts.append('default')
            possibilities['Shower Card'] = self.list_completion(text, opts)
        if 'delphes_card' in allowed:
            # NOTE(review): if allowed['delphes_card'] != 'default', 'opts'
            # here is whatever a previous branch left behind (or unbound) —
            # looks like a latent bug; verify intended candidate list.
            if allowed['delphes_card'] == 'default':
                opts = ['default', 'atlas', 'cms']
            possibilities['Delphes Card'] = self.list_completion(text, opts)
        if 'value' in allowed.keys():
            opts = ['default']
            if 'decay' in args:
                opts.append('Auto')
                opts.append('Auto@NLO')
            elif args[-1] in self.pname2block and self.pname2block[args[-1]][0][0] == 'decay':
                opts.append('Auto')
                opts.append('Auto@NLO')
            possibilities['Special Value'] = self.list_completion(text, opts)
        if 'block' in allowed.keys():
            if allowed['block'] == 'all':
                allowed_block = [i for i in self.param_card.keys() if 'qnumbers' not in i]
                allowed_block.append('width')
                possibilities['Param Card Block' ] = \
                                       self.list_completion(text, allowed_block)
            elif isinstance(allowed['block'], basestring):
                # completing the first LHA index inside a named block
                block = self.param_card[allowed['block']].param_dict
                ids = [str(i[0]) for i in block
                          if (allowed['block'], i) not in self.restricted_value]
                possibilities['Param Card id' ] = self.list_completion(text, ids)
                varname = [name for name, all_var in self.pname2block.items()
                                               if any((bname == allowed['block']
                                                      for bname,lhaid in all_var))]
                possibilities['Param card variable'] = self.list_completion(text,
                                                                        varname)
            else:
                # completing the next LHA index, given indices typed so far
                block = self.param_card[allowed['block'][0]].param_dict
                nb = len(allowed['block'][1])
                ids = [str(i[nb]) for i in block if len(i) > nb and \
                            [str(a) for a in i[:nb]] == allowed['block'][1]]
                if not ids:
                    if tuple([int(i) for i in allowed['block'][1]]) in block:
                        opts = ['default']
                        if allowed['block'][0] == 'decay':
                            opts.append('Auto')
                            opts.append('Auto@NLO')
                        possibilities['Special value'] = self.list_completion(text, opts)
                possibilities['Param Card id' ] = self.list_completion(text, ids)
        if 'mw_block' in allowed.keys():
            if allowed['mw_block'] == 'all':
                allowed_block = [i for i in self.mw_card.keys() if 'comment' not in i]
                possibilities['MadWeight Block' ] = \
                                       self.list_completion(text, allowed_block)
            elif isinstance(allowed['mw_block'], basestring):
                block = self.mw_card[allowed['mw_block']]
                ids = [str(i[0]) if isinstance(i, tuple) else str(i) for i in block]
                possibilities['MadWeight Card id' ] = self.list_completion(text, ids)
            else:
                block = self.mw_card[allowed['mw_block'][0]]
                nb = len(allowed['mw_block'][1])
                ids = [str(i[nb]) for i in block if isinstance(i, tuple) and\
                           len(i) > nb and \
                           [str(a) for a in i[:nb]] == allowed['mw_block'][1]]
                if not ids:
                    if tuple([i for i in allowed['mw_block'][1]]) in block or \
                                      allowed['mw_block'][1][0] in block.keys():
                        opts = ['default']
                        possibilities['Special value'] = self.list_completion(text, opts)
                possibilities['MadWeight Card id' ] = self.list_completion(text, ids)
        return self.deal_multiple_categories(possibilities, formatting)
    def do_set(self, line):
        """ edit the value of one parameter in the card

        Dispatches on the arguments:
          - special shortcuts (self.special_shortcut) expand to one or more
            recursive 'set' commands;
          - 'qcut' / 'showerkt' edit pythia_card.dat in place;
          - '<card> default' restores a card from its default file;
          - otherwise the parameter is located in the relevant card
            (run/param/MadWeight/shower/MadLoop/Pythia8), set via the
            matching setR/setP/setM/setML/setPY8 helper, and the card file
            is rewritten on disk.
        """
        args = self.split_arg(line)
        if len(args) == 0:
            logger.warning("No argument. For help type 'help set'.")
        # fix some formatting problem
        if len(args)==1 and '=' in args[-1]:
            arg1, arg2 = args.pop(-1).split('=',1)
            args += [arg1, arg2]
        if '=' in args:
            args.remove('=')
        # everything but the value is case-insensitive
        args[:-1] = [ a.lower() for a in args[:-1]]
        # special shortcut:
        if args[0] in self.special_shortcut:
            # NOTE(review): 'cmd' here shadows the imported cmd module for the
            # rest of this branch — it is the list of command templates.
            targettypes , cmd = self.special_shortcut[args[0]]
            if len(args) != len(targettypes) +1:
                logger.warning('shortcut %s requires %s argument' % (args[0], len(targettypes)))
                if len(args) < len(targettypes) +1:
                    return
                else:
                    logger.warning('additional argument will be ignored')
            values ={}
            for i, argtype in enumerate(targettypes):
                try:
                    # NOTE(review): this rebinds 'values' each iteration, so
                    # only the last argument survives — suspicious for
                    # multi-argument shortcuts; verify intended behaviour.
                    values = {str(i): banner_mod.ConfigFile.format_variable(args[i+1], argtype, args[0])}
                except ValueError as e:
                    logger.warning("Wrong argument: The entry #%s should be of type %s.", i+1, argtype)
                    return
            #else:
            #    logger.warning("too many argument for this command")
            #    return
            for arg in cmd:
                try:
                    text = arg % values
                except KeyError:
                    logger.warning("This command requires one argument")
                    return
                except Exception as e:
                    logger.warning(str(e))
                    return
                else:
                    self.do_set(arg % values)
            return
        start = 0
        if len(args) < 2:
            logger.warning('Invalid set command %s (need two arguments)' % line)
            return
        # Special case for the qcut value
        if args[0].lower() == 'qcut':
            pythia_path = self.paths['pythia']
            if os.path.exists(pythia_path):
                logger.info('add line QCUT = %s in pythia_card.dat' % args[1])
                p_card = open(pythia_path,'r').read()
                p_card, n = re.subn('''^\s*QCUT\s*=\s*[\de\+\-\.]*\s*$''',
                                    ''' QCUT = %s ''' % args[1], \
                                    p_card, flags=(re.M+re.I))
                if n==0:
                    # no existing QCUT line matched: append one
                    p_card = '%s \n QCUT= %s' % (p_card, args[1])
                with open(pythia_path, 'w') as fsock:
                    fsock.write(p_card)
                return
        # Special case for the showerkt value
        if args[0].lower() == 'showerkt':
            pythia_path = self.paths['pythia']
            if os.path.exists(pythia_path):
                logger.info('add line SHOWERKT = %s in pythia_card.dat' % args[1].upper())
                p_card = open(pythia_path,'r').read()
                p_card, n = re.subn('''^\s*SHOWERKT\s*=\s*[default\de\+\-\.]*\s*$''',
                                    ''' SHOWERKT = %s ''' % args[1].upper(), \
                                    p_card, flags=(re.M+re.I))
                if n==0:
                    p_card = '%s \n SHOWERKT= %s' % (p_card, args[1].upper())
                with open(pythia_path, 'w') as fsock:
                    fsock.write(p_card)
                return
        card = '' #store which card need to be modify (for name conflict)
        if args[0] == 'madweight_card':
            if not self.mw_card:
                logger.warning('Invalid Command: No MadWeight card defined.')
                return
            args[0] = 'MadWeight_card'
        if args[0] == 'shower_card':
            if not self.shower_card:
                logger.warning('Invalid Command: No Shower card defined.')
                return
            args[0] = 'shower_card'
        if args[0] == "madloop_card":
            if not self.has_ml:
                logger.warning('Invalid Command: No MadLoopParam card defined.')
                return
            args[0] = 'MadLoop_card'
        if args[0] == "pythia8_card":
            if not self.has_PY8:
                logger.warning('Invalid Command: No Pythia8 card defined.')
                return
            args[0] = 'pythia8_card'
        if args[0] == 'delphes_card':
            if not self.has_delphes:
                logger.warning('Invalid Command: No Delphes card defined.')
                return
            if args[1] == 'atlas':
                logger.info("set default ATLAS configuration for Delphes", '$MG:color:BLACK')
                files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_ATLAS.dat'),
                         pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
                return
            elif args[1] == 'cms':
                logger.info("set default CMS configuration for Delphes",'$MG:color:BLACK')
                files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_CMS.dat'),
                         pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
                return
        # '<card> default' restores the default card; otherwise remember which
        # card was named so later branches can resolve name conflicts.
        if args[0] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
                       'delphes_card','madanalysis5_hadron_card','madanalysis5_parton_card']:
            if args[1] == 'default':
                logger.info('replace %s by the default card' % args[0],'$MG:color:BLACK')
                files.cp(self.paths['%s_default' %args[0][:-5]], self.paths[args[0][:-5]])
                if args[0] == 'param_card':
                    self.param_card = check_param_card.ParamCard(self.paths['param'])
                elif args[0] == 'run_card':
                    self.run_card = banner_mod.RunCard(self.paths['run'])
                elif args[0] == 'shower_card':
                    self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
                return
            else:
                card = args[0]
            start=1
            if len(args) < 3:
                logger.warning('Invalid set command: %s (not enough arguments)' % line)
                return
        elif args[0] in ['MadLoop_card']:
            if args[1] == 'default':
                logger.info('replace MadLoopParams.dat by the default card','$MG:color:BLACK')
                self.MLcard = banner_mod.MadLoopParam(self.MLcardDefault)
                self.MLcard.write(self.paths['ML'],
                                  commentdefault=True)
                return
            else:
                card = args[0]
            start=1
            if len(args) < 3:
                logger.warning('Invalid set command: %s (not enough arguments)' % line)
                return
        elif args[0] in ['pythia8_card']:
            if args[1] == 'default':
                logger.info('replace pythia8_card.dat by the default card','$MG:color:BLACK')
                self.PY8Card = banner_mod.PY8Card(self.PY8CardDefault)
                self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                          pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                          print_only_visible=True)
                return
            else:
                card = args[0]
            start=1
            if len(args) < 3:
                logger.warning('Invalid set command: %s (not enough arguments)' % line)
                return
        elif args[0] in ['madspin_card']:
            if args[1] == 'default':
                logger.info('replace madspin_card.dat by the default card','$MG:color:BLACK')
                files.cp(self.paths['MS_default'], self.paths['madspin'])
                return
            else:
                logger.warning("""Command set not allowed for modifying the madspin_card.
                    Check the command \"decay\" instead.""")
                return
        #### RUN CARD
        if args[start] in [l.lower() for l in self.run_card.keys()] and card in ['', 'run_card']:
            if args[start] not in self.run_set:
                args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
            if args[start] in self.conflict and card == '':
                text = 'Ambiguous name (present in more than one card). Will assume it to be referred to run_card.\n'
                text += 'If this is not intended, please reset it in the run_card and specify the relevant card to \n'
                text += 'edit, in the format < set card parameter value >'
                logger.warning(text)
            if args[start+1] == 'default':
                default = banner_mod.RunCard(self.paths['run_default'])
                if args[start] in default.keys():
                    self.setR(args[start],default[args[start]])
                else:
                    logger.info('remove information %s from the run_card' % args[start],'$MG:color:BLACK')
                    del self.run_card[args[start]]
            else:
                # list/dict parameters take the remainder of the line (minus
                # any trailing '#' comment) as their value
                if args[0].startswith('sys_') or \
                   args[0] in self.run_card.list_parameter or \
                   args[0] in self.run_card.dict_parameter:
                    val = ' '.join(args[start+1:])
                    val = val.split('#')[0]
                else:
                    val = args[start+1]
                self.setR(args[start], val)
            self.run_card.write(self.paths['run'], self.paths['run_default'])
        # special mode for set run_card nocut T (generated by set no_parton_cut
        elif card == 'run_card' and args[start] in ['nocut', 'no_cut']:
            logger.info("Going to remove all cuts from the run_card", '$MG:color:BLACK')
            self.run_card.remove_all_cut()
            self.run_card.write(self.paths['run'], self.paths['run_default'])
        ### PARAM_CARD WITH BLOCK NAME -----------------------------------------
        elif (args[start] in self.param_card or args[start] == 'width') \
                                                  and card in ['','param_card']:
            #special treatment for scan
            if any(t.startswith('scan') for t in args):
                index = [i for i,t in enumerate(args) if t.startswith('scan')][0]
                args = args[:index] + [' '.join(args[index:])]
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                text += ' in the format < set card parameter value>'
                logger.warning(text)
                return
            # 'width' is an alias for the 'decay' block
            if args[start] == 'width':
                args[start] = 'decay'
            if args[start+1] in self.pname2block:
                all_var = self.pname2block[args[start+1]]
                key = None
                # for/else: error out only when no block name matched (no break)
                for bname, lhaid in all_var:
                    if bname == args[start]:
                        key = lhaid
                        break
                else:
                    logger.warning('%s is not part of block "%s" but "%s". please correct.' %
                                    (args[start+1], args[start], bname))
                    return
            else:
                try:
                    key = tuple([int(i) for i in args[start+1:-1]])
                except ValueError:
                    # 'set decay all <value>' sets every (non-restricted) width
                    if args[start] == 'decay' and args[start+1:-1] == ['all']:
                        for key in self.param_card[args[start]].param_dict:
                            if (args[start], key) in self.restricted_value:
                                continue
                            else:
                                self.setP(args[start], key, args[-1])
                        self.param_card.write(self.paths['param'])
                        return
                    logger.warning('invalid set command %s (failed to identify LHA information)' % line)
                    return
            if key in self.param_card[args[start]].param_dict:
                if (args[start], key) in self.restricted_value:
                    text = "Note that this parameter seems to be ignore by MG.\n"
                    text += "MG will use instead the expression: %s\n" % \
                                      self.restricted_value[(args[start], key)]
                    text += "You need to match this expression for external program (such pythia)."
                    logger.warning(text)
                if args[-1].lower() in ['default', 'auto', 'auto@nlo'] or args[-1].startswith('scan'):
                    self.setP(args[start], key, args[-1])
                else:
                    try:
                        value = float(args[-1])
                    except Exception:
                        logger.warning('Invalid input: Expected number and not \'%s\'' \
                                                                     % args[-1])
                        return
                    self.setP(args[start], key, value)
            else:
                logger.warning('invalid set command %s' % line)
                return
            self.param_card.write(self.paths['param'])
        # PARAM_CARD NO BLOCK NAME ---------------------------------------------
        elif args[start] in self.pname2block and card in ['','param_card']:
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                text += ' in the format < set card parameter value>'
                logger.warning(text)
                return
            # resolve the bare parameter name to (block, lhaid) and recurse
            all_var = self.pname2block[args[start]]
            for bname, lhaid in all_var:
                new_line = 'param_card %s %s %s' % (bname,
                   ' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
                self.do_set(new_line)
            if len(all_var) > 1:
                logger.warning('This variable correspond to more than one parameter in the param_card.')
                for bname, lhaid in all_var:
                    logger.warning(' %s %s' % (bname, ' '.join([str(i) for i in lhaid])))
                logger.warning('all listed variables have been modified')
        # MadWeight_card with block name ---------------------------------------
        elif self.has_mw and (args[start] in self.mw_card and args[start] != 'comment') \
                                              and card in ['','MadWeight_card']:
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                text += ' in the format < set card parameter value>'
                logger.warning(text)
                return
            block = args[start]
            name = args[start+1]
            value = args[start+2:]
            self.setM(block, name, value)
            self.mw_card.write(self.paths['MadWeight'])
        # MadWeight_card NO Block name -----------------------------------------
        elif self.has_mw and args[start] in self.mw_vars \
                                             and card in ['', 'MadWeight_card']:
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                text += ' in the format < set card parameter value>'
                logger.warning(text)
                return
            block = [b for b, data in self.mw_card.items() if args[start] in data]
            if len(block) > 1:
                logger.warning('%s is define in more than one block: %s.Please specify.'
                               % (args[start], ','.join(block)))
                return
            block = block[0]
            name = args[start]
            value = args[start+1:]
            self.setM(block, name, value)
            self.mw_card.write(self.paths['MadWeight'])
        # MadWeight_card New Block ---------------------------------------------
        elif self.has_mw and args[start].startswith('mw_') and len(args[start:]) == 3\
                                                   and card == 'MadWeight_card':
            block = args[start]
            name = args[start+1]
            value = args[start+2]
            self.setM(block, name, value)
            self.mw_card.write(self.paths['MadWeight'])
        #### SHOWER CARD
        elif self.has_shower and args[start].lower() in [l.lower() for l in \
                       self.shower_card.keys()] and card in ['', 'shower_card']:
            if args[start] not in self.shower_card:
                args[start] = [l for l in self.shower_card if l.lower() == args[start].lower()][0]
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                text += ' in the format < set card parameter value>'
                logger.warning(text)
                return
            if args[start+1].lower() == 'default':
                default = shower_card_mod.ShowerCard(self.paths['shower_default'])
                if args[start] in default.keys():
                    self.shower_card.set_param(args[start],default[args[start]], self.paths['shower'])
                else:
                    logger.info('remove information %s from the shower_card' % args[start],'$MG:color:BLACK')
                    del self.shower_card[args[start]]
            elif args[start+1].lower() in ['t','.true.','true']:
                self.shower_card.set_param(args[start],'.true.',self.paths['shower'])
            elif args[start+1].lower() in ['f','.false.','false']:
                self.shower_card.set_param(args[start],'.false.',self.paths['shower'])
            elif args[start] in ['analyse', 'extralibs', 'extrapaths', 'includepaths'] or\
                                                  args[start].startswith('dm_'):
                #case sensitive parameters
                args = line.split()
                args_str = ' '.join(str(a) for a in args[start+1:len(args)])
                self.shower_card.set_param(args[start],args_str,pjoin(self.me_dir,'Cards','shower_card.dat'))
            else:
                args_str = ' '.join(str(a) for a in args[start+1:len(args)])
                self.shower_card.set_param(args[start],args_str,self.paths['shower'])
        # MadLoop Parameter  ---------------------------------------------------
        elif self.has_ml and args[start] in self.ml_vars \
                                               and card in ['', 'MadLoop_card']:
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                logger.warning(text)
                return
            if args[start+1] == 'default':
                value = self.MLcardDefault[args[start]]
                default = True
            else:
                value = args[start+1]
                default = False
            self.setML(args[start], value, default=default)
            self.MLcard.write(self.paths['ML'],
                              commentdefault=True)
        # Pythia8 Parameter  ---------------------------------------------------
        elif self.has_PY8 and (card == 'pythia8_card' or (card == '' and \
                                                 args[start] in self.PY8Card)):
            if args[start] in self.conflict and card == '':
                text = 'ambiguous name (present in more than one card). Please specify which card to edit'
                logger.warning(text)
                return
            if args[start+1] == 'default':
                value = self.PY8CardDefault[args[start]]
                default = True
            else:
                value = ' '.join(args[start+1:])
                default = False
            self.setPY8(args[start], value, default=default)
            self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                          pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                          print_only_visible=True)
        #INVALID --------------------------------------------------------------
        else:
            logger.warning('invalid set command %s ' % line)
            arg = args[start].lower()
            # suggest near-miss parameter names from PY8 / run_card
            if self.has_PY8:
                close_opts = [name for name in self.PY8Card if name.lower().startswith(arg[:3]) or arg in name.lower()]
                if close_opts:
                    logger.info('Did you mean one of the following PY8 options:\n%s' % '\t'.join(close_opts))
            if self.run_card:
                close_opts = [name for name in self.run_card if name.lower().startswith(arg[:3]) or arg in name.lower()]
                if close_opts:
                    logger.info('Did you mean one of the following run_card options:\n%s' % '\t'.join(close_opts))
            return
def setM(self, block, name, value):
if isinstance(value, list) and len(value) == 1:
value = value[0]
if block not in self.mw_card:
logger.warning('block %s was not present in the current MadWeight card. We are adding it' % block)
self.mw_card[block] = {}
elif name not in self.mw_card[block]:
logger.info('name %s was not present in the block %s for the current MadWeight card. We are adding it' % (name,block),'$MG:color:BLACK')
if value == 'default':
import madgraph.madweight.Cards as mwcards
mw_default = mwcards.Card(self.paths['MadWeight_default'])
try:
value = mw_default[block][name]
except KeyError:
logger.info('removing id "%s" from Block "%s" '% (name, block),'$MG:color:BLACK')
if name in self.mw_card[block]:
del self.mw_card[block][name]
return
if value:
logger.info('modify madweight_card information BLOCK "%s" with id "%s" set to %s',
block, name, value, '$MG:color:BLACK')
else:
logger.warning("Invalid command: No value. To set default value. Use \"default\" as value")
return
self.mw_card[block][name] = value
def setR(self, name, value):
logger.info('modify parameter %s of the run_card.dat to %s' % (name, value),'$MG:color:BLACK')
self.run_card.set(name, value, user=True)
def setML(self, name, value, default=False):
try:
self.MLcard.set(name, value, user=True)
except Exception, error:
logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
return
logger.info('modify parameter %s of the MadLoopParam.dat to %s' % (name, value),'$MG:color:BLACK')
if default and name.lower() in self.MLcard.user_set:
self.MLcard.user_set.remove(name.lower())
def setPY8(self, name, value, default=False):
try:
self.PY8Card.userSet(name, value)
except Exception, error:
logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
return
logger.info('modify parameter %s of the pythia8_card.dat to %s' % (name, value), '$MG:color:BLACK')
if default and name.lower() in self.PY8Card.user_set:
self.PY8Card.user_set.remove(name.lower())
def setP(self, block, lhaid, value):
if isinstance(value, str):
value = value.lower()
if value == 'default':
default = check_param_card.ParamCard(self.paths['param_default'])
value = default[block].param_dict[lhaid].value
elif value in ['auto', 'auto@nlo']:
if 'nlo' in value:
value = 'Auto@NLO'
else:
value = 'Auto'
if block != 'decay':
logger.warning('Invalid input: \'Auto\' value only valid for DECAY')
return
elif value.startswith('scan'):
if ':' not in value:
logger.warning('Invalid input: \'scan\' mode requires a \':\' before the definition.')
return
tag = value.split(':')[0]
tag = tag[4:].strip()
if tag and not tag.isdigit():
logger.warning('Invalid input: scan tag need to be integer and not "%s"' % tag)
return
pass
else:
try:
value = float(value)
except ValueError:
logger.warning('Invalid input: \'%s\' not valid intput.'% value)
logger.info('modify param_card information BLOCK %s with id %s set to %s' %\
(block, lhaid, value), '$MG:color:BLACK')
self.param_card[block].param_dict[lhaid].value = value
def check_card_consistency(self):
"""This is run on quitting the class. Apply here all the self-consistency
rule that you want. Do the modification via the set command."""
# if NLO reweighting is ON: ensure that we keep the rwgt information
if 'reweight' in self.allow_arg and 'run' in self.allow_arg and \
isinstance(self.run_card,banner_mod.RunCardNLO) and \
not self.run_card['store_rwgt_info']:
#check if a NLO reweighting is required
re_pattern = re.compile(r'''^\s*change\s*mode\s* (LO\+NLO|LO|NLO|NLO_tree)\s*(?:#|$)''', re.M+re.I)
text = open(self.paths['reweight']).read()
options = re_pattern.findall(text)
if any(o in ['NLO', 'LO+NLO'] for o in options):
logger.info('NLO reweighting is on ON. Automatically set store_rwgt_info to True', '$MG:color:BLACK' )
self.do_set('run_card store_rwgt_info True')
# if external computation for the systematics are asked then switch
#automatically the book-keeping of the weight for NLO
if 'run' in self.allow_arg and \
self.run_card['systematics_program'] == 'systematics' and \
isinstance(self.run_card,banner_mod.RunCardNLO) and \
not self.run_card['store_rwgt_info']:
logger.warning('To be able to run systematics program, we set store_rwgt_info to True')
self.do_set('run_card store_rwgt_info True')
# @LO if PY6 shower => event_norm on sum
if 'pythia_card.dat' in self.cards:
if self.run_card['event_norm'] != 'sum':
logger.info('Pythia6 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
self.do_set('run_card event_norm sum')
# @LO if PY6 shower => event_norm on sum
elif 'pythia8_card.dat' in self.cards:
if self.run_card['event_norm'] == 'sum':
logger.info('Pythia8 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
self.do_set('run_card event_norm average')
# Check the extralibs flag.
if self.has_shower and isinstance(self.run_card, banner_mod.RunCardNLO):
modify_extralibs, modify_extrapaths = False,False
extralibs = self.shower_card['extralibs'].split()
extrapaths = self.shower_card['extrapaths'].split()
# remove default stdhep/Fmcfio for recent shower
if self.run_card['parton_shower'] in ['PYTHIA8', 'HERWIGPP', 'HW7']:
if 'stdhep' in self.shower_card['extralibs']:
extralibs.remove('stdhep')
modify_extralibs = True
if 'Fmcfio' in self.shower_card['extralibs']:
extralibs.remove('Fmcfio')
modify_extralibs = True
if self.run_card['parton_shower'] == 'PYTHIA8':
# First check sanity of PY8
if not self.mother_interface.options['pythia8_path']:
raise self.mother_interface.InvalidCmd, 'Pythia8 is not correctly specified to MadGraph5_aMC@NLO'
executable = pjoin(self.mother_interface.options['pythia8_path'], 'bin', 'pythia8-config')
if not os.path.exists(executable):
raise self.mother.InvalidCmd, 'Pythia8 is not correctly specified to MadGraph5_aMC@NLO'
# 2. take the compilation flag of PY8 from pythia8-config
libs , paths = [], []
p = misc.subprocess.Popen([executable, '--libs'], stdout=subprocess.PIPE)
stdout, _ = p. communicate()
libs = [x[2:] for x in stdout.split() if x.startswith('-l') or paths.append(x[2:])]
# Add additional user-defined compilation flags
p = misc.subprocess.Popen([executable, '--config'], stdout=subprocess.PIPE)
stdout, _ = p. communicate()
for lib in ['-ldl','-lstdc++','-lc++']:
if lib in stdout:
libs.append(lib[2:])
# This precompiler flag is in principle useful for the analysis if it writes HEPMC
# events, but there is unfortunately no way for now to specify it in the shower_card.
supports_HEPMCHACK = '-DHEPMC2HACK' in stdout
#3. ensure that those flag are in the shower card
for l in libs:
if l not in extralibs:
modify_extralibs = True
extralibs.append(l)
for L in paths:
if L not in extrapaths:
modify_extrapaths = True
extrapaths.append(L)
# Apply the required modification
if modify_extralibs:
if extralibs:
self.do_set('shower_card extralibs %s ' % ' '.join(extralibs))
else:
self.do_set('shower_card extralibs None ')
if modify_extrapaths:
if extrapaths:
self.do_set('shower_card extrapaths %s ' % ' '.join(extrapaths))
else:
self.do_set('shower_card extrapaths None ')
def reask(self, *args, **opt):
cmd.OneLinePathCompletion.reask(self,*args, **opt)
if self.has_mw and not os.path.exists(pjoin(self.me_dir,'Cards','transfer_card.dat')):
logger.warning('No transfer function currently define. Please use the change_tf command to define one.')
    # counter of consecutive param_card format failures; guards against an
    # infinite reask loop
    fail_due_to_format = 0
    def postcmd(self, stop, line):
        """Hook run after each command: when the question is about to end,
        enforce card consistency and refresh dependent parameters."""
        ending_question = cmd.OneLinePathCompletion.postcmd(self,stop,line)
        if ending_question:
            self.check_card_consistency()
            try:
                self.do_update('dependent', timer=20)
            except MadGraph5Error, error:
                if 'Missing block:' in str(error):
                    self.fail_due_to_format +=1
                    if self.fail_due_to_format == 10:
                        # give up after repeated failures and re-ask the user
                        missing, unknow = str(error).split('\n')[-2:]
                        logger.warning("Invalid param_card:\n%s\n%s\n" % (missing, unknow))
                        logger.info("Type \"update missing\" to use default value.\n ", '$MG:color:BLACK')
                        self.value = False # to avoid that entering a command stop the question
                        return self.reask(True)
                else:
                    raise
        return ending_question
    def do_update(self, line, timer=0):
        """ syntax: update dependent: Change the mass/width of particles which are not free parameter for the model.
            update missing: add to the current param_card missing blocks/parameters.
            update to_slha1: pass SLHA2 card to SLHA1 convention. (beta)
            update to_slha2: pass SLHA1 card to SLHA2 convention. (beta)
        """
        args = self.split_arg(line)
        if len(args)==0:
            logger.warning('miss an argument (dependent or missing). Please retry')
            return

        if args[0] == 'dependent':
            if not self.mother_interface:
                logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
            pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
            pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
            param_text= open(self.paths['param']).read()

            if pattern_scan.search(param_text):
                # a scan is requested: dependent parameters will be recomputed
                # per scan point, so nothing to do here
                #for block, key in self.restricted_value:
                #    self.param_card[block].get(key).value = -9.999e-99
                #    self.param_card.write(self.paths['param'])
                return
            elif pattern_width.search(param_text):
                # at least one width is set to 'Auto': compute it first
                self.do_compute_widths('')
                self.param_card = check_param_card.ParamCard(self.paths['param'])

            # calling the routine doing the work
            self.update_dependent(self.mother_interface, self.me_dir, self.param_card,
                                  self.paths['param'], timer)
        elif args[0] == 'missing':
            self.update_missing()
            return
        elif args[0] == 'to_slha2':
            try:
                check_param_card.convert_to_mg5card(self.paths['param'])
                logger.info('card updated')
            except Exception, error:
                logger.warning('failed to update to slha2 due to %s' % error)
            self.param_card = check_param_card.ParamCard(self.paths['param'])
        elif args[0] == 'to_slha1':
            try:
                check_param_card.convert_to_slha1(self.paths['param'])
                logger.info('card updated')
            except Exception, error:
                logger.warning('failed to update to slha1 due to %s' % error)
            self.param_card = check_param_card.ParamCard(self.paths['param'])
    @staticmethod
    def update_dependent(mecmd, me_dir, param_card, path ,timer=0):
        """static method which can also be called from outside the class
        usefull in presence of scan.
        return if the param_card was updated or not

        Loads the model (bounded by *timer* seconds via SIGALRM when non-zero)
        and recomputes the parameters of *param_card* that are not free, then
        writes the card back to *path* when something changed.
        """
        logger.info('Update the dependent parameter of the param_card.dat')
        modify = True

        class TimeOutError(Exception):
            # raised by the SIGALRM handler when the model takes too long to load
            pass
        def handle_alarm(signum, frame):
            raise TimeOutError
        signal.signal(signal.SIGALRM, handle_alarm)
        if timer:
            signal.alarm(timer)
            log_level=30
        else:
            log_level=20

        # Try to load the model in the limited amount of time allowed
        try:
            model = mecmd.get_model()
            signal.alarm(0)
        except TimeOutError:
            logger.warning('The model takes too long to load so we bypass the updating of dependent parameter.\n'+\
                           'This might create trouble for external program (like MadSpin/shower/...)\n'+\
                           'The update can be forced without timer by typing \'update dependent\' at the time of the card edition')
            modify =False
        except Exception,error:
            logger.debug(str(error))
            logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
            signal.alarm(0)
        else:
            restrict_card = pjoin(me_dir,'Source','MODEL','param_card_rule.dat')
            if not os.path.exists(restrict_card):
                restrict_card = None
            #restrict_card = None
            if model:
                modify = param_card.update_dependent(model, restrict_card, log_level)
                if modify and path:
                    param_card.write(path)
            else:
                logger.warning('missing MG5aMC code. Fail to update dependent parameter. This might create trouble for program like MadSpin/shower/...')

        if log_level==20:
            logger.info('param_card up to date.')
        return modify
    def update_missing(self):
        """Append to the current param_card every block/parameter that exists
        in the default card but is absent from the user card (using the
        default value), then reload the card."""

        def check_block(self, blockname):
            # append to `text` the parameters of `blockname` that were not
            # read from the user card (tracked in `input_in_block`);
            # return the number of entries added
            add_entry = 0
            if blockname.lower() not in self.param_card_default:
                logger.info('unknow block %s: block will be ignored', blockname)
                return add_entry
            block = self.param_card_default[blockname]
            for key in block.keys():
                if key not in input_in_block:
                    param = block.get(key)
                    if blockname != 'decay':
                        text.append('\t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
                    else:
                        text.append('DECAY \t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
                    add_entry += 1
            if add_entry:
                text.append('\n')
            if add_entry:
                logger.info("Adding %s parameter(s) to block %s", add_entry, blockname)
            return add_entry

        # Add to the current param_card all the missing input at default value
        current_block = ''
        input_in_block = set()
        defined_blocks = set()
        decay = set()
        text = []
        add_entry = 0
        for line in open(self.paths['param']):
            new_block = re.findall(r'^\s*(block|decay)\s*(\w*)', line, re.I)
            if new_block:
                # entering a new block: flush the previous one first
                new_block = new_block[0]
                defined_blocks.add(new_block[1].lower())
                if current_block:
                    add_entry += check_block(self, current_block)
                current_block= new_block[1]
                input_in_block = set()
                if new_block[0].lower() == 'decay':
                    # decay entries are collected globally and checked at the end
                    decay.add((int(new_block[1]),))
                    current_block = ''
                if new_block[1].lower() == 'qnumbers':
                    current_block = ''
            text.append(line)
            if not current_block:
                continue
            # normal line: strip comment and record the lhacode that was read
            line = line.split('#',1)[0]
            split = line.split()
            if not split:
                continue
            else:
                try:
                    lhacode = [int(i) for i in split[:-1]]
                except:
                    continue
                input_in_block.add(tuple(lhacode))

        if current_block:
            add_entry += check_block(self, current_block)

        # special check for missing block
        for block in self.param_card_default:
            if block.startswith(('qnumbers', 'decay')):
                continue
            if block not in defined_blocks:
                nb_entry = len(self.param_card_default[block])
                logger.info("Block %s was missing. Adding the %s associated parameter(s)", block,nb_entry)
                add_entry += nb_entry
                text.append(str(self.param_card_default[block]))

        # special check for the decay
        input_in_block = decay
        add_entry += check_block(self, 'decay')

        if add_entry:
            logger.info('write new param_card with %s new parameter(s).', add_entry, '$MG:color:BLACK')
            open(self.paths['param'],'w').write(''.join(text))
            self.reload_card(self.paths['param'])
        else:
            logger.info('No missing parameter detected.', '$MG:color:BLACK')
    def check_answer_consistency(self):
        """function called if the code reads a file

        Applies all cross-card consistency rules and then refreshes the
        dependent parameters of the param_card (20 s model-loading timeout).
        """
        self.check_card_consistency()
        self.do_update('dependent', timer=20)
def help_set(self):
'''help message for set'''
logger.info('********************* HELP SET ***************************')
logger.info("syntax: set [run_card|param_card|...] NAME [VALUE|default]")
logger.info("syntax: set [param_card] BLOCK ID(s) [VALUE|default]")
logger.info('')
logger.info('-- Edit the param_card/run_card/... and replace the value of the')
logger.info(' parameter by the value VALUE.')
logger.info(' ')
logger.info('-- Example:')
logger.info(' set run_card ebeam1 4000')
logger.info(' set ebeam2 4000')
logger.info(' set lpp1 0')
logger.info(' set ptj default')
logger.info('')
logger.info(' set param_card mass 6 175')
logger.info(' set mass 25 125.3')
logger.info(' set mass mh 125')
logger.info(' set mh 125')
logger.info(' set decay 25 0.004')
logger.info(' set decay wh 0.004')
logger.info(' set vmix 2 1 2.326612e-01')
logger.info('')
logger.info(' set param_card default #return all parameter to default')
logger.info(' set run_card default')
logger.info('********************* HELP SET ***************************')
def default(self, line):
"""Default action if line is not recognized"""
line = line.strip()
args = line.split()
if line == '' and self.default_value is not None:
self.value = self.default_value
# check if input is a file
elif hasattr(self, 'do_%s' % args[0]):
self.do_set(' '.join(args[1:]))
elif os.path.isfile(line):
self.copy_file(line)
self.value = 'repeat'
elif self.me_dir and os.path.exists(pjoin(self.me_dir, line)):
self.copy_file(pjoin(self.me_dir,line))
self.value = 'repeat'
elif line.strip() != '0' and line.strip() != 'done' and \
str(line) != 'EOF' and line.strip() in self.allow_arg:
self.open_file(line)
self.value = 'repeat'
elif line.strip().startswith(('http:','www')):
self.value = 'repeat'
import tempfile
fsock, path = tempfile.mkstemp()
try:
text = urllib.urlopen(line.strip())
except Exception:
logger.error('fail to load the file')
else:
for line in text:
os.write(fsock, line)
os.close(fsock)
self.copy_file(path)
os.remove(path)
else:
self.value = line
return line
    def do_decay(self, line):
        """edit the madspin_card to define the decay of the associate particle

        With '-add' the decay line is appended to the existing ones; otherwise
        any previous decay definition for that particle is removed first.
        """
        signal.alarm(0) # avoid timer if any
        path = self.paths['madspin']

        if 'madspin_card.dat' not in self.cards or not os.path.exists(path):
            logger.warning("Command decay not valid. Since MadSpin is not available.")
            return

        if ">" not in line:
            logger.warning("invalid command for decay. Line ignored")
            return

        if "-add" in line:
            # just have to add the line to the end of the file
            particle = line.split('>')[0].strip()
            text = open(path).read()
            line = line.replace('--add', '').replace('-add','')
            logger.info("change madspin_card to add one decay to %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
            # insert before 'launch' if present so the decay is taken into account
            if 'launch' in text:
                text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
            else:
                text += '\ndecay %s\n launch \n' % line
        else:
            # Here we have to remove all the previous definition of the decay
            # first find the particle
            particle = line.split('>')[0].strip()
            logger.info("change madspin_card to define the decay of %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
            particle = particle.replace('+','\+').replace('-','\-')
            decay_pattern = re.compile(r"^\s*decay\s+%s\s*>[\s\w+-~]*?$" % particle, re.I+re.M)
            text= open(path).read()
            text = decay_pattern.sub('', text)
            if 'launch' in text:
                text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
            else:
                text += '\ndecay %s\n launch \n' % line

        with open(path,'w') as fsock:
            fsock.write(text)
        self.reload_card(path)
def do_compute_widths(self, line):
signal.alarm(0) # avoid timer if any
path = self.paths['param']
pattern = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
text = open(path).read()
pdg_info = pattern.findall(text)
has_nlo = any("@nlo"==nlo.lower() for _, nlo in pdg_info)
pdg = [p for p,_ in pdg_info]
line = '%s %s' % (line, ' '.join(pdg))
if not '--path' in line:
line += ' --path=%s' % path
if has_nlo:
line += ' --nlo'
try:
return self.mother_interface.do_compute_widths(line)
except InvalidCmd, error:
logger.error("Invalid command: %s " % error)
def help_compute_widths(self):
signal.alarm(0) # avoid timer if any
return self.mother_interface.help_compute_widths()
def help_decay(self):
"""help for command decay which modifies MadSpin_card"""
signal.alarm(0) # avoid timer if any
print '--syntax: decay PROC [--add]'
print ' '
print ' modify the madspin_card to modify the decay of the associate particle.'
print ' and define it to PROC.'
print ' if --add is present, just add a new decay for the associate particle.'
def complete_compute_widths(self, *args, **opts):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
return self.mother_interface.complete_compute_widths(*args,**opts)
def help_add(self):
"""help for add command"""
logger.info('********************* HELP ADD ***************************')
logger.info( '-- syntax: add pythia8_card NAME VALUE')
logger.info( " add a definition of name in the pythia8_card with the given value")
logger.info( " Do not work for the param_card" )
logger.info( '-- syntax: add filename [OPTION] line')
logger.info( ' add the given LINE to the end of the associate file (all file supportedd).')
logger.info( ' OPTION parameter allows to change the position where to write in the file')
logger.info( ' --after_line=banner : write the line at the end of the banner')
logger.info( ' --line_position=X : insert the line before line X (starts at 0)')
logger.info( ' --after_line="<regular-expression>" write the line after the first line matching the regular expression')
logger.info( ' --before_line="<regular-expression>" write the line before the first line matching the regular expression')
logger.info(' --clean remove all previously existing line in the file')
logger.info( ' example: change reweight --after_line="^\s*change mode" change model heft')
logger.info('********************* HELP ADD ***************************')
    def complete_add(self, text, line, begidx, endidx, formatting=True):
        """ auto-completion for add command"""
        prev_timer = signal.alarm(0) # avoid timer if any
        if prev_timer:
            # the question timer was active: clear the prompt line and redraw
            nb_back = len(line)
            self.stdout.write('\b'*nb_back + '[timer stopped]\n')
            self.stdout.write(line)
            self.stdout.flush()

        split = line[:begidx].split()
        if len(split)==1:
            # first argument: the card to which the line should be added
            possibilities = {}
            cards = [c.rsplit('.',1)[0] for c in self.cards]
            possibilities['category of parameter (optional)'] = \
                          self.list_completion(text, cards)
        elif len(split) == 2:
            # second argument: the (optional) positioning option
            possibilities = {}
            options = ['--line_position=','--after_line=banner', '--after_line="','--before_line="']
            possibilities['category of parameter (optional)'] = \
                          self.list_completion(text, options, line)
        else:
            return
        return self.deal_multiple_categories(possibilities, formatting)
    def do_add(self, line):
        """ syntax: add filename NAME VALUE
            syntax: add filename [OPTION] LINE

        First form: set NAME to VALUE in the pythia8_card.
        Second form: write LINE into the given card at a position controlled
        by --clean / --line_position= / --after_line= / --before_line=
        (default: append at the end of the file)."""

        args = self.split_arg(line)
        if len(args) == 3 and args[0] in ['pythia8_card', 'pythia8_card.dat'] and self.has_PY8:
            # NAME VALUE form: delegate to the PY8 card object and rewrite it
            name= args[1]
            value = args[2]
            self.PY8Card.userSet(name, value)
            self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                      pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                      print_only_visible=True)
            logger.info("add in the pythia8_card the parameter \"%s\" with value \"%s\"" % (name, value), '$MG:color:BLACK')
        elif len(args) > 0:
            # raw-line form: first resolve the card name (allow shortcuts)
            if args[0] in self.cards:
                card = args[0]
            elif "%s.dat" % args[0] in self.cards:
                card = "%s.dat" % args[0]
            elif "%s_card.dat" % args[0] in self.cards:
                card = "%s_card.dat" % args[0]
            elif self.has_ml and args[0].lower() == "madloop":
                card = "MadLoopParams.dat"
            else:
                logger.error("unknow card %s. Please retry." % args[0])
                return
            # handling the various option on where to write the line
            if args[1] == '--clean':
                # restart from an empty file containing only the new line
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write("# %s \n" % card)
                ff.write("%s \n" % line.split(None,2)[2])
                ff.close()
                logger.info("writing the line in %s (empty file) the line: \"%s\"" %(card, line.split(None,2)[2] ),'$MG:color:BLACK')
            elif args[1].startswith('--line_position='):
                #position in file determined by user
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                pos = int(args[1].split('=',1)[1])
                newline = line.split(None,2)[2]
                split.insert(pos, newline)
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                # NOTE(review): the log echoes line.split(None,1)[1] (option
                # included) while the text written is line.split(None,2)[2]
                # -- confirm the message is meant to include the option
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(pos, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--after_line=banner'):
                # write the line at the first not commented line
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                for posline,l in enumerate(split):
                    if not l.startswith('#'):
                        break
                split.insert(posline, line.split(None,2)[2])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--before_line='):
                # catch the line/regular expression and write before that line
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                search_pattern=r'''before_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
                pattern = re.search(search_pattern, line).group()[13:-1]
                for posline,l in enumerate(split):
                    if re.search(pattern, l):
                        break
                else:
                    raise Exception, 'invalid regular expression: not found in file'
                split.insert(posline, re.split(search_pattern,line)[-1])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--after_line='):
                # catch the line/regular expression and write after that line
                # (no match -> append at the end of the file)
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                search_pattern = r'''after_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
                pattern = re.search(search_pattern, line).group()[12:-1]
                for posline,l in enumerate(split):
                    if re.search(pattern, l):
                        break
                else:
                    posline=len(split)
                split.insert(posline+1, re.split(search_pattern,line)[-1])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            else:
                # no positioning option: append at the end of the file
                ff = open(pjoin(self.me_dir,'Cards',card),'a')
                ff.write("%s \n" % line.split(None,1)[1])
                ff.close()
                logger.info("adding at the end of the file %s the line: \"%s\"" %(card, line.split(None,1)[1] ),'$MG:color:BLACK')
            self.reload_card(pjoin(self.me_dir,'Cards',card))
def help_asperge(self):
"""Help associated to the asperge command"""
signal.alarm(0)
print '-- syntax: asperge [options]'
print ' Call ASperGe to diagonalize all mass matrices in the model.'
print ' This works only if the ASperGE module is part of the UFO model (a subdirectory).'
print ' If you specify some names after the command (i.e. asperge m1 m2) then ASperGe will only'
print ' diagonalize the associate mass matrices (here m1 and m2).'
def complete_asperge(self, text, line, begidx, endidx, formatting=True):
prev_timer = signal.alarm(0) # avoid timer if any
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
blockname = self.pname2block.keys()
# remove those that we know for sure are not mixing
wrong = ['decay', 'mass', 'sminput']
valid = [k for k in blockname if 'mix' in k]
potential = [k for k in blockname if k not in valid+wrong]
output = {'Mixing matrices': self.list_completion(text, valid, line),
'Other potential valid input': self.list_completion(text, potential, line)}
return self.deal_multiple_categories(output, formatting)
    def do_asperge(self, line):
        """Running ASperGe

        ASperGe is shipped (optionally) as a subdirectory of the UFO model; it
        diagonalizes the mass matrices and rewrites the param_card. A backup of
        the original card is kept as <card>.beforeasperge.
        """
        signal.alarm(0) # avoid timer if any
        path = pjoin(self.me_dir,'bin','internal','ufomodel','ASperGE')
        if not os.path.exists(path):
            logger.error('ASperge has not been detected in the current model, therefore it will not be run.')
            return
        elif not os.path.exists(pjoin(path,'ASperGe')):
            # module present but executable missing: compile it on the fly
            logger.info('ASperGe has been detected but is not compiled. Running the compilation now.')
            try:
                misc.compile(cwd=path,shell=True)
            except MadGraph5Error, error:
                logger.error('''ASperGe failed to compile. Note that gsl is needed
     for this compilation to go trough. More information on how to install this package on
     http://www.gnu.org/software/gsl/
     Full compilation log is available at %s''' % pjoin(self.me_dir, 'ASperge_compilation.log'))
                open(pjoin(self.me_dir, 'ASperge_compilation.log'),'w').write(str(error))
                return

        opts = line.split()
        card = self.paths['param']
        logger.info('running ASperGE')
        returncode = misc.call([pjoin(path,'ASperGe'), card, '%s.new' % card] + opts)
        if returncode:
            logger.error('ASperGE fails with status %s' % returncode)
        else:
            logger.info('AsPerGe creates the file succesfully')
        # keep a backup of the original card, then promote the new one
        files.mv(card, '%s.beforeasperge' % card)
        files.mv('%s.new' % card, card)
def copy_file(self, path):
"""detect the type of the file and overwritte the current file"""
if path.endswith('.lhco'):
#logger.info('copy %s as Events/input.lhco' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
elif path.endswith('.lhco.gz'):
#logger.info('copy %s as Events/input.lhco.gz' % (path))
#files.cp(path, pjoin(self.mother_interface.me_dir, 'Events', 'input.lhco.gz' ))
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
else:
card_name = CommonRunCmd.detect_card_type(path)
if card_name == 'unknown':
logger.warning('Fail to determine the type of the file. Not copied')
if card_name != 'banner':
logger.info('copy %s as %s' % (path, card_name))
files.cp(path, self.paths[card_name.split('_',1)[0]])
self.reload_card(self.paths[card_name.split('_',1)[0]])
elif card_name == 'banner':
banner_mod.split_banner(path, self.mother_interface.me_dir, proc_card=False)
logger.info('Splitting the banner in it\'s component')
if not self.mode == 'auto':
self.mother_interface.keep_cards(self.cards)
for card_name in self.cards:
self.reload_card(pjoin(self.me_dir, 'Cards', card_name))
    def open_file(self, answer):
        """open the file

        *answer* may be a card index (as shown in the question), a card
        shortcut name, or a direct path; it is resolved to a real path which
        is then opened in the user's editor and reloaded afterwards.
        """
        try:
            me_dir = self.mother_interface.me_dir
        except:
            me_dir = None

        if answer.isdigit():
            # numeric answer: index into the list of cards ('9' is the plot card)
            if answer == '9':
                answer = 'plot'
            else:
                answer = self.cards[int(answer)-1]

        if 'madweight' in answer:
            answer = answer.replace('madweight', 'MadWeight')
        elif 'MadLoopParams' in answer:
            answer = self.paths['ML']
        elif 'pythia8_card' in answer:
            answer = self.paths['pythia8']
        if os.path.exists(answer):
            path = answer
        else:
            # resolve the shortcut name to an actual path
            if not '.dat' in answer and not '.lhco' in answer:
                if answer != 'trigger':
                    path = self.paths[answer]
                else:
                    path = self.paths['delphes']
            elif not '.lhco' in answer:
                if '_' in answer:
                    path = self.paths['_'.join(answer.split('_')[:-1])]
                else:
                    path = pjoin(me_dir, 'Cards', answer)
            else:
                # lhco input: location is taken from the MadWeight card
                path = pjoin(me_dir, self.mw_card['mw_run']['inputfile'])
                if not os.path.exists(path):
                    logger.info('Path in MW_card not existing')
                    path = pjoin(me_dir, 'Events', answer)
        #security
        path = path.replace('_card_card','_card')
        try:
            self.mother_interface.exec_cmd('open %s' % path)
        except InvalidCmd, error:
            if str(error) != 'No default path for this file':
                raise
            if answer == 'transfer_card.dat':
                logger.warning('You have to specify a transfer function first!')
            elif answer == 'input.lhco':
                # create a placeholder so the user can paste his events in it
                path = pjoin(me_dir,'Events', 'input.lhco')
                ff = open(path,'w')
                ff.write('''No LHCO information imported at current time.
To import a lhco file: Close this file and type the path of your file.
You can also copy/paste, your event file here.''')
                ff.close()
                self.open_file(path)
            else:
                raise
        self.reload_card(path)
    def reload_card(self, path):
        """reload object to have it in sync

        Re-parses the card located at *path* into the matching in-memory
        object (param/run/shower/MadLoop/pythia8/MadWeight); unknown paths are
        only logged. Returns *path*.
        """
        if path == self.paths['param']:
            try:
                self.param_card = check_param_card.ParamCard(path)
            except (check_param_card.InvalidParamCard, ValueError) as e:
                # keep the previous (valid) card in memory and warn the user
                logger.error('Current param_card is not valid. We are going to use the default one.')
                logger.error('problem detected: %s' % e)
                logger.error('Please re-open the file and fix the problem.')
                logger.warning('using the \'set\' command without opening the file will discard all your manual change')
        elif path == self.paths['run']:
            self.run_card = banner_mod.RunCard(path)
        elif path == self.paths['shower']:
            self.shower_card = shower_card_mod.ShowerCard(path)
        elif path == self.paths['ML']:
            self.MLcard = banner_mod.MadLoopParam(path)
        elif path == self.paths['pythia8']:
            # Use the read function so that modified/new parameters are correctly
            # set as 'user_set'
            if not self.PY8Card:
                self.PY8Card = banner_mod.PY8Card(self.paths['pythia8_default'])

            self.PY8Card.read(self.paths['pythia8'], setter='user')
            self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
        elif path == self.paths['MadWeight']:
            try:
                import madgraph.madweight.Cards as mwcards
            except:
                import internal.madweight.Cards as mwcards
            self.mw_card = mwcards.Card(path)
        else:
            logger.debug('not keep in sync: %s', path)
        return path
class EditParamCard(AskforEditCard):
    """a dedicated module for the param

    Restricted variant of AskforEditCard that only handles the param_card;
    `card[0]` must be the path of the param_card to edit.
    """

    # no shortcut commands are available in param-only mode
    special_shortcut ={}

    def __init__(self, question, card=[], mode='auto', *args, **opt):

        self.load_default()
        cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)
        if os.path.isfile(card[0]):
            self.param_card = check_param_card.ParamCard(card[0])
            self.paths['param'] = card[0]
            # use <name>_default.dat as the default card when it exists,
            # otherwise fall back on the card itself
            if os.path.isfile(card[0].replace('.dat', '_default.dat')):
                self.paths['param_default'] = card[0].replace('.dat', '_default.dat')
            else:
                self.paths['param_default'] = card[0]
        else:
            raise Exception, 'path %s do not exists' % card[0]

        self.pname2block, self.restricted_value = self.param_card.analyze_param_card()
        self.cards=['param']

    def do_asperge(self, *args, **opts):
        "Not available"
        logger.warning("asperge not available in this mode")
| 47.487073 | 276 | 0.527495 | gger.info("-- run delphes on RUN (current one by default)")
self.run_options_help([('-f','answer all question by default'),
('--tag=', 'define the tag for the delphes run'),
('--no_default', 'not run if delphes_card not present')])
def help_decay_events(self, skip_syntax=False):
if not skip_syntax:
logger.info("syntax: decay_events [RUN]")
logger.info("This functionality allows for the decay of resonances")
logger.info("in a .lhe file, keeping track of the spin correlation effets.")
logger.info("BE AWARE OF THE CURRENT LIMITATIONS:")
logger.info(" (1) Only a succession of 2 body decay are currently allowed")
class CheckValidForCmd(object):
""" The Series of check routines in common between amcatnlo_run and
madevent interface"""
    def check_set(self, args):
        """ check the validity of the line

        Raises InvalidCmd when the option is unknown or when the value does
        not fit the option ('stdout_level' needs a log level, 'timeout' an
        integer). *args* may be rewritten in place to split a 'name=value'
        single argument.
        """
        if len(args) < 2:
            # allow the single-argument 'name=value' form
            if len(args)==1 and "=" in args[0]:
                args[:] = args[0].split("=",1)
            else:
                self.help_set()
                raise self.InvalidCmd('set needs an option and an argument')

        if args[0] not in self._set_options + self.options.keys():
            self.help_set()
            raise self.InvalidCmd('Possible options for set are %s' % \
                                  (self._set_options+self.options.keys()))

        if args[0] in ['stdout_level']:
            if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL'] \
                                                       and not args[1].isdigit():
                raise self.InvalidCmd('output_level needs ' + \
                                      'a valid level')

        if args[0] in ['timeout']:
            if not args[1].isdigit():
                raise self.InvalidCmd('timeout values should be a integer')
def check_compute_widths(self, args):
"""check that the model is loadable and check that the format is of the
type: PART PATH --output=PATH -f --precision=N
return the model.
"""
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The automatic computations of widths requires that MG5 is installed on the system.
You can install it and set his path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import models.model_reader as model_reader
import models.import_ufo as import_ufo
except ImportError:
raise self.ConfigurationError, '''Can\'t load MG5.
The variable mg5_path should not be correctly configure.'''
ufo_path = pjoin(self.me_dir,'bin','internal', 'ufomodel')
if not MADEVENT:
modelname = self.find_model_name()
force_CMS = self.mother and self.mother.options['complex_mass_scheme']
model = import_ufo.import_model(modelname, decay=True,
restrict=True, complex_mass_scheme=force_CMS)
else:
force_CMS = self.proc_characteristics['complex_mass_scheme']
model = import_ufo.import_model(pjoin(self.me_dir,'bin','internal',
'ufomodel'), decay=True, complex_mass_scheme=force_CMS)
if '-modelname' not in open(pjoin(self.me_dir,'Cards','proc_card_mg5.dat')).read():
model.pass_particles_name_in_mg_default()
model = model_reader.ModelReader(model)
particles_name = dict([(p.get('name'), p.get('pdg_code'))
for p in model.get('particles')])
particles_name.update(dict([(p.get('antiname'), p.get('pdg_code'))
for p in model.get('particles')]))
output = {'model': model, 'force': False, 'output': None,
'path':None, 'particles': set(), 'body_decay':4.0025,
'min_br':None, 'precision_channel':0.01}
for arg in args:
if arg.startswith('--output='):
output_path = arg.split('=',1)[1]
if not os.path.exists(output_path):
raise self.InvalidCmd, 'Invalid Path for the output. Please retry.'
if not os.path.isfile(output_path):
output_path = pjoin(output_path, 'param_card.dat')
output['output'] = output_path
elif arg == '-f':
output['force'] = True
elif os.path.isfile(arg):
ftype = self.detect_card_type(arg)
if ftype != 'param_card.dat':
raise self.InvalidCmd , '%s is not a valid param_card.' % arg
output['path'] = arg
elif arg.startswith('--path='):
arg = arg.split('=',1)[1]
ftype = self.detect_card_type(arg)
if ftype != 'param_card.dat':
raise self.InvalidCmd , '%s is not a valid param_card.' % arg
output['path'] = arg
elif arg.startswith('--'):
if "=" in arg:
name, value = arg.split('=',1)
try:
value = float(value)
except Exception:
raise self.InvalidCmd, '--%s requires integer or a float' % name
output[name[2:]] = float(value)
elif arg == "--nlo":
output["nlo"] = True
elif arg in particles_name:
output['particles'].add(particles_name[arg])
elif arg.isdigit() and int(arg) in particles_name.values():
output['particles'].add(ast.literal_eval(arg))
elif arg == 'all':
output['particles'] = set(['all'])
else:
self.help_compute_widths()
raise self.InvalidCmd, '%s is not a valid argument for compute_widths' % arg
if self.force:
output['force'] = True
if not output['particles']:
raise self.InvalidCmd, '''This routines requires at least one particle in order to compute
the related width'''
if output['output'] is None:
output['output'] = output['path']
return output
def check_delphes(self, arg, nodefault=False):
"""Check the argument for pythia command
syntax: delphes [NAME]
Note that other option are already remove at this point
"""
if not self.options['delphes_path']:
logger.info('Retry to read configuration file to find delphes path')
self.set_configuration()
if not self.options['delphes_path']:
error_msg = 'No valid Delphes path set.\n'
error_msg += 'Please use the set command to define the path and retry.\n'
error_msg += 'You can also define it in the configuration file.\n'
raise self.InvalidCmd(error_msg)
tag = [a for a in arg if a.startswith('--tag=')]
if tag:
arg.remove(tag[0])
tag = tag[0][6:]
if len(arg) == 0 and not self.run_name:
if self.results.lastrun:
arg.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently define. Please add this information.')
if len(arg) == 1 and self.run_name == arg[0]:
arg.pop(0)
filepath = None
if not len(arg):
prev_tag = self.set_run_name(self.run_name, tag, 'delphes')
paths = [pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc.gz'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia_events.hep'),
pjoin(self.me_dir,'Events',self.run_name, '%(tag)s_pythia8_events.hepmc'),
pjoin(self.me_dir,'Events','pythia_events.hep'),
pjoin(self.me_dir,'Events','pythia_events.hepmc'),
pjoin(self.me_dir,'Events','pythia8_events.hep.gz'),
pjoin(self.me_dir,'Events','pythia8_events.hepmc.gz')
]
for p in paths:
if os.path.exists(p % {'tag': prev_tag}):
filepath = p % {'tag': prev_tag}
break
else:
a = raw_input("NO INPUT")
if nodefault:
return False
else:
self.help_pgs()
raise self.InvalidCmd('''No file file pythia_events.* currently available
Please specify a valid run_name''')
if len(arg) == 1:
prev_tag = self.set_run_name(arg[0], tag, 'delphes')
if os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)
elif os.path.exists(pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc' % prev_tag)):
filepath = pjoin(self.me_dir,'Events',self.run_name, '%s_pythia8_events.hepmc.gz' % prev_tag)
else:
raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\
% (self.run_name, prev_tag,
pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
else:
if tag:
self.run_card['run_tag'] = tag
self.set_run_name(self.run_name, tag, 'delphes')
return filepath
def check_open(self, args):
""" check the validity of the line """
if len(args) != 1:
self.help_open()
raise self.InvalidCmd('OPEN command requires exactly one argument')
if args[0].startswith('./'):
if not os.path.isfile(args[0]):
raise self.InvalidCmd('%s: not such file' % args[0])
return True
if not self.me_dir:
if not os.path.isfile(args[0]):
self.help_open()
raise self.InvalidCmd('No MadEvent path defined. Unable to associate this name to a file')
else:
return True
path = self.me_dir
if os.path.isfile(os.path.join(path,args[0])):
args[0] = os.path.join(path,args[0])
elif os.path.isfile(os.path.join(path,'Cards',args[0])):
args[0] = os.path.join(path,'Cards',args[0])
elif os.path.isfile(os.path.join(path,'HTML',args[0])):
args[0] = os.path.join(path,'HTML',args[0])
elif '_card.dat' in args[0]:
name = args[0].replace('_card.dat','_card_default.dat')
if os.path.isfile(os.path.join(path,'Cards', name)):
files.cp(os.path.join(path,'Cards', name), os.path.join(path,'Cards', args[0]))
args[0] = os.path.join(path,'Cards', args[0])
else:
raise self.InvalidCmd('No default path for this file')
elif not os.path.isfile(args[0]):
raise self.InvalidCmd('No default path for this file')
def check_treatcards(self, args):
"""check that treatcards arguments are valid
[param|run|all] [--output_dir=] [--param_card=] [--run_card=]
"""
opt = {'output_dir':pjoin(self.me_dir,'Source'),
'param_card':pjoin(self.me_dir,'Cards','param_card.dat'),
'run_card':pjoin(self.me_dir,'Cards','run_card.dat')}
mode = 'all'
for arg in args:
if arg.startswith('--') and '=' in arg:
key,value =arg[2:].split('=',1)
if not key in opt:
self.help_treatcards()
raise self.InvalidCmd('Invalid option for treatcards command:%s ' \
% key)
if key in ['param_card', 'run_card']:
if os.path.isfile(value):
card_name = self.detect_card_type(value)
if card_name != key:
raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
% (card_name, key))
opt[key] = value
elif os.path.isfile(pjoin(self.me_dir,value)):
card_name = self.detect_card_type(pjoin(self.me_dir,value))
if card_name != key:
raise self.InvalidCmd('Format for input file detected as %s while expecting %s'
% (card_name, key))
opt[key] = value
else:
raise self.InvalidCmd('No such file: %s ' % value)
elif key in ['output_dir']:
if os.path.isdir(value):
opt[key] = value
elif os.path.isdir(pjoin(self.me_dir,value)):
opt[key] = pjoin(self.me_dir, value)
else:
raise self.InvalidCmd('No such directory: %s' % value)
elif arg in ['MadLoop','param','run','all']:
mode = arg
else:
self.help_treatcards()
raise self.InvalidCmd('Unvalid argument %s' % arg)
return mode, opt
def check_decay_events(self,args):
"""Check the argument for decay_events command
syntax is "decay_events [NAME]"
Note that other option are already remove at this point
"""
opts = []
if '-from_cards' in args:
args.remove('-from_cards')
opts.append('-from_cards')
if len(args) == 0:
if self.run_name:
args.insert(0, self.run_name)
elif self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. Please add this information.')
return
if args[0] != self.run_name:
self.set_run_name(args[0])
args[0] = self.get_events_path(args[0])
args += opts
def check_check_events(self,args):
"""Check the argument for decay_events command
syntax is "decay_events [NAME]"
Note that other option are already remove at this point
"""
if len(args) == 0:
if self.run_name:
args.insert(0, self.run_name)
elif self.results.lastrun:
args.insert(0, self.results.lastrun)
else:
raise self.InvalidCmd('No run name currently defined. Please add this information.')
return
if args[0] and os.path.isfile(args[0]):
pass
else:
if args[0] != self.run_name:
self.set_run_name(args[0], allow_new_tag=False)
args[0] = self.get_events_path(args[0])
def get_events_path(self, run_name):
"""return the path to the output events
"""
if self.mode == 'madevent':
possible_path = [
pjoin(self.me_dir,'Events', run_name, 'unweighted_events.lhe.gz'),
pjoin(self.me_dir,'Events', run_name, 'unweighted_events.lhe')]
else:
possible_path = [
pjoin(self.me_dir,'Events', run_name, 'events.lhe.gz'),
pjoin(self.me_dir,'Events', run_name, 'events.lhe')]
for path in possible_path:
if os.path.exists(path):
correct_path = path
break
else:
if os.path.exists(run_name):
correct_path = run_name
else:
raise self.InvalidCmd('No events file corresponding to %s run. ' % run_name)
return correct_path
class MadEventAlreadyRunning(InvalidCmd):
    """Raised when another instance appears to be running in the same
    process directory (a RunWeb lock file is present)."""
    pass
class AlreadyRunning(MadEventAlreadyRunning):
    """Alias of MadEventAlreadyRunning kept as a distinct name so callers
    can catch it explicitly."""
    pass
class CommonRunCmd(HelpToCmd, CheckValidForCmd, cmd.Cmd):
debug_output = 'ME5_debug'
helporder = ['Main Commands', 'Documented commands', 'Require MG5 directory',
'Advanced commands']
sleep_for_error = True
options_configuration = {'pythia8_path': './pythia8',
'hwpp_path': './herwigPP',
'thepeg_path': './thepeg',
'hepmc_path': './hepmc',
'madanalysis_path': './MadAnalysis',
'madanalysis5_path': './HEPTools/madanalysis5',
'pythia-pgs_path':'./pythia-pgs',
'td_path':'./td',
'delphes_path':'./Delphes',
'exrootanalysis_path':'./ExRootAnalysis',
'syscalc_path': './SysCalc',
'lhapdf': 'lhapdf-config',
'timeout': 60,
'f2py_compiler':None,
'web_browser':None,
'eps_viewer':None,
'text_editor':None,
'fortran_compiler':None,
'cpp_compiler': None,
'auto_update':7,
'cluster_type': 'condor',
'cluster_status_update': (600, 30),
'cluster_nb_retry':1,
'cluster_local_path': None,
'cluster_retry_wait':300}
options_madgraph= {'stdout_level':None}
options_madevent = {'automatic_html_opening':True,
'notification_center':True,
'run_mode':2,
'cluster_queue':None,
'cluster_time':None,
'cluster_size':100,
'cluster_memory':None,
'nb_core': None,
'cluster_temp_path':None}
def __init__(self, me_dir, options, *args, **opts):
"""common"""
self.force_run = False
if 'force_run' in opts and opts['force_run']:
self.force_run = True
del opts['force_run']
cmd.Cmd.__init__(self, *args, **opts)
if me_dir is None and MADEVENT:
me_dir = root_path
if os.path.isabs(me_dir):
self.me_dir = me_dir
else:
self.me_dir = pjoin(os.getcwd(),me_dir)
self.options = options
self.param_card_iterator = []
self.status = pjoin(self.me_dir, 'status')
self.error = pjoin(self.me_dir, 'error')
self.dirbin = pjoin(self.me_dir, 'bin', 'internal')
if not self.force_run:
if os.path.exists(pjoin(me_dir,'RunWeb')):
message = '''Another instance of the program is currently running.
(for this exact same directory) Please wait that this is instance is
closed. If no instance is running, you can delete the file
%s and try again.''' % pjoin(me_dir,'RunWeb')
raise AlreadyRunning, message
else:
pid = os.getpid()
fsock = open(pjoin(me_dir,'RunWeb'),'w')
fsock.write(`pid`)
fsock.close()
self.gen_card_html()
self.to_store = []
self.run_name = None
self.run_tag = None
self.banner = None
self.set_configuration()
self.configure_run_mode(self.options['run_mode'])
self.get_characteristics()
if not self.proc_characteristics['ninitial']:
nexternal = open(pjoin(self.me_dir,'Source','nexternal.inc')).read()
found = re.search("PARAMETER\s*\(NINCOMING=(\d)\)", nexternal)
self.ninitial = int(found.group(1))
else:
self.ninitial = self.proc_characteristics['ninitial']
def make_make_all_html_results(self, folder_names = [], jobs=[]):
return sum_html.make_all_html_results(self, folder_names, jobs)
nal','ufomodel','param_card.dat')
elif not os.path.exists(pjoin(self.me_dir,'bin','internal','ufomodel')):
fsock = open(pjoin(self.me_dir,'Source','param_card.inc'),'w')
fsock.write(' ')
fsock.close()
return
else:
subprocess.call(['python', 'write_param_card.py'],
cwd=pjoin(self.me_dir,'bin','internal','ufomodel'))
default = pjoin(self.me_dir,'bin','internal','ufomodel','param_card.dat')
if amcatnlo and not keepwidth:
pids = self.get_pid_final_initial_states()
if not MADEVENT and pjoin(self.me_dir,'bin','internal') not in sys.path:
sys.path.insert(0,pjoin(self.me_dir,'bin','internal'))
to_del = [name for name in sys.modules.keys()
if name.startswith('internal.ufomodel')
or name.startswith('ufomodel')]
for name in to_del:
del(sys.modules[name])
import ufomodel as ufomodel
zero = ufomodel.parameters.ZERO
no_width = [p for p in ufomodel.all_particles
if (str(p.pdg_code) in pids or str(-p.pdg_code) in pids)
and p.color != 1 and p.width != zero]
done = []
for part in no_width:
if abs(part.pdg_code) in done:
continue
done.append(abs(part.pdg_code))
param = param_card['decay'].get((part.pdg_code,))
if param.value != 0:
logger.info('''For gauge cancellation, the width of \'%s\' has been set to zero.'''\
% part.name,'$MG:color:BLACK')
param.value = 0
param_card.write_inc_file(outfile, ident_card, default)
def get_model(self):
"""return the model related to this process"""
if self.options['mg5_path']:
sys.path.append(self.options['mg5_path'])
import models.import_ufo as import_ufo
complexmass = self.proc_characteristics['complex_mass_scheme']
with misc.MuteLogger(['madgraph.model'],[50]):
out= import_ufo.import_model(pjoin(self.me_dir,'bin','internal','ufomodel'),
complex_mass_scheme=complexmass)
return out
else:
return None
def ask_edit_cards(self, cards, mode='fixed', plot=True, first_cmd=None):
""" """
if not self.options['madanalysis_path']:
plot = False
self.ask_edit_card_static(cards, mode, plot, self.options['timeout'],
self.ask, first_cmd=first_cmd)
@staticmethod
def ask_edit_card_static(cards, mode='fixed', plot=True,
timeout=0, ask=None, **opt):
if not ask:
ask = CommonRunCmd.ask
def path2name(path):
if '_card' in path:
return path.split('_card')[0]
elif path == 'delphes_trigger.dat':
return 'trigger'
elif path == 'input.lhco':
return 'lhco'
elif path == 'MadLoopParams.dat':
return 'MadLoopParams'
else:
raise Exception, 'Unknow cards name %s' % path
question = """Do you want to edit a card (press enter to bypass editing)?\n"""
possible_answer = ['0', 'done']
card = {0:'done'}
indent = max(len(path2name(card_name)) for card_name in cards)
question += '/'+'-'*60+'\\\n'
for i, card_name in enumerate(cards):
imode = path2name(card_name)
possible_answer.append(i+1)
possible_answer.append(imode)
question += '| %-77s|\n'%((' \x1b[31m%%s\x1b[0m. %%-%ds : \x1b[32m%%s\x1b[0m'%indent)%(i+1, imode, card_name))
card[i+1] = imode
if plot and not 'plot_card.dat' in cards:
question += '| %-77s|\n'%((' \x1b[31m9\x1b[0m. %%-%ds : \x1b[32mplot_card.dat\x1b[0m'%indent) % 'plot')
possible_answer.append(9)
possible_answer.append('plot')
card[9] = 'plot'
question += '\\'+'-'*60+'/\n'
if 'param_card.dat' in cards:
question += ' you can also\n'
question += ' - enter the path to a valid card or banner.\n'
question += ' - use the \'set\' command to modify a parameter directly.\n'
question += ' The set option works only for param_card and run_card.\n'
question += ' Type \'help set\' for more information on this command.\n'
question += ' - call an external program (ASperGE/MadWidth/...).\n'
question += ' Type \'help\' for the list of available command\n'
else:
question += ' you can also\n'
question += ' - enter the path to a valid card.\n'
if 'transfer_card.dat' in cards:
question += ' - use the \'change_tf\' command to set a transfer functions.\n'
out = 'to_run'
while out not in ['0', 'done']:
out = ask(question, '0', possible_answer, timeout=int(1.5*timeout),
path_msg='enter path', ask_class = AskforEditCard,
cards=cards, mode=mode, **opt)
@staticmethod
def detect_card_type(path):
"""detect the type of the card. Return value are
banner
param_card.dat
run_card.dat
pythia_card.dat
pythia8_card.dat
plot_card.dat
pgs_card.dat
delphes_card.dat
delphes_trigger.dat
shower_card.dat [aMCatNLO]
FO_analyse_card.dat [aMCatNLO]
madspin_card.dat [MS]
transfer_card.dat [MW]
madweight_card.dat [MW]
madanalysis5_hadron_card.dat
madanalysis5_parton_card.dat
Please update the unit-test: test_card_type_recognition when adding
cards.
"""
fulltext = open(path).read(50000)
if fulltext == '':
logger.warning('File %s is empty' % path)
return 'unknown'
to_search = ['<MGVersion>',
'<mg5proccard>'
'ParticlePropagator',
'ExecutionPath',
'Treewriter',
'CEN_max_tracker',
'#TRIGGER CARD',
'parameter set name',
'muon eta coverage',
'req_acc_FO',
'MSTP',
'b_stable',
'FO_ANALYSIS_FORMAT',
'MSTU',
'Begin Minpts',
'gridpack',
'ebeam1',
'block\s+mw_run',
'BLOCK',
'DECAY',
'launch',
'madspin',
'transfer_card\.dat',
'set',
'main:numberofevents',
'@MG5aMC skip_analysis',
'@MG5aMC\s*inputs\s*=\s*\*\.(?:hepmc|lhe)',
'@MG5aMC\s*reconstruction_name',
'@MG5aMC'
]
text = re.findall('(%s)' % '|'.join(to_search), fulltext, re.I)
text = [t.lower() for t in text]
if '<mgversion>' in text or '<mg5proccard>' in text:
return 'banner'
elif 'particlepropagator' in text or 'executionpath' in text or 'treewriter' in text:
return 'delphes_card.dat'
elif 'cen_max_tracker' in text:
return 'delphes_card.dat'
elif '@mg5amc' in text:
ma5_flag = [f[7:].strip() for f in text if f.startswith('@mg5amc')]
if any(f.startswith('reconstruction_name') for f in ma5_flag):
return 'madanalysis5_hadron_card.dat'
ma5_flag = [f.split('*.')[1] for f in ma5_flag if '*.' in f]
if any(f.startswith('lhe') for f in ma5_flag):
return 'madanalysis5_parton_card.dat'
if any(f.startswith(('hepmc','hep','stdhep','lhco')) for f in ma5_flag):
return 'madanalysis5_hadron_card.dat'
else:
return 'unknown'
elif '#trigger card' in text:
return 'delphes_trigger.dat'
elif 'parameter set name' in text:
return 'pgs_card.dat'
elif 'muon eta coverage' in text:
return 'pgs_card.dat'
elif 'mstp' in text and not 'b_stable' in text:
return 'pythia_card.dat'
elif 'begin minpts' in text:
return 'plot_card.dat'
elif ('gridpack' in text and 'ebeam1' in text) or \
('req_acc_fo' in text and 'ebeam1' in text):
return 'run_card.dat'
elif any(t.endswith('mw_run') for t in text):
return 'madweight_card.dat'
elif 'transfer_card.dat' in text:
return 'transfer_card.dat'
elif 'block' in text and 'decay' in text:
return 'param_card.dat'
elif 'b_stable' in text:
return 'shower_card.dat'
elif 'fo_analysis_format' in text:
return 'FO_analyse_card.dat'
elif 'main:numberofevents' in text:
return 'pythia8_card.dat'
elif 'launch' in text:
if 'madspin' in text:
return 'madspin_card.dat'
if 'decay' in text:
if re.search("(^|;)\s*decay", fulltext):
return 'madspin_card.dat'
else:
return 'reweight_card.dat'
else:
return 'reweight_card.dat'
else:
return 'unknown'
self.run_name, '%s_pythia_events.tree.gz' % tag), keep=True,
stdout=pjoin(self.me_dir,'Events','events.tree'))
files.mv(pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'),
pjoin(self.me_dir,'Events','xsecs.tree'))
misc.call([self.dirbin+'/create_matching_plots.sh',
self.run_name, tag, madir],
stdout = os.open(os.devnull, os.O_RDWR),
cwd=pjoin(self.me_dir,'Events'))
misc.gzip(pjoin(self.me_dir,"Events","events.tree"),
stdout=pjoin(self.me_dir,'Events',self.run_name, tag + '_pythia_events.tree.gz'))
files.mv(pjoin(self.me_dir,'Events','xsecs.tree'),
pjoin(self.me_dir,'Events',self.run_name, tag+'_pythia_xsecs.tree'))
elif mode == 'Pythia8' and (int(self.run_card['ickkw'])==1 or \
self.run_card['ktdurham']>0.0 or self.run_card['ptlund']>0.0):
self.update_status('Create matching plots for Pythia8',
level='pythia8')
if not os.path.isdir(PY8_plots_root_path):
os.makedirs(PY8_plots_root_path)
merging_scale_name = 'qCut' if int(self.run_card['ickkw'])==1 \
else 'TMS'
djr_path = pjoin(self.me_dir,'Events',
self.run_name, '%s_djrs.dat' % tag)
pt_path = pjoin(self.me_dir,'Events',
self.run_name, '%s_pts.dat' % tag)
for observable_name, data_path in [('djr',djr_path),
('pt',pt_path)]:
if not self.generate_Pythia8_HwU_plots(
PY8_plots_root_path, merging_scale_name,
observable_name,data_path):
return False
if mode == 'Pythia8':
plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.gnuplot'))
if not misc.which('gnuplot'):
logger.warning("Install gnuplot to be able to view the plots"+\
" generated at :\n "+\
'\n '.join('%s.gnuplot'%p for p in plot_files))
return True
for plot in plot_files:
command = ['gnuplot',plot]
try:
subprocess.call(command,cwd=PY8_plots_root_path,stderr=subprocess.PIPE)
except Exception as e:
logger.warning("Automatic processing of the Pythia8 "+\
"merging plots with gnuplot failed. Try the"+\
" following command by hand:\n %s"%(' '.join(command))+\
"\nException was: %s"%str(e))
return False
plot_files = glob.glob(pjoin(PY8_plots_root_path,'*.pdf'))
if len(plot_files)>0:
html = "<html>\n<head>\n<TITLE>PLOT FOR PYTHIA8</TITLE>"
html+= '<link rel=stylesheet href="../../mgstyle.css" type="text/css">\n</head>\n<body>\n'
html += "<h2> Plot for Pythia8 </h2>\n"
html += '<a href=../../../crossx.html>return to summary</a><br>'
html += "<table>\n<tr> <td> <b>Obs.</b> </td> <td> <b>Type of plot</b> </td> <td><b> PDF</b> </td> <td><b> input file</b> </td> </tr>\n"
def sorted_plots(elem):
name = os.path.basename(elem[1])
if 'central' in name:
return -100
if 'min_max' in name:
return -10
merging_re = re.match(r'^.*_(\d+)_.*$',name)
if not merging_re is None:
return int(merging_re.group(1))
else:
return 1e10
djr_plot_files = sorted(
(('DJR',p) for p in plot_files if '_djr_' in p),
key = sorted_plots)
pt_plot_files = sorted(
(('Pt',p) for p in plot_files if '_pt_' in p),
key = sorted_plots)
last_obs = None
for obs, one_plot in djr_plot_files+pt_plot_files:
if obs!=last_obs:
html += "<tr><td></td></tr>"
last_obs = obs
name = os.path.basename(one_plot).replace('.pdf','')
short_name = name
for dummy in ['_plots','_djr','_pt']:
short_name = short_name.replace(dummy,'')
short_name = short_name.replace('_',' ')
if 'min max' in short_name:
short_name = "%s comparison with min/max merging scale"%obs
if 'central' in short_name:
short_name = "Merging uncertainty band around central scale"
html += "<tr><td>%(obs)s</td><td>%(sn)s</td><td> <a href=./%(n)s.pdf>PDF</a> </td><td> <a href=./%(n)s.HwU>HwU</a> <a href=./%(n)s.gnuplot>GNUPLOT</a> </td></tr>\n" %\
{'obs':obs, 'sn': short_name, 'n': name}
html += '</table>\n'
html += '<a href=../../../bin/internal/plot_djrs.py> Example of code to plot the above with matplotlib </a><br><br>'
html+='</body>\n</html>'
ff=open(pjoin(PY8_plots_root_path, 'index.html'),'w')
ff.write(html)
return True
if not event_path:
if mode == 'parton':
possibilities=[
pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', 'unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe'),
pjoin(self.me_dir, 'Events', self.run_name, 'unweighted_events.lhe.gz')]
for event_path in possibilities:
if os.path.exists(event_path):
break
output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html')
elif mode == 'Pythia':
event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_pythia_%s.html' % tag)
elif mode == 'PGS':
event_path = pjoin(self.me_dir, 'Events', self.run_name,
'%s_pgs_events.lhco' % tag)
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_pgs_%s.html' % tag)
elif mode == 'Delphes':
event_path = pjoin(self.me_dir, 'Events', self.run_name,'%s_delphes_events.lhco' % tag)
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_delphes_%s.html' % tag)
elif mode == "shower":
event_path = pjoin(self.me_dir, 'Events','pythia_events.lhe')
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_shower_%s.html' % tag)
if not self.options['pythia-pgs_path']:
return
else:
raise self.InvalidCmd, 'Invalid mode %s' % mode
elif mode == 'reweight' and not output:
output = pjoin(self.me_dir, 'HTML',self.run_name,
'plots_%s.html' % tag)
if not os.path.exists(event_path):
if os.path.exists(event_path+'.gz'):
misc.gunzip('%s.gz' % event_path)
else:
raise self.InvalidCmd, 'Events file %s does not exist' % event_path
elif event_path.endswith(".gz"):
misc.gunzip(event_path)
event_path = event_path[:-3]
self.update_status('Creating Plots for %s level' % mode, level = mode.lower())
mode = mode.lower()
if mode not in ['parton', 'reweight']:
plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s_%s' % (mode.lower(),tag))
elif mode == 'parton':
plot_dir = pjoin(self.me_dir, 'HTML', self.run_name,'plots_parton')
else:
plot_dir =pjoin(self.me_dir, 'HTML', self.run_name,'plots_%s' % (tag))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
files.ln(pjoin(self.me_dir, 'Cards','plot_card.dat'), plot_dir, 'ma_card.dat')
try:
proc = misc.Popen([os.path.join(madir, 'plot_events')],
stdout = open(pjoin(plot_dir, 'plot.log'),'w'),
stderr = subprocess.STDOUT,
stdin=subprocess.PIPE,
cwd=plot_dir)
proc.communicate('%s\n' % event_path)
del proc
misc.call(['%s/plot' % self.dirbin, madir, td],
stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
stderr = subprocess.STDOUT,
cwd=plot_dir)
misc.call(['%s/plot_page-pl' % self.dirbin,
os.path.basename(plot_dir),
mode],
stdout = open(pjoin(plot_dir, 'plot.log'),'a'),
stderr = subprocess.STDOUT,
cwd=pjoin(self.me_dir, 'HTML', self.run_name))
shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'),
output)
logger.info("Plots for %s level generated, see %s" % \
(mode, output))
except OSError, error:
logger.error('fail to create plot: %s. Please check that MadAnalysis is correctly installed.' % error)
self.update_status('End Plots for %s level' % mode, level = mode.lower(),
makehtml=False)
return True
def run_hep2lhe(self, banner_path = None):
"""Run hep2lhe on the file Events/pythia_events.hep"""
if not self.options['pythia-pgs_path']:
raise self.InvalidCmd, 'No pythia-pgs path defined'
pydir = pjoin(self.options['pythia-pgs_path'], 'src')
eradir = self.options['exrootanalysis_path']
if misc.is_executable(pjoin(pydir, 'hep2lhe')):
self.update_status('Creating shower LHE File (for plot)', level='pythia')
out = open(pjoin(self.me_dir,'Events','pythia_events.lhe'), 'w')
out.writelines('<!--\n')
out.writelines('# Warning! Never use this file for detector studies!\n')
out.writelines('-->\n<!--\n')
if banner_path:
out.writelines(open(banner_path).read().replace('<LesHouchesEvents version="1.0">',''))
out.writelines('\n-->\n')
out.close()
self.cluster.launch_and_wait(self.dirbin+'/run_hep2lhe',
argument= [pydir],
cwd=pjoin(self.me_dir,'Events'),
stdout=os.devnull)
logger.info('Warning! Never use this lhe file for detector studies!')
if eradir and misc.is_executable(pjoin(eradir, 'ExRootLHEFConverter')):
self.update_status('Creating Pythia LHE Root File', level='pythia')
try:
misc.call([eradir+'/ExRootLHEFConverter',
'pythia_events.lhe',
pjoin(self.run_name, '%s_pythia_lhe_events.root' % self.run_tag)],
cwd=pjoin(self.me_dir,'Events'))
except Exception, error:
misc.sprint('ExRootLHEFConverter fails', str(error),
log=logger)
pass
def store_result(self):
"""Dummy routine, to be overwritten by daughter classes"""
pass
r the keep/remove_wgts options:", '$MG:color:BLACK')
logger.info(" all : keep/remove all weights")
logger.info(" name : keep/remove that particular weight")
logger.info(" id1,id2 : keep/remove all the weights between those two values --included--")
logger.info(" PATTERN : keep/remove all the weights matching the (python) regular expression.")
logger.info(" note that multiple entry of those arguments are allowed")
def complete_systematics(self, text, line, begidx, endidx):
"""auto completion for the systematics command"""
args = self.split_arg(line[0:begidx], error=False)
options = ['--mur=', '--muf=', '--pdf=', '--dyn=','--alps=',
'--together=','--from_card ','--remove_wgts=',
'--keep_wgts=','--start_id=']
if len(args) == 1 and os.path.sep not in text:
data = misc.glob(pjoin('*','*events.lhe*'), pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
return self.list_completion(text, data, line)
elif len(args)==1:
return self.path_completion(text,
os.path.join('.',*[a for a in args \
if a.endswith(os.path.sep)]))
elif len(args)==2 and os.path.sep in args[1]:
return self.path_completion(text, '.')
elif not line.endswith(tuple(options)):
return self.list_completion(text, options)
e. Please retry'
elif self.options['nb_core'] != 1:
lhe = lhe_parser.EventFile(args[0])
nb_event = len(lhe)
lhe.close()
input = args[0]
if len(args)>1:
output = pjoin(os.getcwd(),args[1])
else:
output = input
lhaid = [self.run_card.get_lhapdf_id()]
if 'store_rwgt_info' in self.run_card and not self.run_card['store_rwgt_info']:
raise self.InvalidCmd, "The events was not generated with store_rwgt_info=True. Can not evaluate systematics error on this event file."
elif 'use_syst' in self.run_card:
if not self.run_card['use_syst']:
raise self.InvalidCmd, "The events was not generated with use_syst=True. Can not evaluate systematics error on this event file."
elif self.proc_characteristics['ninitial'] ==1:
if '--from_card' in opts:
logger.warning('systematics not available for decay processes. Bypass it')
return
else:
raise self.InvalidCmd, 'systematics not available for decay processes.'
try:
pdfsets_dir = self.get_lhapdf_pdfsetsdir()
except Exception, error:
logger.debug(str(error))
logger.warning('Systematic computation requires lhapdf to run. Bypass Systematics')
return
if '--from_card' in opts:
opts.remove('--from_card')
opts.append('--from_card=internal')
if 'sys_pdf' in self.run_card:
if '&&' in self.run_card['sys_pdf']:
line = ' '.join(self.run_card['sys_pdf'])
sys_pdf = line.split('&&')
lhaid += [l.split()[0] for l in sys_pdf]
else:
lhaid += [l for l in self.run_card['sys_pdf'].split() if not l.isdigit() or int(l) > 500]
else:
pdf = [a[6:] for a in opts if a.startswith('--pdf=')]
lhaid += [t.split('@')[0] for p in pdf for t in p.split(',')
if t not in ['errorset', 'central']]
try:
[self.copy_lhapdf_set([onelha], pdfsets_dir) for onelha in lhaid]
except Exception, error:
logger.debug(str(error))
logger.warning('impossible to download all the pdfsets. Bypass systematics')
return
if self.options['run_mode'] ==2:
nb_submit = min(self.options['nb_core'], nb_event//2500)
elif self.options['run_mode'] ==1:
nb_submit = min(self.options['cluster_size'], nb_event//25000)
else:
nb_submit =1
if MADEVENT:
import internal.systematics as systematics
else:
import madgraph.various.systematics as systematics
if nb_submit in [0,1]:
systematics.call_systematics([input, output] + opts,
log=lambda x: logger.info(str(x)),
result=result_file
)
elif self.options['run_mode'] in [1,2]:
event_per_job = nb_event // nb_submit
nb_job_with_plus_one = nb_event % nb_submit
start_event, stop_event = 0,0
for i in range(nb_submit):
event_requested = event_per_job
if i < nb_job_with_plus_one:
event_requested += 1
start_event = stop_event
stop_event = start_event + event_requested
prog = sys.executable
input_files = [os.path.basename(input)]
output_files = ['./tmp_%s_%s' % (i, os.path.basename(output)),
'./log_sys_%s.txt' % (i)]
argument = []
if not __debug__:
argument.append('-O')
argument += [pjoin(self.me_dir, 'bin', 'internal', 'systematics.py'),
input_files[0], output_files[0]] + opts +\
['--start_event=%i' % start_event,
'--stop_event=%i' %stop_event,
'--result=./log_sys_%s.txt' %i,
'--lhapdf_config=%s' % self.options['lhapdf']]
required_output = output_files
self.cluster.cluster_submit(prog, argument,
input_files=input_files,
output_files=output_files,
cwd=os.path.dirname(output),
required_output=required_output,
stdout='/dev/null'
)
starttime = time.time()
update_status = lambda idle, run, finish: \
self.update_status((idle, run, finish, 'running systematics'), level=None,
force=False, starttime=starttime)
try:
self.cluster.wait(os.path.dirname(output), update_status, update_first=update_status)
except Exception:
self.cluster.remove()
old_run_mode = self.options['run_mode']
self.options['run_mode'] =0
try:
out = self.do_systematics(line)
finally:
self.options['run_mode'] = old_run_mode
all_cross = []
for i in range(nb_submit):
pos=0
for line in open(pjoin(os.path.dirname(output), 'log_sys_%s.txt'%i)):
if line.startswith('#'):
continue
split = line.split()
if len(split) in [0,1]:
continue
key = tuple(float(x) for x in split[:-1])
cross= float(split[-1])
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['average', 'unity', 'bias']:
cross *= (event_per_job+1 if i <nb_job_with_plus_one else event_per_job)
if len(all_cross) > pos:
all_cross[pos] += cross
else:
all_cross.append(cross)
pos+=1
if 'event_norm' in self.run_card and \
self.run_card['event_norm'] in ['unity']:
all_cross= [cross/nb_event for cross in all_cross]
sys_obj = systematics.call_systematics([input, None] + opts,
log=lambda x: logger.info(str(x)),
result=result_file,
running=False
)
sys_obj.print_cross_sections(all_cross, nb_event, result_file)
subprocess.call(['cat']+\
['./tmp_%s_%s' % (i, os.path.basename(output)) for i in range(nb_submit)],
stdout=open(output,'w'),
cwd=os.path.dirname(output))
for i in range(nb_submit):
os.remove('%s/tmp_%s_%s' %(os.path.dirname(output),i,os.path.basename(output)))
self.update_status('End of systematics computation', level='parton', makehtml=False)
if not self.force_run:
# forbid this function to create an empty item in results.
if self.run_name and self.results.current and self.results.current['cross'] == 0:
self.results.delete_run(self.run_name, self.run_tag)
self.results.save()
# ensure that the run_card is present
if not hasattr(self, 'run_card'):
self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'))
# we want to run this in a separate shell to avoid hard f2py crash
command = [sys.executable]
if os.path.exists(pjoin(self.me_dir, 'bin', 'madevent')):
command.append(pjoin(self.me_dir, 'bin', 'internal','madevent_interface.py'))
else:
command.append(pjoin(self.me_dir, 'bin', 'internal', 'amcatnlo_run_interface.py'))
if not isinstance(self, cmd.CmdShell):
command.append('--web')
command.append('reweight')
######### START SINGLE CORE MODE ############
if self.options['nb_core']==1 or self.run_card['nevents'] < 101 or not check_multicore(self):
if self.run_name:
command.append(self.run_name)
else:
command += args
if '-from_cards' not in command:
command.append('-from_cards')
p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=os.getcwd())
while p.poll() is None:
line = p.stdout.readline()
if any(t in line for t in ['INFO:', 'WARNING:', 'CRITICAL:', 'ERROR:', 'root:','KEEP:']) and \
not '***********' in line:
print line[:-1].replace('INFO', 'REWEIGHT').replace('KEEP:','')
elif __debug__ and line:
logger.debug(line[:-1])
if p.returncode !=0:
logger.error("Reweighting failed")
return
self.results = self.load_results_db()
# forbid this function to create an empty item in results.
try:
if self.results[self.run_name][-2]['cross']==0:
self.results.delete_run(self.run_name,self.results[self.run_name][-2]['tag'])
except:
pass
try:
if self.results.current['cross'] == 0 and self.run_name:
self.results.delete_run(self.run_name, self.run_tag)
except:
pass
# re-define current run
try:
self.results.def_current(self.run_name, self.run_tag)
except Exception:
pass
return
########## END SINGLE CORE HANDLING #############
else:
########## START MULTI-CORE HANDLING #############
if not isinstance(self.cluster, cluster.MultiCore):
mycluster = cluster.MultiCore(nb_core=self.options['nb_core'])
else:
mycluster = self.cluster
new_args=list(args)
self.check_decay_events(new_args)
try:
os.remove(pjoin(self.me_dir,'rw_me','rwgt.pkl'))
except Exception, error:
pass
# prepare multi-core job:
import madgraph.various.lhe_parser as lhe_parser
# args now alway content the path to the valid files
if 'nevt_job' in self.run_card and self.run_card['nevt_job'] !=-1:
nevt_job = self.run_card['nevt_job']
else:
nevt_job = max(2500, self.run_card['nevents']/self.options['nb_core'])
logger.info("split the event file in bunch of %s events" % nevt_job)
nb_file = lhe_parser.EventFile(new_args[0]).split(nevt_job)
starttime = time.time()
update_status = lambda idle, run, finish: \
self.update_status((idle, run, finish, 'reweight'), level=None,
force=False, starttime=starttime)
all_lhe = []
devnull= open(os.devnull)
for i in range(nb_file):
new_command = list(command)
new_command.append('%s_%s.lhe' % (new_args[0],i))
all_lhe.append('%s_%s.lhe' % (new_args[0],i))
if '-from_cards' not in command:
new_command.append('-from_cards')
if i==0:
if __debug__:
stdout = None
else:
stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight.log'),'w')
new_command.append('--multicore=create')
else:
stdout = devnull
#stdout = open(pjoin(self.me_dir,'Events', self.run_name, 'reweight%s.log' % i),'w')
new_command.append('--multicore=wait')
mycluster.submit(prog=command[0], argument=new_command[1:], stdout=stdout, cwd=os.getcwd())
mycluster.wait(self.me_dir,update_status)
devnull.close()
logger.info("Collect and combine the various output file.")
lhe = lhe_parser.MultiEventFile(all_lhe, parse=False)
nb_event, cross_sections = lhe.write(new_args[0], get_info=True)
if any(os.path.exists('%s_%s_debug.log' % (f, self.run_tag)) for f in all_lhe):
for f in all_lhe:
if os.path.exists('%s_%s_debug.log' % (f, self.run_tag)):
raise Exception, "Some of the run failed: Please read %s_%s_debug.log" % (f, self.run_tag)
if 'event_norm' in self.run_card and self.run_card['event_norm'] in ['average','bias']:
for key, value in cross_sections.items():
cross_sections[key] = value / (nb_event+1)
lhe.remove()
for key in cross_sections:
if key == 'orig' or key.isdigit():
continue
logger.info('%s : %s pb' % (key, cross_sections[key]))
return
########## END MULTI-CORE HANDLING #############
self.to_store.append('event')
# forbid this function to create an empty item in results.
if not self.force_run and self.results.current['cross'] == 0 and self.run_name:
self.results.delete_run(self.run_name, self.run_tag)
self.check_decay_events(args)
# args now alway content the path to the valid files
reweight_cmd = reweight_interface.ReweightInterface(args[0], mother=self)
#reweight_cmd.use_rawinput = False
#reweight_cmd.mother = self
wgt_names = reweight_cmd.get_weight_names()
if wgt_names == [''] and reweight_cmd.has_nlo:
self.update_status('Running Reweighting (LO approximate)', level='madspin')
else:
self.update_status('Running Reweighting', level='madspin')
path = pjoin(self.me_dir, 'Cards', 'reweight_card.dat')
reweight_cmd.raw_input=False
reweight_cmd.me_dir = self.me_dir
reweight_cmd.multicore = multicore #allow the directory creation or not
print "We are in mode", multicore
reweight_cmd.import_command_file(path)
reweight_cmd.do_quit('')
logger.info("quit rwgt")
# re-define current run
try:
self.results.def_current(self.run_name, self.run_tag)
except Exception:
pass
############################################################################
def do_pgs(self, line):
"""launch pgs"""
args = self.split_arg(line)
# Check argument's validity
if '--no_default' in args:
no_default = True
args.remove('--no_default')
else:
no_default = False
if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
logger.info('No pgs_card detected, so not run pgs')
return
lock = self.check_pgs(args, no_default=no_default)
if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pgs_card.dat')):
files.cp(pjoin(self.me_dir, 'Cards', 'pgs_card_default.dat'),
pjoin(self.me_dir, 'Cards', 'pgs_card.dat'))
logger.info('No pgs card found. Take the default one.')
if not (no_default or self.force):
self.ask_edit_cards(['pgs_card.dat'])
self.update_status('prepare PGS run', level=None)
pgsdir = pjoin(self.options['pythia-pgs_path'], 'src')
eradir = self.options['exrootanalysis_path']
madir = self.options['madanalysis_path']
td = self.options['td_path']
if not misc.is_executable(pjoin(pgsdir, 'pgs')):
logger.info('No PGS executable -- running make')
misc.compile(cwd=pgsdir)
self.update_status('Running PGS', level='pgs')
tag = self.run_tag
banner_path = pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, self.run_tag))
if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
self.banner.add(pjoin(self.me_dir, 'Cards','pgs_card.dat'))
self.banner.write(banner_path)
else:
open(banner_path, 'w').close()
line and not any(arg.startswith(
'--input=') for arg in args):
return self.list_completion(text, ['--input=%s'%opt for opt in
(banner_mod.MadAnalysis5Card._default_hadron_inputs +['path'])], line)
else:
return self.list_completion(text, ['-f',
'--MA5_stdout_lvl=','--input=','--no_default', '--tag='], line)
def do_madanalysis5_hadron(self, line):
"""launch MadAnalysis5 at the hadron level."""
return self.run_madanalysis5(line,mode='hadron')
    def run_madanalysis5(self, line, mode='parton'):
        """launch MadAnalysis5 at the parton level or at the hadron level with
        a specific command line.

        mode is either 'parton' or 'hadron'; it selects which MA5 card is
        read ('madanalysis5_<mode>_card.dat') and which inputs are analysed.
        """
        # Check argument's validity
        args = self.split_arg(line)
        if '--no_default' in args:
            no_default = True
            args.remove('--no_default')
        else:
            no_default = False
        # With --no_default we silently skip when the relevant card is absent;
        # otherwise let the user configure the MA5 run interactively.
        if no_default:
            if mode=='parton' and not os.path.exists(pjoin(self.me_dir, 'Cards',
                                                    'madanalysis5_parton_card.dat')):
                return
            if mode=='hadron' and not os.path.exists(pjoin(self.me_dir, 'Cards',
                                                    'madanalysis5_hadron_card.dat')):
                return
        else:
            self.ask_madanalysis5_run_configuration(runtype=mode)
        # MA5 must be installed and at least one MA5 card must be present.
        if not self.options['madanalysis5_path'] or \
            all(not os.path.exists(pjoin(self.me_dir, 'Cards',card)) for card in
            ['madanalysis5_parton_card.dat','madanalysis5_hadron_card.dat']):
            if no_default:
                return
            else:
                raise InvalidCmd('You must have MadAnalysis5 available to run'+
                    " this command. Consider installing it with the 'install' function.")
        # check_madanalysis5 may define self.run_name, hence the order switch.
        if not self.run_name:
            MA5_opts = self.check_madanalysis5(args, mode=mode)
            self.configure_directory(html_opening =False)
        else:
            self.configure_directory(html_opening =False)
            MA5_opts = self.check_madanalysis5(args, mode=mode)
        if MA5_opts['inputs']==[]:
            if no_default:
                logger.warning('No hadron level input found to run MadAnalysis5 on.'+
                    ' Skipping its hadron-level analysis.')
                return
            else:
                raise self.InvalidCmd('\nNo input files specified or availabled for'+
                    ' this MadAnalysis5 hadron-level run.\nPlease double-check the options of this'+
                    ' MA5 command (or card) and which output files\nare currently in the chosen'+
                    " run directory '%s'."%self.run_name)
        MA5_card = banner_mod.MadAnalysis5Card(pjoin(self.me_dir, 'Cards',
            'madanalysis5_%s_card.dat'%mode), mode=mode)
        # The user can disable the analysis from within the card itself.
        if MA5_card._skip_analysis:
            logger.info('Madanalysis5 %s-level analysis was skipped following user request.'%mode)
            logger.info("To run the analysis, remove or comment the tag '%s skip_analysis' "
                        %banner_mod.MadAnalysis5Card._MG5aMC_escape_tag+
                        "in\n '%s'."%pjoin(self.me_dir, 'Cards','madanalysis5_%s_card.dat'%mode))
            return
        # Translate the card into batches of MA5 commands, one per runtag.
        MA5_cmds_list = MA5_card.get_MA5_cmds(MA5_opts['inputs'],
            pjoin(self.me_dir,'MA5_%s_ANALYSIS'%mode.upper()),
            run_dir_path = pjoin(self.me_dir,'Events', self.run_name),
            UFO_model_path=pjoin(self.me_dir,'bin','internal','ufomodel'),
            run_tag = self.run_tag)
        # for MA5_runtag, MA5_cmds in MA5_cmds_list:
        #     misc.sprint('****************************************')
        #     misc.sprint('* Commands for MA5 runtag %s:'%MA5_runtag)
        #     misc.sprint('\n'+('\n'.join('* %s'%cmd for cmd in MA5_cmds)))
        #     misc.sprint('****************************************')
        self.update_status('\033[92mRunning MadAnalysis5 [arXiv:1206.1599]\033[0m',
                           level='madanalysis5_%s'%mode)
        if mode=='hadron':
            logger.info('Hadron input files considered:')
            for input in MA5_opts['inputs']:
                logger.info(' --> %s'%input)
        elif mode=='parton':
            logger.info('Parton input file considered:')
            logger.info(' --> %s'%MA5_opts['inputs'])
        # Obtain a main MA5 interpreter
        # Ideally we would like to do it all with a single interpreter
        # but we'd need a way to reset it for this.
        if MA5_opts['MA5_stdout_lvl']=='default':
            if MA5_card['stdout_lvl'] is None:
                MA5_lvl = self.options['stdout_level']
            else:
                MA5_lvl = MA5_card['stdout_lvl']
        else:
            MA5_lvl = MA5_opts['MA5_stdout_lvl']
        MA5_interpreter = CommonRunCmd.get_MadAnalysis5_interpreter(
            self.options['mg5_path'],
            self.options['madanalysis5_path'],
            logstream=sys.stdout,
            loglevel=100,
            forced=True,
            compilation=True)
        # A None interpreter means MA5 could not be set up: give up quietly.
        if MA5_interpreter is None:
            return
        # A fifo can be consumed only once, so remember the ones already read.
        used_up_fifos = []
        for MA5_runtag, MA5_cmds in MA5_cmds_list:
            # Re-initialise the interpreter for each batch (log muted to 100).
            MA5_interpreter.setLogLevel(100)
            if mode=='hadron':
                MA5_interpreter.init_reco()
            else:
                MA5_interpreter.init_parton()
            MA5_interpreter.setLogLevel(MA5_lvl)
            if MA5_runtag!='default':
                if MA5_runtag.startswith('_reco_'):
                    logger.info("MadAnalysis5 now running the reconstruction '%s'..."%
                                MA5_runtag[6:],'$MG:color:GREEN')
                elif MA5_runtag=='Recasting':
                    logger.info("MadAnalysis5 now running the recasting...",
                                '$MG:color:GREEN')
                else:
                    logger.info("MadAnalysis5 now running the '%s' analysis..."%
                                MA5_runtag,'$MG:color:GREEN')
            if not CommonRunCmd.runMA5(MA5_interpreter, MA5_cmds, MA5_runtag,
                pjoin(self.me_dir,'Events',self.run_name,'%s_MA5_%s.log'%(self.run_tag,MA5_runtag))):
                # Unsuccessful MA5 run, we therefore stop here.
                return
            if MA5_runtag.startswith('_reco_'):
                # When doing a reconstruction we must first link the event file
                # created with MA5 reconstruction and then directly proceed to the
                # next batch of instructions. There can be several output directory
                # if there were several input files.
                links_created=[]
                for i, input in enumerate(MA5_opts['inputs']):
                    # Make sure it is not an lhco or root input, which would not
                    # undergo any reconstruction of course.
                    if not banner_mod.MadAnalysis5Card.events_can_be_reconstructed(input):
                        continue
                    if input.endswith('.fifo'):
                        if input in used_up_fifos:
                            # Only run once on each fifo
                            continue
                        else:
                            used_up_fifos.append(input)
                    reco_output = pjoin(self.me_dir,
                        'MA5_%s_ANALYSIS%s_%d'%(mode.upper(),MA5_runtag,i+1))
                    # Look for either a root or .lhe.gz output
                    reco_event_file = misc.glob('*.lhe.gz',pjoin(reco_output,'Output','_reco_events'))+\
                                      misc.glob('*.root',pjoin(reco_output,'Output','_reco_events'))
                    if len(reco_event_file)==0:
                        raise MadGraph5Error, "MadAnalysis5 failed to produce the "+\
                            "reconstructed event file for reconstruction '%s'."%MA5_runtag[6:]
                    reco_event_file = reco_event_file[0]
                    # move the reconstruction output to the HTML directory
                    shutil.move(reco_output,pjoin(self.me_dir,'HTML',
                        self.run_name,'%s_MA5_%s_ANALYSIS%s_%d'%
                        (self.run_tag,mode.upper(),MA5_runtag,i+1)))
                    # link the reconstructed event file to the run directory
                    links_created.append(os.path.basename(reco_event_file))
                    files.ln(pjoin(self.me_dir,'HTML',self.run_name,
                        '%s_MA5_%s_ANALYSIS%s_%d'%(self.run_tag,mode.upper(),
                        MA5_runtag,i+1),'Output','_reco_events',links_created[-1]),
                        pjoin(self.me_dir,'Events',self.run_name))
                logger.info("MadAnalysis5 successfully completed the reconstruction "+
                            "'%s'. Links to the reconstructed event files are:"%MA5_runtag[6:])
                for link in links_created:
                    logger.info(' --> %s'%pjoin(self.me_dir,'Events',self.run_name,link))
                continue
            # Analysis / recasting batch: locate the report produced by MA5.
            if MA5_runtag.upper()=='RECASTING':
                target = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s'\
                        %(mode.upper(),MA5_runtag),'Output','CLs_output_summary.dat')
            else:
                target = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s'\
                        %(mode.upper(),MA5_runtag),'PDF','main.pdf')
            has_pdf = True
            if not os.path.isfile(target):
                has_pdf = False
            # Copy the PDF report or CLs in the Events/run directory.
            if MA5_runtag.upper()=='RECASTING':
                carboncopy_name = '%s_MA5_CLs.dat'%(self.run_tag)
            else:
                carboncopy_name = '%s_MA5_%s_analysis_%s.pdf'%(
                    self.run_tag,mode,MA5_runtag)
            if has_pdf:
                shutil.copy(target, pjoin(self.me_dir,'Events',self.run_name,carboncopy_name))
            else:
                logger.error('MadAnalysis5 failed to create PDF output')
            if MA5_runtag!='default':
                logger.info("MadAnalysis5 successfully completed the "+
                    "%s. Reported results are placed in:"%("analysis '%s'"%MA5_runtag
                    if MA5_runtag.upper()!='RECASTING' else "recasting"))
            else:
                logger.info("MadAnalysis5 successfully completed the analysis."+
                            " Reported results are placed in:")
            logger.info(' --> %s'%pjoin(self.me_dir,'Events',self.run_name,carboncopy_name))
            anal_dir = pjoin(self.me_dir,'MA5_%s_ANALYSIS_%s' %(mode.upper(),MA5_runtag))
            if not os.path.exists(anal_dir):
                logger.error('MadAnalysis5 failed to completed succesfully')
                return
            # Copy the entire analysis in the HTML directory
            shutil.move(anal_dir, pjoin(self.me_dir,'HTML',self.run_name,
                '%s_MA5_%s_ANALYSIS_%s'%(self.run_tag,mode.upper(),MA5_runtag)))
        # Set the number of events and cross-section to the last one
        # (maybe do something smarter later)
        new_details={}
        for detail in ['nb_event','cross','error']:
            new_details[detail] = \
                self.results[self.run_name].get_current_info()[detail]
        for detail in new_details:
            self.results.add_detail(detail,new_details[detail])
        self.update_status('Finished MA5 analyses.', level='madanalysis5_%s'%mode,
                           makehtml=False)
        #Update the banner
        self.banner.add(pjoin(self.me_dir, 'Cards',
                              'madanalysis5_%s_card.dat'%mode))
        banner_path = pjoin(self.me_dir,'Events', self.run_name,
                            '%s_%s_banner.txt'%(self.run_name, self.run_tag))
        self.banner.write(banner_path)
        if not no_default:
            logger.info('Find more information about this run on the HTML local page')
            logger.info(' --> %s'%pjoin(self.me_dir,'index.html'))
############################################################################
# End of MadAnalysis5 related function
############################################################################
    def do_delphes(self, line):
        """ run delphes and make associate root file/plot

        With --no_default the run is silently skipped when no delphes card
        is present; otherwise a default card is copied in and the user is
        offered the chance to edit it.
        """
        args = self.split_arg(line)
        # Check argument's validity
        if '--no_default' in args:
            no_default = True
            args.remove('--no_default')
        else:
            no_default = False
        if no_default and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
            logger.info('No delphes_card detected, so not run Delphes')
            return
        lock = self.check_pgs(args, no_default=no_default) if False else None  # placeholder never executed
        filepath = self.check_delphes(args, nodefault=no_default)
        if no_default and not filepath:
            return
        self.update_status('prepare delphes run', level=None)
        # Delphes 2 ships a 'data' directory; its absence implies Delphes 3.
        if os.path.exists(pjoin(self.options['delphes_path'], 'data')):
            delphes3 = False
            prog = '../bin/internal/run_delphes'
            if filepath and '.hepmc' in filepath[:-10]:
                raise self.InvalidCmd, 'delphes2 do not support hepmc'
        else:
            delphes3 = True
            prog = '../bin/internal/run_delphes3'
        if not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_card.dat')):
            if no_default:
                logger.info('No delphes_card detected, so not running Delphes')
                return
            files.cp(pjoin(self.me_dir, 'Cards', 'delphes_card_default.dat'),
                     pjoin(self.me_dir, 'Cards', 'delphes_card.dat'))
            logger.info('No delphes card found. Take the default one.')
        # Delphes 2 additionally requires a trigger card.
        if not delphes3 and not os.path.exists(pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat')):
            files.cp(pjoin(self.me_dir, 'Cards', 'delphes_trigger_default.dat'),
                     pjoin(self.me_dir, 'Cards', 'delphes_trigger.dat'))
        if not (no_default or self.force):
            if delphes3:
                self.ask_edit_cards(['delphes_card.dat'], args)
            else:
                self.ask_edit_cards(['delphes_card.dat', 'delphes_trigger.dat'], args)
        self.update_status('Running Delphes', level=None)
        delphes_dir = self.options['delphes_path']
        tag = self.run_tag
        # Record the delphes card(s) in this run's banner.
        if os.path.exists(pjoin(self.me_dir, 'Source', 'banner_header.txt')):
            self.banner.add(pjoin(self.me_dir, 'Cards','delphes_card.dat'))
            if not delphes3:
                self.banner.add(pjoin(self.me_dir, 'Cards','delphes_trigger.dat'))
            self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, '%s_%s_banner.txt' % (self.run_name, tag)))
        cross = self.results[self.run_name].get_current_info()['cross']
        delphes_log = pjoin(self.me_dir, 'Events', self.run_name, "%s_delphes.log" % tag)
        # Run through the cluster abstraction (single-core fallback).
        if not self.cluster:
            clus = cluster.onecore
        else:
            clus = self.cluster
        clus.launch_and_wait(prog,
                        argument= [delphes_dir, self.run_name, tag, str(cross), filepath],
                        stdout=delphes_log, stderr=subprocess.STDOUT,
                        cwd=pjoin(self.me_dir,'Events'))
        if not os.path.exists(pjoin(self.me_dir, 'Events',
                                self.run_name, '%s_delphes_events.lhco.gz' % tag))\
           and not os.path.exists(pjoin(self.me_dir, 'Events',
                                self.run_name, '%s_delphes_events.lhco' % tag)):
            logger.info('If you are interested in lhco output. please run root2lhco converter.')
            logger.info(' or edit bin/internal/run_delphes3 to run the converter automatically.')
        madir = self.options['madanalysis_path']
        td = self.options['td_path']
        # Plot from the lhco output (if produced) and gzip it afterwards.
        if os.path.exists(pjoin(self.me_dir, 'Events',
                                self.run_name, '%s_delphes_events.lhco' % tag)):
            self.create_plot('Delphes')
        if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag)):
            misc.gzip(pjoin(self.me_dir, 'Events', self.run_name, '%s_delphes_events.lhco' % tag))
        self.update_status('delphes done', level='delphes', makehtml=False)
o small (raise a warning if this is the case)
3) if dependent is on True check for dependent parameter (automatic for scan)"""
pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
text = open(path).read()
if pattern_scan.search(text):
if not isinstance(self, cmd.CmdShell):
raise Exception, "Scan are not allowed in web mode"
main_card = check_param_card.ParamCardIterator(text)
self.param_card_iterator = main_card
first_card = main_card.next(autostart=True)
first_card.write(path)
return self.check_param_card(path, run, dependent=True)
pdg_info = pattern_width.findall(text)
if pdg_info:
if run:
logger.info('Computing the width set on auto in the param_card.dat')
has_nlo = any(nlo.lower()=="@nlo" for _,nlo in pdg_info)
pdg = [pdg for pdg,nlo in pdg_info]
if not has_nlo:
self.do_compute_widths('%s %s' % (' '.join(pdg), path))
else:
self.do_compute_widths('%s %s --nlo' % (' '.join(pdg), path))
else:
logger.info('''Some width are on Auto in the card.
Those will be computed as soon as you have finish the edition of the cards.
If you want to force the computation right now and being able to re-edit
the cards afterwards, you can type \"compute_wdiths\".''')
card = check_param_card.ParamCard(path)
if dependent:
AskforEditCard.update_dependent(self, self.me_dir, card, path, timer=20)
for param in card['decay']:
width = param.value
if width == 0:
continue
try:
mass = card['mass'].get(param.lhacode).value
except Exception:
logger.warning('Missing mass in the lhef file (%s) . Please fix this (use the "update missing" command if needed)', param.lhacode[0])
continue
if mass and width/mass < 1e-12:
logger.error('The width of particle %s is too small for an s-channel resonance (%s). If you have this particle in an s-channel, this is likely to create numerical instabilities .', param.lhacode[0], width)
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
elif not mass and width:
logger.error('The width of particle %s is different of zero for a massless particle.', param.lhacode[0])
if CommonRunCmd.sleep_for_error:
time.sleep(5)
CommonRunCmd.sleep_for_error = False
return
def add_error_log_in_html(self, errortype=None):
"""If a ME run is currently running add a link in the html output"""
if hasattr(self, 'results') and hasattr(self.results, 'current') and\
self.results.current and 'run_name' in self.results.current and \
hasattr(self, 'me_dir'):
name = self.results.current['run_name']
tag = self.results.current['tag']
self.debug_output = pjoin(self.me_dir, '%s_%s_debug.log' % (name,tag))
if errortype:
self.results.current.debug = errortype
else:
self.results.current.debug = self.debug_output
else:
self.debug_output = CommonRunCmd.debug_output
if os.path.exists('ME5_debug') and not 'ME5_debug' in self.debug_output:
os.remove('ME5_debug')
if not 'ME5_debug' in self.debug_output:
os.system('ln -s %s ME5_debug &> /dev/null' % self.debug_output)
def do_quit(self, line):
"""Not in help: exit """
if not self.force_run:
try:
os.remove(pjoin(self.me_dir,'RunWeb'))
except Exception:
pass
try:
self.store_result()
except Exception:
pass
try:
self.update_status('', level=None)
except Exception, error:
pass
self.gen_card_html()
return super(CommonRunCmd, self).do_quit(line)
do_EOF = do_quit
do_exit = do_quit
    def update_status(self, status, level, makehtml=True, force=True,
                      error=False, starttime = None, update_results=True,
                      print_log=True):
        """ update the index status

        status: a plain string, or a cluster-monitoring tuple whose first
                three entries are (idle, running, finished) job counts.
        level:  section of the html index concerned by the update.
        """
        # Rate-limit non-forced html refreshes to at most one every 3 seconds.
        if makehtml and not force:
            if hasattr(self, 'next_update') and time.time() < self.next_update:
                return
            else:
                self.next_update = time.time() + 3
        if print_log:
            if isinstance(status, str):
                if '<br>' not in status:
                    logger.info(status)
            elif starttime:
                # tuple status with a timer: show elapsed wall-clock time too
                running_time = misc.format_timer(time.time()-starttime)
                logger.info(' Idle: %s, Running: %s, Completed: %s [ %s ]' % \
                           (status[0], status[1], status[2], running_time))
            else:
                logger.info(' Idle: %s, Running: %s, Completed: %s' % status[:3])
        # Strip a leading ANSI colour escape before storing the status
        # (assumes the string ends with a 7-character reset escape - hence -7).
        if isinstance(status, str) and status.startswith('\x1b['):
            status = status[status.index('m')+1:-7]
        # Drop any arXiv citation from the stored status line.
        if 'arXiv' in status:
            if '[' in status:
                status = status.split('[',1)[0]
            else:
                status = status.split('arXiv',1)[0]
        if update_results:
            self.results.update(status, level, makehtml=makehtml, error=error)
lf.options[key])
elif key == "notification_center":
if self.options[key] in ['False', 'True']:
self.allow_notification_center =ast.literal_eval(self.options[key])
self.options[key] =ast.literal_eval(self.options[key])
elif key not in ['text_editor','eps_viewer','web_browser','stdout_level',
'complex_mass_scheme', 'gauge', 'group_subprocesses']:
try:
self.do_set("%s %s --no_save" % (key, self.options[key]), log=False)
except self.InvalidCmd:
logger.warning("Option %s from config file not understood" \
% key)
misc.open_file.configure(self.options)
self.configure_run_mode(self.options['run_mode'])
return self.options
@staticmethod
def find_available_run_name(me_dir):
""" find a valid run_name for the current job """
name = 'run_%02d'
data = [int(s[4:j]) for s in os.listdir(pjoin(me_dir,'Events')) for
j in range(4,len(s)+1) if \
s.startswith('run_') and s[4:j].isdigit()]
return name % (max(data+[0])+1)
he decayed event file has been moved to the following location: ")
logger.info(new_file)
if hasattr(self, 'results'):
current = self.results.current
nb_event = self.results.current['nb_event']
if not nb_event:
current = self.results[self.run_name][0]
nb_event = current['nb_event']
cross = current['cross']
error = current['error']
self.results.add_run( new_run, self.run_card)
self.results.add_detail('nb_event', int(nb_event*madspin_cmd.efficiency))
self.results.add_detail('cross', madspin_cmd.cross)#cross * madspin_cmd.branching_ratio)
self.results.add_detail('error', madspin_cmd.error+ cross * madspin_cmd.err_branching_ratio)
self.results.add_detail('run_mode', current['run_mode'])
self.run_name = new_run
self.banner = madspin_cmd.banner
self.banner.add(path)
self.banner.write(pjoin(self.me_dir,'Events',self.run_name, '%s_%s_banner.txt' %
(self.run_name, self.run_tag)))
self.update_status('MadSpin Done', level='parton', makehtml=False)
if 'unweighted' in os.path.basename(args[0]):
self.create_plot('parton')
def complete_decay_events(self, text, line, begidx, endidx):
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
return self.complete_plot(text, line, begidx, endidx)
else:
return
def complete_print_results(self,text, line, begidx, endidx):
"Complete the print results command"
args = self.split_arg(line[0:begidx], error=False)
if len(args) == 1:
#return valid run_name
data = misc.glob(pjoin('*','unweighted_events.lhe.gz'),
pjoin(self.me_dir, 'Events'))
data = [n.rsplit('/',2)[1] for n in data]
tmp1 = self.list_completion(text, data)
return tmp1
else:
data = misc.glob('*_pythia_events.hep.gz', pjoin(self.me_dir, 'Events', args[0]))
data = [os.path.basename(p).rsplit('_',1)[0] for p in data]
data += ["--mode=a", "--mode=w", "--path=", "--format=short"]
tmp1 = self.list_completion(text, data)
return tmp1
    def help_print_result(self):
        """Print the usage summary for the print_result command."""
        logger.info("syntax: print_result [RUN] [TAG] [options]")
        logger.info("-- show in text format the status of the run (cross-section/nb-event/...)")
        logger.info("--path= defines the path of the output file.")
        logger.info("--mode=a allow to add the information at the end of the file.")
        logger.info("--format=short (only if --path is define)")
        logger.info("    allows to have a multi-column output easy to parse")
############################################################################
def do_check_events(self, line):
""" Run some sanity check on the generated events."""
# Check that MG5 directory is present .
if MADEVENT and not self.options['mg5_path']:
raise self.InvalidCmd, '''The module reweight requires that MG5 is installed on the system.
You can install it and set its path in ./Cards/me5_configuration.txt'''
elif MADEVENT:
sys.path.append(self.options['mg5_path'])
try:
import madgraph.interface.reweight_interface as reweight_interface
except ImportError:
raise self.ConfigurationError, '''Can\'t load Reweight module.
The variable mg5_path might not be correctly configured.'''
args = self.split_arg(line)
self.check_check_events(args)
reweight_cmd = reweight_interface.ReweightInterface(args[0], allow_madspin=True)
reweight_cmd.mother = self
self.update_status('Running check on events', level='check')
reweight_cmd.check_events()
_full(make_opts, self.make_opts_var)
@staticmethod
def update_make_opts_full(path, def_variables, keep_old=True):
"""update the make_opts file writing the environmental variables
of def_variables.
if a value of the dictionary is None then it is not written.
"""
make_opts = path
pattern = re.compile(r'^(\w+)\s*=\s*(.*)$',re.DOTALL)
diff = False
tag = '#end_of_make_opts_variables\n'
make_opts_variable = True
content = []
variables = dict(def_variables)
need_keys = variables.keys()
for line in open(make_opts):
line = line.strip()
if make_opts_variable:
if line.startswith('#') or not line:
if line.startswith('#end_of_make_opts_variables'):
make_opts_variable = False
continue
elif pattern.search(line):
key, value = pattern.search(line).groups()
if key not in variables:
variables[key] = value
elif value != variables[key]:
diff=True
else:
need_keys.remove(key)
else:
make_opts_variable = False
content.append(line)
else:
content.append(line)
if need_keys:
diff=True
content_variables = '\n'.join('%s=%s' % (k,v) for k, v in variables.items() if v is not None)
content_variables += '\n%s' % tag
if diff:
with open(make_opts, 'w') as fsock:
fsock.write(content_variables + '\n'.join(content))
return
def link_lhapdf(self, libdir, extra_dirs = []):
    """Link the LHAPDF library and PDF-set directory into *libdir* and
    record the lhapdf configuration in self.make_opts_var.

    NOTE(review): ``extra_dirs`` is not used in this body -- confirm it is
    required by the signature of callers/overriders.
    """
    # ask lhapdf-config (path stored in self.options['lhapdf']) for version
    lhapdf_version = self.get_lhapdf_version()
    logger.info('Using LHAPDF v%s interface for PDFs' % lhapdf_version)
    lhalibdir = subprocess.Popen([self.options['lhapdf'], '--libdir'],
                                 stdout = subprocess.PIPE).stdout.read().strip()

    # LHAPDF 5 and 6 expose the PDF-set directory under different flags
    if lhapdf_version.startswith('5.'):
        pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
                                      stdout = subprocess.PIPE).stdout.read().strip()
    else:
        pdfsetsdir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
                                      stdout = subprocess.PIPE).stdout.read().strip()

    # cache the parsed pdfsets index for later lhaid lookups
    self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsetsdir)

    # replace any stale link/copy of the static library by a fresh symlink
    lhalib = 'libLHAPDF.a'
    if os.path.exists(pjoin(libdir, lhalib)):
        files.rm(pjoin(libdir, lhalib))
    files.ln(pjoin(lhalibdir, lhalib), libdir)

    # create the local PDFsets directory if not present
    if not os.path.isdir(pjoin(libdir, 'PDFsets')):
        os.mkdir(pjoin(libdir, 'PDFsets'))

    # export the lhapdf configuration to make_opts (major/minor split)
    self.make_opts_var['lhapdf'] = self.options['lhapdf']
    self.make_opts_var['lhapdfversion'] = lhapdf_version[0]
    self.make_opts_var['lhapdfsubversion'] = lhapdf_version.split('.',2)[1]
    self.make_opts_var['lhapdf_config'] = self.options['lhapdf']
def get_characteristics(self, path=None):
    """Load the proc_characteristics file and return (and cache) the
    corresponding ProcCharacteristic object."""
    # default location: inside the SubProcesses directory of the process dir
    target = path or os.path.join(self.me_dir, 'SubProcesses', 'proc_characteristics')
    self.proc_characteristics = banner_mod.ProcCharacteristic(target)
    return self.proc_characteristics
def copy_lhapdf_set(self, lhaid_list, pdfsets_dir):
    """Ensure every PDF set of *lhaid_list* is available in lib/PDFsets.

    Entries of lhaid_list may be numeric lha ids (resolved through the
    lhapdf index) or directly set names.  Missing sets are downloaded via
    install_lhapdf_pdfset; sets no longer requested are removed from
    lib/PDFsets.  On clusters with a local LHAPDF installation, LHAPATH
    is pointed to it instead of copying.
    """
    if not hasattr(self, 'lhapdf_pdfsets'):
        self.lhapdf_pdfsets = self.get_lhapdf_pdfsets_list(pdfsets_dir)

    # resolve numeric lhaid -> set (file) name
    pdfsetname = set()
    for lhaid in lhaid_list:
        if isinstance(lhaid, str) and lhaid.isdigit():
            lhaid = int(lhaid)
        if isinstance(lhaid, (int, float)):
            try:
                if lhaid in self.lhapdf_pdfsets:
                    pdfsetname.add(self.lhapdf_pdfsets[lhaid]['filename'])
                else:
                    raise MadGraph5Error('lhaid %s not valid input number for the current lhapdf' % lhaid )
            except KeyError:
                if self.lhapdf_version.startswith('5'):
                    raise MadGraph5Error(
                        ('invalid lhaid set in th run_card: %d .\nPlease note that some sets' % lhaid) +
                        '(eg MSTW 90%CL error sets) \nare not available in aMC@NLO + LHAPDF 5.x.x')
                else:
                    logger.debug('%d not found in pdfsets.index' % lhaid)
        else:
            # already a set name
            pdfsetname.add(lhaid)

    # make sure the central pdfsets directory exists; if it cannot be
    # created fall back to the local lib/PDFsets.  When it already exists,
    # clean out local sets that are no longer requested.
    if not os.path.isdir(pdfsets_dir):
        try:
            os.mkdir(pdfsets_dir)
        except OSError:
            pdfsets_dir = pjoin(self.me_dir, 'lib', 'PDFsets')
    elif os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets')):
        for name in os.listdir(pjoin(self.me_dir, 'lib', 'PDFsets')):
            if name not in pdfsetname:
                try:
                    if os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', name)):
                        shutil.rmtree(pjoin(self.me_dir, 'lib', 'PDFsets', name))
                    else:
                        os.remove(pjoin(self.me_dir, 'lib', 'PDFsets', name))
                except Exception as error:
                    logger.debug('%s', error)

    # candidate locations of a cluster-local LHAPDF installation
    if self.options["cluster_local_path"]:
        lhapdf_cluster_possibilities = [self.options["cluster_local_path"],
               pjoin(self.options["cluster_local_path"], "lhapdf"),
               pjoin(self.options["cluster_local_path"], "lhapdf", "pdfsets"),
               pjoin(self.options["cluster_local_path"], "..", "lhapdf"),
               pjoin(self.options["cluster_local_path"], "..", "lhapdf", "pdfsets"),
               pjoin(self.options["cluster_local_path"], "..", "lhapdf","pdfsets", "6.1")
               ]
    else:
        lhapdf_cluster_possibilities = []

    for pdfset in pdfsetname:
        if self.options["cluster_local_path"] and self.options["run_mode"] == 1 and \
            any((os.path.exists(pjoin(d, pdfset)) for d in lhapdf_cluster_possibilities)):
            # the set is available on the cluster nodes: point LHAPATH at it
            os.environ["LHAPATH"] = [d for d in lhapdf_cluster_possibilities if os.path.exists(pjoin(d, pdfset))][0]
            os.environ["CLUSTER_LHAPATH"] = os.environ["LHAPATH"]
            # remove any central copy so that the cluster one is used
            if os.path.exists(pjoin(pdfsets_dir, pdfset)):
                try:
                    # fix: the original removed 'name' (a leftover loop
                    # variable from the cleanup above) instead of 'pdfset'
                    if os.path.isdir(pjoin(pdfsets_dir, pdfset)):
                        shutil.rmtree(pjoin(pdfsets_dir, pdfset))
                    else:
                        os.remove(pjoin(pdfsets_dir, pdfset))
                except Exception as error:
                    logger.debug('%s', error)
        elif not os.path.exists(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)) and \
             not os.path.isdir(pjoin(self.me_dir, 'lib', 'PDFsets', pdfset)):
            # download if needed, then copy next to the executable
            if pdfset and not os.path.exists(pjoin(pdfsets_dir, pdfset)):
                self.install_lhapdf_pdfset(pdfsets_dir, pdfset)
            if os.path.exists(pjoin(pdfsets_dir, pdfset)):
                files.cp(pjoin(pdfsets_dir, pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
            elif os.path.exists(pjoin(os.path.dirname(pdfsets_dir), pdfset)):
                files.cp(pjoin(os.path.dirname(pdfsets_dir), pdfset), pjoin(self.me_dir, 'lib', 'PDFsets'))
def install_lhapdf_pdfset(self, pdfsets_dir, filename):
    """Download and install the PDF set *filename* into *pdfsets_dir*,
    falling back to the local lib/PDFsets on failure (instance wrapper
    around install_lhapdf_pdfset_static)."""
    lhapdf_version = self.get_lhapdf_version()
    fallback = pjoin(self.me_dir, 'lib', 'PDFsets')
    return self.install_lhapdf_pdfset_static(
        self.options['lhapdf'], pdfsets_dir, filename,
        lhapdf_version=lhapdf_version, alternate_path=fallback)
@staticmethod
def install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir, filename,
                                 lhapdf_version=None, alternate_path=None):
    """Download and install the PDF set *filename* into *pdfsets_dir*.

    Usable independently of the class.  When the global installation
    fails: for LHAPDF 5 the set is retried into *alternate_path* (the
    local lib/PDFsets); for LHAPDF 6 a name without the '.LHgrid' suffix
    is retried.  Raises MadGraph5Error when every attempt fails.
    """
    if not lhapdf_version:
        lhapdf_version = subprocess.Popen([lhapdf_config, '--version'],
                   stdout = subprocess.PIPE).stdout.read().strip()
    if not pdfsets_dir:
        pdfsets_dir = subprocess.Popen([lhapdf_config, '--datadir'],
                   stdout = subprocess.PIPE).stdout.read().strip()

    # a numeric lha id is resolved to a set name through the index
    if isinstance(filename, int):
        pdf_info = CommonRunCmd.get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version)
        filename = pdf_info[filename]['filename']

    if os.path.exists(pjoin(pdfsets_dir, filename)):
        logger.debug('%s is already present in %s', filename, pdfsets_dir)
        return

    logger.info('Trying to download %s' % filename)
    # LHAPDF5 ships 'lhapdf-getdata'; LHAPDF6 uses 'lhapdf install'
    if lhapdf_version.startswith('5.'):
        getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf-getdata'))
        misc.call([getdata, filename], cwd = pdfsets_dir)
    elif lhapdf_version.startswith('6.'):
        getdata = lhapdf_config.replace('lhapdf-config', ('lhapdf'))
        misc.call([getdata, 'install', filename], cwd = pdfsets_dir)
    else:
        raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)

    # check that the download worked, otherwise use the fallbacks
    if os.path.exists(pjoin(pdfsets_dir, filename)) or \
       os.path.isdir(pjoin(pdfsets_dir, filename)):
        logger.info('%s successfully downloaded and stored in %s' \
                % (filename, pdfsets_dir))
    elif lhapdf_version.startswith('5.'):
        logger.warning('Could not download %s into %s. Trying to save it locally' \
                % (filename, pdfsets_dir))
        CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, alternate_path, filename,
                                                  lhapdf_version=lhapdf_version)
    elif lhapdf_version.startswith('6.') and '.LHgrid' in filename:
        logger.info('Could not download %s: Try %s', filename, filename.replace('.LHgrid',''))
        return CommonRunCmd.install_lhapdf_pdfset_static(lhapdf_config, pdfsets_dir,
                                                         filename.replace('.LHgrid',''),
                                                         lhapdf_version, alternate_path)
    else:
        # fix: converted the py2-only 'raise E, msg' form to the call form
        # used elsewhere in this method (py2.6+/py3 compatible)
        raise MadGraph5Error(
            'Could not download %s into %s. Please try to install it manually.'
            % (filename, pdfsets_dir))
def get_lhapdf_pdfsets_list(self, pdfsets_dir):
    """Return a {lhaid: info-dict} mapping describing every PDF set
    listed in the pdfsets index associated with *pdfsets_dir*."""
    return self.get_lhapdf_pdfsets_list_static(pdfsets_dir,
                                               self.get_lhapdf_version())
@staticmethod
def get_lhapdf_pdfsets_list_static(pdfsets_dir, lhapdf_version):
if lhapdf_version.startswith('5.'):
if os.path.exists('%s.index' % pdfsets_dir):
indexfile = '%s.index' % pdfsets_dir
else:
raise MadGraph5Error, 'index of lhapdf file not found'
pdfsets_lines = \
[l for l in open(indexfile).read().split('\n') if l.strip() and \
not '90cl' in l]
lhapdf_pdfsets = dict( (int(l.split()[0]), {'lhaid': int(l.split()[0]),
'pdflib_ntype': int(l.split()[1]),
'pdflib_ngroup': int(l.split()[2]),
'pdflib_nset': int(l.split()[3]),
'filename': l.split()[4],
'lhapdf_nmem': int(l.split()[5]),
'q2min': float(l.split()[6]),
'q2max': float(l.split()[7]),
'xmin': float(l.split()[8]),
'xmax': float(l.split()[9]),
'description': l.split()[10]}) \
for l in pdfsets_lines)
elif lhapdf_version.startswith('6.'):
pdfsets_lines = \
[l for l in open(pjoin(pdfsets_dir, 'pdfsets.index')).read().split('\n') if l.strip()]
lhapdf_pdfsets = dict( (int(l.split()[0]),
{'lhaid': int(l.split()[0]),
'filename': l.split()[1]}) \
for l in pdfsets_lines)
else:
raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)
return lhapdf_pdfsets
def get_lhapdf_version(self):
    """Return the lhapdf version string, querying lhapdf-config only once.

    Raises an Exception when the lhapdf executable is missing and
    MadGraph5Error for the unsupported 6.0.x series.
    """
    # fix: the original tested hasattr(self, 'lhapdfversion') -- a typo for
    # 'lhapdf_version' -- so the cache never hit and the subprocess ran on
    # every call
    if not hasattr(self, 'lhapdf_version'):
        try:
            self.lhapdf_version = \
                subprocess.Popen([self.options['lhapdf'], '--version'],
                                 stdout = subprocess.PIPE).stdout.read().strip()
        except OSError as error:
            if error.errno == 2:
                # errno 2 == ENOENT: the configured executable does not exist
                raise Exception('lhapdf executable (%s) is not found on your system. Please install it and/or indicate the path to the correct executable in input/mg5_configuration.txt' % self.options['lhapdf'])
            else:
                raise

    # this will be removed once some issues in lhapdf6 are fixed
    if self.lhapdf_version.startswith('6.0'):
        raise MadGraph5Error('LHAPDF 6.0.x not supported. Please use v6.1 or later')
    if self.lhapdf_version.startswith('6.2'):
        logger.warning('Support of LHAPDF 6.2.x is still in beta phase. Consider to use LHAPDF 6.1.x in case of problem.')

    return self.lhapdf_version
def get_lhapdf_pdfsetsdir(self):
    """Return the directory where LHAPDF stores its PDF sets.

    $LHAPDF_DATA_PATH takes precedence when set and non-empty; otherwise
    lhapdf-config is queried (the flag differs between the 5.x and 6.x
    series).  Raises MadGraph5Error for an unrecognised version (the
    original code fell through to an UnboundLocalError).
    """
    lhapdf_version = self.get_lhapdf_version()

    # shortcut: user-provided location via the environment
    if os.environ.get('LHAPDF_DATA_PATH'):
        datadir = os.environ['LHAPDF_DATA_PATH']
    elif lhapdf_version.startswith('5.'):
        datadir = subprocess.Popen([self.options['lhapdf'], '--pdfsets-path'],
                                   stdout = subprocess.PIPE).stdout.read().strip()
    elif lhapdf_version.startswith('6.'):
        datadir = subprocess.Popen([self.options['lhapdf'], '--datadir'],
                                   stdout = subprocess.PIPE).stdout.read().strip()
    else:
        raise MadGraph5Error('Not valid LHAPDF version: %s' % lhapdf_version)
    return datadir
def get_lhapdf_libdir(self):
    """Return the LHAPDF library location reported by lhapdf-config."""
    lhapdf_version = self.get_lhapdf_version()
    if lhapdf_version.startswith('5.'):
        # NOTE(review): uses the 'lhapdf-config' option key while every other
        # helper in this class uses 'lhapdf' -- confirm the key exists in
        # self.options.
        libdir = subprocess.Popen([self.options['lhapdf-config'], '--libdir'],
                                  stdout = subprocess.PIPE).stdout.read().strip()
    elif lhapdf_version.startswith('6.'):
        # NOTE(review): '--libs' returns linker flags (e.g. '-L... -lLHAPDF'),
        # not a bare directory -- confirm callers expect that string.
        libdir = subprocess.Popen([self.options['lhapdf'], '--libs'],
                                  stdout = subprocess.PIPE).stdout.read().strip()
    return libdir
class AskforEditCard(cmd.OneLinePathCompletion):
    """A class for asking a question where in addition you can have the
    set command define and modifying the param_card/run_card correctly"""

    # every card name understood by 'help'/'set' (runtime cards are added
    # through the 'cards' argument of __init__)
    all_card_name = ['param_card', 'run_card', 'pythia_card', 'pythia8_card',
                     'madweight_card', 'MadLoopParams', 'shower_card']
    # shortcut name -> (expected argument types,
    #                   list of 'set' command templates to run);
    # '%(0)s' in a template is substituted by the first user argument
    special_shortcut = {'ebeam':([float],['run_card ebeam1 %(0)s', 'run_card ebeam2 %(0)s']),
                        'lpp': ([int],['run_card lpp1 %(0)s', 'run_card lpp2 %(0)s' ]),
                        'lhc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
                        'lep': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
                        'ilc': ([int],['run_card lpp1 0', 'run_card lpp2 0', 'run_card ebeam1 %(0)s/2', 'run_card ebeam2 %(0)s/2']),
                        'lcc': ([int],['run_card lpp1 1', 'run_card lpp2 1', 'run_card ebeam1 %(0)s*1000/2', 'run_card ebeam2 %(0)s*1000/2']),
                        'fixed_scale': ([float],['run_card fixed_fac_scale T', 'run_card fixed_ren_scale T', 'run_card scale %(0)s', 'run_card dsqrt_q2fact1 %(0)s' ,'run_card dsqrt_q2fact2 %(0)s']),
                        'simplepy8':([],['pythia8_card hadronlevel:all False',
                                         'pythia8_card partonlevel:mpi False',
                                         'pythia8_card BeamRemnants:primordialKT False',
                                         'pythia8_card PartonLevel:Remnants False',
                                         'pythia8_card Check:event False',
                                         'pythia8_card TimeShower:QEDshowerByQ False',
                                         'pythia8_card TimeShower:QEDshowerByL False',
                                         'pythia8_card SpaceShower:QEDshowerByQ False',
                                         'pythia8_card SpaceShower:QEDshowerByL False',
                                         'pythia8_card PartonLevel:FSRinResonances False',
                                         'pythia8_card ProcessLevel:resonanceDecays False',
                                         ]),
                        'mpi':([bool],['pythia8_card partonlevel:mpi %(0)s']),
                        'no_parton_cut':([],['run_card nocut T'])
                        }
    # one-line help text shown by 'help <shortcut>' (shortcuts without an
    # entry here print nothing special)
    special_shortcut_help = {
        'ebeam' : 'syntax: set ebeam VALUE:\n This parameter sets the energy to both beam to the value in GeV',
        'lpp' : 'syntax: set ebeam VALUE:\n'+\
                '   Set the type of beam to a given value for both beam\n'+\
                '   0 : means no PDF\n'+\
                '   1 : means proton PDF\n'+\
                '  -1 : means antiproton PDF\n'+\
                '   2 : means PDF for elastic photon emited from a proton\n'+\
                '   3 : means PDF for elastic photon emited from an electron',
        'lhc' : 'syntax: set lhc VALUE:\n      Set for a proton-proton collision with that given center of mass energy (in TeV)',
        'lep' : 'syntax: set lep VALUE:\n      Set for a electron-positron collision with that given center of mass energy (in GeV)',
        'fixed_scale' : 'syntax: set fixed_scale VALUE:\n      Set all scales to the give value (in GeV)',
        'simplepy8' : 'Turn off non-perturbative slow features of Pythia8.',
        'mpi' : 'syntax: set mpi value: allow to turn mpi in Pythia8 on/off'
        }
def load_default(self):
    """Reset every attribute used by the card editor to its default.

    No card is read here, so subclasses can redefine __init__ freely
    while still having all attributes defined.
    """
    # generic state
    self.me_dir = None
    self.param_card = None
    self.run_card = {}
    self.mode = ''
    self.cards = []
    # param/run card bookkeeping
    self.pname2block = {}
    self.conflict = []
    self.restricted_value = {}
    self.run_set = []
    # which optional cards are present
    for flag in ('has_mw', 'has_ml', 'has_shower', 'has_PY8'):
        setattr(self, flag, False)
    self.paths = {}
def define_paths(self, **opt):
    """Fill self.paths with the standard location of every card.

    The process directory is taken from opt['pwd'], or from
    opt['mother_interface'].me_dir when self.me_dir is not yet set.
    (fix: the original assigned the 'FO_analyse' entry twice with the
    same value; the duplicate is removed.)
    """
    if 'pwd' in opt:
        self.me_dir = opt['pwd']
    elif 'mother_interface' in opt:
        self.mother_interface = opt['mother_interface']
    if not hasattr(self, 'me_dir') or not self.me_dir:
        self.me_dir = self.mother_interface.me_dir

    cards_dir = pjoin(self.me_dir, 'Cards')
    # path tag -> file name inside the Cards directory
    card_files = {
        'param': 'param_card.dat',
        'param_default': 'param_card_default.dat',
        'run': 'run_card.dat',
        'run_default': 'run_card_default.dat',
        'transfer': 'transfer_card.dat',
        'MadWeight': 'MadWeight_card.dat',
        'MadWeight_default': 'MadWeight_card_default.dat',
        'ML': 'MadLoopParams.dat',
        'shower': 'shower_card.dat',
        'shower_default': 'shower_card_default.dat',
        'FO_analyse': 'FO_analyse_card.dat',
        'FO_analyse_default': 'FO_analyse_card_default.dat',
        'pythia': 'pythia_card.dat',
        'pythia8': 'pythia8_card.dat',
        'pythia8_default': 'pythia8_card_default.dat',
        'madspin_default': 'madspin_card_default.dat',
        'madspin': 'madspin_card.dat',
        'reweight': 'reweight_card.dat',
        'delphes': 'delphes_card.dat',
        'plot': 'plot_card.dat',
        'plot_default': 'plot_card_default.dat',
        'madanalysis5_parton': 'madanalysis5_parton_card.dat',
        'madanalysis5_hadron': 'madanalysis5_hadron_card.dat',
        'madanalysis5_parton_default': 'madanalysis5_parton_card_default.dat',
        'madanalysis5_hadron_default': 'madanalysis5_hadron_card_default.dat',
    }
    for tag, filename in card_files.items():
        self.paths[tag] = pjoin(cards_dir, filename)
def __init__(self, question, cards=[], mode='auto', *args, **opt):
    """Set up the question handler: load defaults, resolve card paths,
    then parse every card listed in *cards* so that 'set' and
    tab-completion know about their parameters.

    NOTE(review): 'cards' has a mutable default ([]) shared across calls --
    confirm no caller mutates it.
    """
    self.load_default()
    self.define_paths(**opt)
    cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)

    # param_card: fall back to the default card when the current one is broken
    try:
        self.param_card = check_param_card.ParamCard(self.paths['param'])
    except (check_param_card.InvalidParamCard, ValueError) as e:
        logger.error('Current param_card is not valid. We are going to use the default one.')
        logger.error('problem detected: %s' % e)
        files.cp(self.paths['param_default'], self.paths['param'])
        self.param_card = check_param_card.ParamCard(self.paths['param'])
    default_param = check_param_card.ParamCard(self.paths['param_default'])
    self.param_card_default = default_param

    # run_card is optional (e.g. standalone output)
    try:
        self.run_card = banner_mod.RunCard(self.paths['run'], consistency='warning')
    except IOError:
        self.run_card = {}
    try:
        run_card_def = banner_mod.RunCard(self.paths['run_default'])
    except IOError:
        run_card_def = {}

    self.pname2block = {}
    self.conflict = []          # parameter names present in several cards
    self.restricted_value = {}
    self.mode = mode
    self.cards = cards

    # map parameter-name -> [(block, lhaid), ...] and collect the entries
    # whose value is restricted by the model
    self.pname2block, self.restricted_value = \
                              default_param.analyze_param_card()

    # the set of valid run_card names (hidden parameters included)
    if run_card_def:
        self.run_set = run_card_def.keys() + self.run_card.hidden_param
    elif self.run_card:
        self.run_set = self.run_card.keys()
    else:
        self.run_set = []
    # check for conflict with run_card
    for var in self.pname2block:
        if var in self.run_set:
            self.conflict.append(var)

    self.has_delphes = False
    if 'delphes_card.dat' in cards:
        self.has_delphes = True

    # ---------------------- MadWeight -----------------------------------
    self.has_mw = False
    if 'madweight_card.dat' in cards:
        # expose the transfer-function commands of the mother interface
        self.do_change_tf = self.mother_interface.do_define_transfer_fct
        self.complete_change_tf = self.mother_interface.complete_define_transfer_fct
        self.help_change_tf = self.mother_interface.help_define_transfer_fct
        if not os.path.exists(self.paths['transfer']):
            logger.warning('No transfer function currently define. Please use the change_tf command to define one.')
        self.has_mw = True
        try:
            import madgraph.madweight.Cards as mwcards
        except:
            import internal.madweight.Cards as mwcards
        self.mw_card = mwcards.Card(self.paths['MadWeight'])
        self.mw_card = self.mw_card.info
        self.mw_vars = []
        for key in self.mw_card:
            if key == 'comment':
                continue
            # NOTE(review): self.mw_card was just rebound to its .info; this
            # lookup relies on that object exposing .info again -- confirm.
            for key2 in self.mw_card.info[key]:
                if isinstance(key2, str) and not key2.isdigit():
                    self.mw_vars.append(key2)
        # check for conflict with the param_card / run_card
        for var in self.pname2block:
            if var in self.mw_vars:
                self.conflict.append(var)
        for var in self.mw_vars:
            if var in self.run_card:
                self.conflict.append(var)

    # ---------------------- MadLoop -------------------------------------
    self.has_ml = False
    if os.path.isfile(self.paths['ML']):
        self.has_ml = True
        self.MLcard = banner_mod.MadLoopParam(self.paths['ML'])
        self.MLcardDefault = banner_mod.MadLoopParam()
        self.ml_vars = [k.lower() for k in self.MLcard.keys()]
        # check for conflicts
        for var in self.ml_vars:
            if var in self.run_card:
                self.conflict.append(var)
            if var in self.pname2block:
                self.conflict.append(var)
            if self.has_mw and var in self.mw_vars:
                self.conflict.append(var)

    # ---------------------- Shower --------------------------------------
    self.has_shower = False
    if 'shower_card.dat' in cards:
        self.has_shower = True
        self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
        self.shower_vars = self.shower_card.keys()
        # check for conflicts
        for var in self.pname2block:
            if var in self.shower_vars:
                self.conflict.append(var)
        for var in self.shower_vars:
            if var in self.run_card:
                self.conflict.append(var)

    # ---------------------- Pythia8 --------------------------------------
    self.has_PY8 = False
    if 'pythia8_card.dat' in cards:
        self.has_PY8 = True
        self.PY8Card = banner_mod.PY8Card(self.paths['pythia8'])
        self.PY8CardDefault = banner_mod.PY8Card()
        self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
        # check for conflicts
        for var in self.py8_vars:
            if var in self.run_card:
                self.conflict.append(var)
            if var in self.pname2block:
                self.conflict.append(var)
            if self.has_mw and var in self.mw_vars:
                self.conflict.append(var)
            if self.has_ml and var in self.ml_vars:
                self.conflict.append(var)
def do_help(self, line, conflict_raise=False, banner=True):
if banner:
logger.info('*** HELP MESSAGE ***', '$MG:color:BLACK')
args = self.split_arg(line)
if len(args)==0 or (len(args) == 1 and hasattr(self, 'do_%s' % args[0])):
out = cmd.BasicCmd.do_help(self, line)
if len(args)==0:
print 'Allowed Argument'
print '================'
print '\t'.join(self.allow_arg)
print
print 'Special shortcut: (type help <name>)'
print '===================================='
print ' syntax: set <name> <value>'
print '\t'.join(self.special_shortcut)
print
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return out
if args[0] in self.special_shortcut:
if args[0] in self.special_shortcut_help:
print self.special_shortcut_help[args[0]]
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return
start = 0
card = ''
if args[0]+'_card' in self.all_card_name+ self.cards:
args[0] += '_card'
elif args[0]+'.dat' in self.all_card_name+ self.cards:
args[0] += '.dat'
elif args[0]+'_card.dat' in self.all_card_name+ self.cards:
args[0] += '_card.dat'
if args[0] in self.all_card_name + self.cards:
start += 1
card = args[0]
if len(args) == 1:
if args[0] == 'pythia8_card':
args[0] = 'PY8Card'
if args[0] == 'param_card':
logger.info("Param_card information: ", '$MG:color:BLUE')
print "File to define the various model parameter"
logger.info("List of the Block defined:",'$MG:color:BLUE')
print "\t".join(self.param_card.keys())
elif args[0].startswith('madanalysis5'):
print 'This card allow to make plot with the madanalysis5 package'
print 'An example card is provided. For more information about the '
print 'syntax please refer to: https://madanalysis.irmp.ucl.ac.be/'
print 'or to the user manual [arXiv:1206.1599]'
if args[0].startswith('madanalysis5_hadron'):
print
print 'This card also allow to make recasting analysis'
print 'For more detail, see: arXiv:1407.3278'
elif hasattr(self, args[0]):
logger.info("%s information: " % args[0], '$MG:color:BLUE')
print(eval('self.%s' % args[0]).__doc__)
logger.info("List of parameter associated", '$MG:color:BLUE')
print "\t".join(eval('self.%s' % args[0]).keys())
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return
er() for l in self.run_card.keys()] and card in ['', 'run_card']:
if args[start] not in self.run_set:
args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitely speficy this parameter will modif the run_card file', '$MG:color:BLACK')
self.run_card.do_help(args[start])
'param_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitely speficy this parameter will modif the param_card file', '$MG:color:BLACK')
if args[start] == 'width':
args[start] = 'decay'
if len(args) == start+1:
self.param_card.do_help(args[start], tuple())
key = None
elif args[start+1] in self.pname2block:
all_var = self.pname2block[args[start+1]]
key = None
for bname, lhaid in all_var:
if bname == args[start]:
key = lhaid
break
else:
logger.warning('%s is not part of block "%s" but "%s". please correct.' %
(args[start+1], args[start], bname))
else:
try:
key = tuple([int(i) for i in args[start+1:]])
except ValueError:
logger.warning('Failed to identify LHA information')
return
if key in self.param_card[args[start]].param_dict:
self.param_card.do_help(args[start], key, default=self.param_card_default)
elif key:
logger.warning('invalid information: %s not defined in the param_card' % (key,))
elif args[start] in self.pname2block and card in ['','param_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitely speficy this parameter will modif the param_card file', '$MG:color:BLACK')
all_var = self.pname2block[args[start]]
for bname, lhaid in all_var:
new_line = 'param_card %s %s %s' % (bname,
' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
self.do_help(new_line, conflict_raise=True, banner=False)
elif self.has_ml and args[start] in self.ml_vars \
and card in ['', 'MadLoop_card']:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitely speficy this parameter will modif the madloop_card file', '$MG:color:BLACK')
self.MLcard.do_help(args[start])
elif self.has_PY8 and args[start] in self.PY8Card:
if args[start] in self.conflict and not conflict_raise:
conflict_raise = True
logger.info('** AMBIGUOUS NAME: %s **', args[start], '$MG:color:BLACK')
if card == '':
logger.info('** If not explicitely speficy this parameter will modif the pythia8_card file', '$MG:color:BLACK')
self.PY8Card.do_help(args[start])
elif card.startswith('madanalysis5'):
print 'MA5'
else:
print "no help available"
if banner:
logger.info('*** END HELP ***', '$MG:color:BLACK')
return
def complete_help(self, text, line, begidx, endidx):
    """Tab completion for the 'help' command."""
    # cancel the question timeout while the user is tab-completing
    prev_timer = signal.alarm(0)
    if prev_timer:
        # a timer was running: overwrite the typed line, print the notice
        # and re-display the line so the prompt stays readable
        nb_back = len(line)
        self.stdout.write('\b'*nb_back + '[timer stopped]\n')
        self.stdout.write(line)
        self.stdout.flush()
    # 'help' accepts everything 'set' accepts, plus the defined commands
    possibilities = self.complete_set(text, line, begidx, endidx,formatting=False)
    if line[:begidx].strip() == 'help':
        possibilities['Defined command'] = cmd.BasicCmd.completenames(self, text, line)
        possibilities.update(self.complete_add(text, line, begidx, endidx,formatting=False))
    return self.deal_multiple_categories(possibilities)
def complete_update(self, text, line, begidx, endidx):
    """Tab completion for the 'update' command."""
    # cancel the question timeout while the user is tab-completing
    prev_timer = signal.alarm(0)
    if prev_timer:
        nb_back = len(line)
        self.stdout.write('\b'*nb_back + '[timer stopped]\n')
        self.stdout.write(line)
        self.stdout.flush()
    # only the first argument (the update mode) is completed
    arg = line[:begidx].split()
    if len(arg) <=1:
        return self.list_completion(text, ['dependent', 'missing', 'to_slha1', 'to_slha2'], line)
def complete_set(self, text, line, begidx, endidx, formatting=True):
    """ Complete the set command"""
    # cancel the question timeout while the user is tab-completing
    prev_timer = signal.alarm(0)
    if prev_timer:
        nb_back = len(line)
        self.stdout.write('\b'*nb_back + '[timer stopped]\n')
        self.stdout.write(line)
        self.stdout.flush()

    possibilities = {}
    allowed = {}
    args = self.split_arg(line[0:begidx])
    if args[-1] in ['Auto', 'default']:
        return

    # step 1: from the tokens already typed, decide which categories of
    # completion are still possible at the cursor position
    if len(args) == 1:
        # nothing typed yet: card names, run/param names, blocks, shortcuts
        allowed = {'category':'', 'run_card':'', 'block':'all', 'param_card':'','shortcut':''}
        if self.has_mw:
            allowed['madweight_card'] = ''
            allowed['mw_block'] = 'all'
        if self.has_shower:
            allowed['shower_card'] = ''
        if self.has_ml:
            allowed['madloop_card'] = ''
        if self.has_PY8:
            allowed['pythia8_card'] = ''
        if self.has_delphes:
            allowed['delphes_card'] = ''
    elif len(args) == 2:
        # one token typed: it may be a card name or a block name
        if args[1] == 'run_card':
            allowed = {'run_card':'default'}
        elif args[1] == 'param_card':
            allowed = {'block':'all', 'param_card':'default'}
        elif args[1] in self.param_card.keys():
            allowed = {'block':args[1]}
        elif args[1] == 'width':
            allowed = {'block': 'decay'}
        elif args[1] == 'MadWeight_card':
            allowed = {'madweight_card':'default', 'mw_block': 'all'}
        elif args[1] == 'MadLoop_card':
            allowed = {'madloop_card':'default'}
        elif args[1] == 'pythia8_card':
            allowed = {'pythia8_card':'default'}
        elif self.has_mw and args[1] in self.mw_card.keys():
            allowed = {'mw_block':args[1]}
        elif args[1] == 'shower_card':
            allowed = {'shower_card':'default'}
        elif args[1] == 'delphes_card':
            allowed = {'delphes_card':'default'}
        else:
            allowed = {'value':''}
    else:
        # several tokens: skip the explicit card name if present
        start = 1
        if args[1] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
                       'MadLoop_card','pythia8_card','delphes_card','plot_card',
                       'madanalysis5_parton_card','madanalysis5_hadron_card']:
            start = 2
        if args[-1] in self.pname2block.keys():
            allowed['value'] = 'default'
        elif args[start] in self.param_card.keys() or args[start] == 'width':
            if args[start] == 'width':
                args[start] = 'decay'
            # completing inside a param_card block (maybe partial lha ids)
            if args[start+1:]:
                allowed = {'block':(args[start], args[start+1:])}
            else:
                allowed = {'block':args[start]}
        elif self.has_mw and args[start] in self.mw_card.keys():
            if args[start+1:]:
                allowed = {'mw_block':(args[start], args[start+1:])}
            else:
                allowed = {'mw_block':args[start]}
        else:
            allowed['value'] = ''

    # step 2: build the completion list of every allowed category
    if 'category' in allowed.keys():
        categories = ['run_card', 'param_card']
        if self.has_mw:
            categories.append('MadWeight_card')
        if self.has_shower:
            categories.append('shower_card')
        if self.has_ml:
            categories.append('MadLoop_card')
        if self.has_PY8:
            categories.append('pythia8_card')
        if self.has_delphes:
            categories.append('delphes_card')

        possibilities['category of parameter (optional)'] = \
                      self.list_completion(text, categories)

    if 'shortcut' in allowed.keys():
        possibilities['special values'] = self.list_completion(text, self.special_shortcut.keys()+['qcut', 'showerkt'])

    if 'run_card' in allowed.keys():
        opts = self.run_set
        if allowed['run_card'] == 'default':
            opts.append('default')

        possibilities['Run Card'] = self.list_completion(text, opts)

    if 'param_card' in allowed.keys():
        opts = self.pname2block.keys()
        if allowed['param_card'] == 'default':
            opts.append('default')
        possibilities['Param Card'] = self.list_completion(text, opts)

    if 'madweight_card' in allowed.keys():
        opts = self.mw_vars + [k for k in self.mw_card.keys() if k !='comment']
        if allowed['madweight_card'] == 'default':
            opts.append('default')
        possibilities['MadWeight Card'] = self.list_completion(text, opts)

    if 'madloop_card' in allowed.keys():
        opts = self.ml_vars
        if allowed['madloop_card'] == 'default':
            opts.append('default')
        possibilities['MadLoop Parameter'] = self.list_completion(text, opts)

    if 'pythia8_card' in allowed.keys():
        opts = self.py8_vars
        if allowed['pythia8_card'] == 'default':
            opts.append('default')
        possibilities['Pythia8 Parameter'] = self.list_completion(text, opts)

    if 'shower_card' in allowed.keys():
        opts = self.shower_vars + [k for k in self.shower_card.keys() if k !='comment']
        if allowed['shower_card'] == 'default':
            opts.append('default')
        possibilities['Shower Card'] = self.list_completion(text, opts)

    if 'delphes_card' in allowed:
        if allowed['delphes_card'] == 'default':
            opts = ['default', 'atlas', 'cms']
        # NOTE(review): when 'delphes_card' is not 'default', 'opts' keeps
        # the value of a previous branch (or is undefined when none ran) --
        # confirm this is intended.
        possibilities['Delphes Card'] = self.list_completion(text, opts)

    if 'value' in allowed.keys():
        opts = ['default']
        if 'decay' in args:
            opts.append('Auto')
            opts.append('Auto@NLO')
        elif args[-1] in self.pname2block and self.pname2block[args[-1]][0][0] == 'decay':
            opts.append('Auto')
            opts.append('Auto@NLO')
        possibilities['Special Value'] = self.list_completion(text, opts)

    if 'block' in allowed.keys():
        if allowed['block'] == 'all':
            # complete on every param_card block name ('width' is an alias)
            allowed_block = [i for i in self.param_card.keys() if 'qnumbers' not in i]
            allowed_block.append('width')
            possibilities['Param Card Block' ] = \
                              self.list_completion(text, allowed_block)
        elif isinstance(allowed['block'], basestring):
            # complete on the lha ids of one given block (restricted
            # entries are excluded) and on the matching parameter names
            block = self.param_card[allowed['block']].param_dict
            ids = [str(i[0]) for i in block
                   if (allowed['block'], i) not in self.restricted_value]
            possibilities['Param Card id' ] = self.list_completion(text, ids)
            varname = [name for name, all_var in self.pname2block.items()
                       if any((bname == allowed['block']
                               for bname,lhaid in all_var))]
            possibilities['Param card variable'] = self.list_completion(text,
                                                                       varname)
        else:
            # partial multi-index lha id typed: complete the next index,
            # or offer the special values when the id is already complete
            block = self.param_card[allowed['block'][0]].param_dict
            nb = len(allowed['block'][1])
            ids = [str(i[nb]) for i in block if len(i) > nb and \
                   [str(a) for a in i[:nb]] == allowed['block'][1]]

            if not ids:
                if tuple([int(i) for i in allowed['block'][1]]) in block:
                    opts = ['default']
                    if allowed['block'][0] == 'decay':
                        opts.append('Auto')
                        opts.append('Auto@NLO')
                    possibilities['Special value'] = self.list_completion(text, opts)
            possibilities['Param Card id' ] = self.list_completion(text, ids)

    if 'mw_block' in allowed.keys():
        if allowed['mw_block'] == 'all':
            allowed_block = [i for i in self.mw_card.keys() if 'comment' not in i]
            possibilities['MadWeight Block' ] = \
                              self.list_completion(text, allowed_block)
        elif isinstance(allowed['mw_block'], basestring):
            block = self.mw_card[allowed['mw_block']]
            ids = [str(i[0]) if isinstance(i, tuple) else str(i) for i in block]
            possibilities['MadWeight Card id' ] = self.list_completion(text, ids)
        else:
            block = self.mw_card[allowed['mw_block'][0]]
            nb = len(allowed['mw_block'][1])
            ids = [str(i[nb]) for i in block if isinstance(i, tuple) and\
                   len(i) > nb and \
                   [str(a) for a in i[:nb]] == allowed['mw_block'][1]]

            if not ids:
                if tuple([i for i in allowed['mw_block'][1]]) in block or \
                   allowed['mw_block'][1][0] in block.keys():
                    opts = ['default']
                    possibilities['Special value'] = self.list_completion(text, opts)
            possibilities['MadWeight Card id' ] = self.list_completion(text, ids)

    return self.deal_multiple_categories(possibilities, formatting)
def do_set(self, line):
    """ edit the value of one parameter in the card

    Dispatches on the (optional) card name and the parameter name to the
    run_card / param_card / MadWeight / shower / MadLoop / pythia8 setters.
    NOTE(review): this copy of the method contains lines that appear
    truncated on the left (marked below) — presumably a paste/extraction
    artifact; verify against the upstream file before relying on them.
    """
    args = self.split_arg(line)
    # --- normalise the argument list ('a=b' and '=' forms) -------------
    if len(args) == 0:
        logger.warning("No argument. For help type 'help set'.")
        # NOTE(review): no return here — args[0] below would raise
        # IndexError on an empty command; confirm intended behaviour.
    if len(args)==1 and '=' in args[-1]:
        arg1, arg2 = args.pop(-1).split('=',1)
        args += [arg1, arg2]
    if '=' in args:
        args.remove('=')
    # everything but the value is case-insensitive
    args[:-1] = [ a.lower() for a in args[:-1]]
    # --- special shortcuts: one user keyword expands to several set cmds
    if args[0] in self.special_shortcut:
        # NOTE(review): 'cmd' shadows the module-level 'cmd' import used
        # elsewhere in this class; harmless here but confusing.
        targettypes , cmd = self.special_shortcut[args[0]]
        if len(args) != len(targettypes) +1:
            logger.warning('shortcut %s requires %s argument' % (args[0], len(targettypes)))
            if len(args) < len(targettypes) +1:
                return
            else:
                logger.warning('additional argument will be ignored')
        values ={}
        for i, argtype in enumerate(targettypes):
            try:
                # NOTE(review): this rebinds 'values' to a one-entry dict on
                # every pass instead of accumulating (values[str(i)] = ...);
                # with more than one target type only the last entry
                # survives — looks like a bug, confirm against upstream.
                values = {str(i): banner_mod.ConfigFile.format_variable(args[i+1], argtype, args[0])}
            except ValueError as e:
                logger.warning("Wrong argument: The entry #%s should be of type %s.", i+1, argtype)
                return
        for arg in cmd:
            try:
                text = arg % values
            except KeyError:
                logger.warning("This command requires one argument")
                return
            except Exception as e:
                logger.warning(str(e))
                return
            else:
                self.do_set(arg % values)
        return
    start = 0
    if len(args) < 2:
        logger.warning('Invalid set command %s (need two arguments)' % line)
        return
    # --- special case: QCUT goes straight into pythia_card.dat ---------
    if args[0].lower() == 'qcut':
        pythia_path = self.paths['pythia']
        if os.path.exists(pythia_path):
            logger.info('add line QCUT = %s in pythia_card.dat' % args[1])
            p_card = open(pythia_path,'r').read()
            p_card, n = re.subn('''^\s*QCUT\s*=\s*[\de\+\-\.]*\s*$''',
                                ''' QCUT = %s ''' % args[1], \
                                p_card, flags=(re.M+re.I))
            if n==0:
                # no pre-existing QCUT line: append one
                p_card = '%s \n QCUT= %s' % (p_card, args[1])
            with open(pythia_path, 'w') as fsock:
                fsock.write(p_card)
            return
    # --- special case: SHOWERKT also lives in pythia_card.dat ----------
    if args[0].lower() == 'showerkt':
        pythia_path = self.paths['pythia']
        if os.path.exists(pythia_path):
            logger.info('add line SHOWERKT = %s in pythia_card.dat' % args[1].upper())
            p_card = open(pythia_path,'r').read()
            p_card, n = re.subn('''^\s*SHOWERKT\s*=\s*[default\de\+\-\.]*\s*$''',
                                ''' SHOWERKT = %s ''' % args[1].upper(), \
                                p_card, flags=(re.M+re.I))
            if n==0:
                p_card = '%s \n SHOWERKT= %s' % (p_card, args[1].upper())
            with open(pythia_path, 'w') as fsock:
                fsock.write(p_card)
            return
    # 'card' remembers which card the user explicitly named ('' = guess)
    card = ''
    # --- canonicalise explicit card names, rejecting absent cards ------
    if args[0] == 'madweight_card':
        if not self.mw_card:
            logger.warning('Invalid Command: No MadWeight card defined.')
            return
        args[0] = 'MadWeight_card'
    if args[0] == 'shower_card':
        if not self.shower_card:
            logger.warning('Invalid Command: No Shower card defined.')
            return
        args[0] = 'shower_card'
    if args[0] == "madloop_card":
        if not self.has_ml:
            logger.warning('Invalid Command: No MadLoopParam card defined.')
            return
        args[0] = 'MadLoop_card'
    if args[0] == "pythia8_card":
        if not self.has_PY8:
            logger.warning('Invalid Command: No Pythia8 card defined.')
            return
        args[0] = 'pythia8_card'
    if args[0] == 'delphes_card':
        if not self.has_delphes:
            logger.warning('Invalid Command: No Delphes card defined.')
            return
        # 'atlas'/'cms' copy the corresponding template over the card
        if args[1] == 'atlas':
            logger.info("set default ATLAS configuration for Delphes", '$MG:color:BLACK')
            files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_ATLAS.dat'),
                     pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
            return
        elif args[1] == 'cms':
            logger.info("set default CMS configuration for Delphes",'$MG:color:BLACK')
            files.cp(pjoin(self.me_dir,'Cards', 'delphes_card_CMS.dat'),
                     pjoin(self.me_dir,'Cards', 'delphes_card.dat'))
            return
    # --- '<card> default' restores the template; otherwise record card -
    if args[0] in ['run_card', 'param_card', 'MadWeight_card', 'shower_card',
                   'delphes_card','madanalysis5_hadron_card','madanalysis5_parton_card']:
        if args[1] == 'default':
            logger.info('replace %s by the default card' % args[0],'$MG:color:BLACK')
            files.cp(self.paths['%s_default' %args[0][:-5]], self.paths[args[0][:-5]])
            if args[0] == 'param_card':
                self.param_card = check_param_card.ParamCard(self.paths['param'])
            elif args[0] == 'run_card':
                self.run_card = banner_mod.RunCard(self.paths['run'])
            elif args[0] == 'shower_card':
                self.shower_card = shower_card_mod.ShowerCard(self.paths['shower'])
            return
        else:
            card = args[0]
        start=1
        if len(args) < 3:
            logger.warning('Invalid set command: %s (not enough arguments)' % line)
            return
    elif args[0] in ['MadLoop_card']:
        if args[1] == 'default':
            logger.info('replace MadLoopParams.dat by the default card','$MG:color:BLACK')
            self.MLcard = banner_mod.MadLoopParam(self.MLcardDefault)
            self.MLcard.write(self.paths['ML'],
                              commentdefault=True)
            return
        else:
            card = args[0]
        start=1
        if len(args) < 3:
            logger.warning('Invalid set command: %s (not enough arguments)' % line)
            return
    elif args[0] in ['pythia8_card']:
        if args[1] == 'default':
            logger.info('replace pythia8_card.dat by the default card','$MG:color:BLACK')
            self.PY8Card = banner_mod.PY8Card(self.PY8CardDefault)
            self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                               pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                               print_only_visible=True)
            return
        else:
            card = args[0]
        start=1
        if len(args) < 3:
            logger.warning('Invalid set command: %s (not enough arguments)' % line)
            return
    elif args[0] in ['madspin_card']:
        if args[1] == 'default':
            logger.info('replace madspin_card.dat by the default card','$MG:color:BLACK')
            files.cp(self.paths['MS_default'], self.paths['madspin'])
            return
        else:
            logger.warning("""Command set not allowed for modifying the madspin_card.
Check the command \"decay\" instead.""")
            return
    # --- RUN CARD ------------------------------------------------------
    # NOTE(review): the next line is left-truncated in this copy; it most
    # likely read: if args[start] in [l.lower() for l in self.run_card.keys()] ...
    er() for l in self.run_card.keys()] and card in ['', 'run_card']:
        if args[start] not in self.run_set:
            args[start] = [l for l in self.run_set if l.lower() == args[start]][0]
        if args[start] in self.conflict and card == '':
            text = 'Ambiguous name (present in more than one card). Will assume it to be referred to run_card.\n'
            text += 'If this is not intended, please reset it in the run_card and specify the relevant card to \n'
            text += 'edit, in the format < set card parameter value >'
            logger.warning(text)
        if args[start+1] == 'default':
            # restore the template value, or drop the key if it has none
            default = banner_mod.RunCard(self.paths['run_default'])
            if args[start] in default.keys():
                self.setR(args[start],default[args[start]])
            else:
                logger.info('remove information %s from the run_card' % args[start],'$MG:color:BLACK')
                del self.run_card[args[start]]
        else:
            # list/dict parameters may span several words; strip trailing comment
            if args[0].startswith('sys_') or \
               args[0] in self.run_card.list_parameter or \
               args[0] in self.run_card.dict_parameter:
                val = ' '.join(args[start+1:])
                val = val.split('#')[0]
            else:
                val = args[start+1]
            self.setR(args[start], val)
        self.run_card.write(self.paths['run'], self.paths['run_default'])
    elif card == 'run_card' and args[start] in ['nocut', 'no_cut']:
        logger.info("Going to remove all cuts from the run_card", '$MG:color:BLACK')
        self.run_card.remove_all_cut()
        self.run_card.write(self.paths['run'], self.paths['run_default'])
    # --- PARAM_CARD by block name --------------------------------------
    # NOTE(review): the next line is left-truncated in this copy; it most
    # likely read: elif (args[start] in self.param_card or args[start] == 'width') and card in ['', ...
    'param_card']:
        # keep a 'scan...' trailing expression as one single argument
        if any(t.startswith('scan') for t in args):
            index = [i for i,t in enumerate(args) if t.startswith('scan')][0]
            args = args[:index] + [' '.join(args[index:])]
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            text += ' in the format < set card parameter value>'
            logger.warning(text)
            return
        if args[start] == 'width':
            args[start] = 'decay'
        if args[start+1] in self.pname2block:
            # user gave a parameter *name*: map it to (block, lhaid)
            all_var = self.pname2block[args[start+1]]
            key = None
            for bname, lhaid in all_var:
                if bname == args[start]:
                    key = lhaid
                    break
            else:
                # for/else: no block matched the requested one
                logger.warning('%s is not part of block "%s" but "%s". please correct.' %
                               (args[start+1], args[start], bname))
                return
        else:
            try:
                key = tuple([int(i) for i in args[start+1:-1]])
            except ValueError:
                # 'set decay all VALUE': apply to every non-restricted entry
                if args[start] == 'decay' and args[start+1:-1] == ['all']:
                    for key in self.param_card[args[start]].param_dict:
                        if (args[start], key) in self.restricted_value:
                            continue
                        else:
                            self.setP(args[start], key, args[-1])
                    self.param_card.write(self.paths['param'])
                    return
                logger.warning('invalid set command %s (failed to identify LHA information)' % line)
                return
        if key in self.param_card[args[start]].param_dict:
            if (args[start], key) in self.restricted_value:
                text = "Note that this parameter seems to be ignore by MG.\n"
                text += "MG will use instead the expression: %s\n" % \
                        self.restricted_value[(args[start], key)]
                text += "You need to match this expression for external program (such pythia)."
                logger.warning(text)
            if args[-1].lower() in ['default', 'auto', 'auto@nlo'] or args[-1].startswith('scan'):
                self.setP(args[start], key, args[-1])
            else:
                try:
                    value = float(args[-1])
                except Exception:
                    logger.warning('Invalid input: Expected number and not \'%s\'' \
                                   % args[-1])
                    return
                self.setP(args[start], key, value)
        else:
            logger.warning('invalid set command %s' % line)
            return
        self.param_card.write(self.paths['param'])
    # --- PARAM_CARD by parameter name (may map to several entries) -----
    elif args[start] in self.pname2block and card in ['','param_card']:
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            text += ' in the format < set card parameter value>'
            logger.warning(text)
            return
        all_var = self.pname2block[args[start]]
        for bname, lhaid in all_var:
            new_line = 'param_card %s %s %s' % (bname,
                       ' '.join([ str(i) for i in lhaid]), ' '.join(args[start+1:]))
            self.do_set(new_line)
        if len(all_var) > 1:
            logger.warning('This variable correspond to more than one parameter in the param_card.')
            for bname, lhaid in all_var:
                logger.warning(' %s %s' % (bname, ' '.join([str(i) for i in lhaid])))
            logger.warning('all listed variables have been modified')
    # --- MADWEIGHT_CARD: explicit block -------------------------------
    elif self.has_mw and (args[start] in self.mw_card and args[start] != 'comment') \
                                          and card in ['','MadWeight_card']:
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            text += ' in the format < set card parameter value>'
            logger.warning(text)
            return
        block = args[start]
        name = args[start+1]
        value = args[start+2:]
        self.setM(block, name, value)
        self.mw_card.write(self.paths['MadWeight'])
    # --- MADWEIGHT_CARD: bare variable name ----------------------------
    elif self.has_mw and args[start] in self.mw_vars \
                             and card in ['', 'MadWeight_card']:
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            text += ' in the format < set card parameter value>'
            logger.warning(text)
            return
        block = [b for b, data in self.mw_card.items() if args[start] in data]
        if len(block) > 1:
            logger.warning('%s is define in more than one block: %s.Please specify.'
                           % (args[start], ','.join(block)))
            return
        block = block[0]
        name = args[start]
        value = args[start+1:]
        self.setM(block, name, value)
        self.mw_card.write(self.paths['MadWeight'])
    # --- MADWEIGHT_CARD: new mw_* block --------------------------------
    elif self.has_mw and args[start].startswith('mw_') and len(args[start:]) == 3\
                             and card == 'MadWeight_card':
        block = args[start]
        name = args[start+1]
        value = args[start+2]
        self.setM(block, name, value)
        self.mw_card.write(self.paths['MadWeight'])
    # --- SHOWER_CARD ---------------------------------------------------
    # NOTE(review): the next line is left-truncated in this copy; it most
    # likely read: elif self.has_shower and args[start].lower() in [l.lower() for l in \
    rt].lower() in [l.lower() for l in \
                    self.shower_card.keys()] and card in ['', 'shower_card']:
        if args[start] not in self.shower_card:
            args[start] = [l for l in self.shower_card if l.lower() == args[start].lower()][0]
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            text += ' in the format < set card parameter value>'
            logger.warning(text)
            return
        if args[start+1].lower() == 'default':
            default = shower_card_mod.ShowerCard(self.paths['shower_default'])
            if args[start] in default.keys():
                self.shower_card.set_param(args[start],default[args[start]], self.paths['shower'])
            else:
                logger.info('remove information %s from the shower_card' % args[start],'$MG:color:BLACK')
                del self.shower_card[args[start]]
        elif args[start+1].lower() in ['t','.true.','true']:
            self.shower_card.set_param(args[start],'.true.',self.paths['shower'])
        elif args[start+1].lower() in ['f','.false.','false']:
            self.shower_card.set_param(args[start],'.false.',self.paths['shower'])
        elif args[start] in ['analyse', 'extralibs', 'extrapaths', 'includepaths'] or\
             args[start].startswith('dm_'):
            # multi-word values: re-split the raw line to keep original case
            args = line.split()
            args_str = ' '.join(str(a) for a in args[start+1:len(args)])
            self.shower_card.set_param(args[start],args_str,pjoin(self.me_dir,'Cards','shower_card.dat'))
        else:
            args_str = ' '.join(str(a) for a in args[start+1:len(args)])
            self.shower_card.set_param(args[start],args_str,self.paths['shower'])
    # --- MADLOOP_CARD --------------------------------------------------
    elif self.has_ml and args[start] in self.ml_vars \
                             and card in ['', 'MadLoop_card']:
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            logger.warning(text)
            return
        if args[start+1] == 'default':
            value = self.MLcardDefault[args[start]]
            default = True
        else:
            value = args[start+1]
            default = False
        self.setML(args[start], value, default=default)
        self.MLcard.write(self.paths['ML'],
                          commentdefault=True)
    # --- PYTHIA8_CARD --------------------------------------------------
    elif self.has_PY8 and (card == 'pythia8_card' or (card == '' and \
                           args[start] in self.PY8Card)):
        if args[start] in self.conflict and card == '':
            text = 'ambiguous name (present in more than one card). Please specify which card to edit'
            logger.warning(text)
            return
        if args[start+1] == 'default':
            value = self.PY8CardDefault[args[start]]
            default = True
        else:
            value = ' '.join(args[start+1:])
            default = False
        self.setPY8(args[start], value, default=default)
        self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                           pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                           print_only_visible=True)
    # --- nothing matched: warn and suggest close option names ----------
    else:
        logger.warning('invalid set command %s ' % line)
        arg = args[start].lower()
        if self.has_PY8:
            close_opts = [name for name in self.PY8Card if name.lower().startswith(arg[:3]) or arg in name.lower()]
            if close_opts:
                logger.info('Did you mean one of the following PY8 options:\n%s' % '\t'.join(close_opts))
        if self.run_card:
            close_opts = [name for name in self.run_card if name.lower().startswith(arg[:3]) or arg in name.lower()]
            if close_opts:
                logger.info('Did you mean one of the following run_card options:\n%s' % '\t'.join(close_opts))
        return
def setM(self, block, name, value):
    """Set entry *name* of MadWeight-card *block* to *value* (in memory only;
    the caller is responsible for writing self.mw_card to disk).
    'default' restores the template value or removes the entry if the
    template has none."""
    # a single-element list is unwrapped to a scalar
    if isinstance(value, list) and len(value) == 1:
        value = value[0]
    if block not in self.mw_card:
        logger.warning('block %s was not present in the current MadWeight card. We are adding it' % block)
        self.mw_card[block] = {}
    elif name not in self.mw_card[block]:
        logger.info('name %s was not present in the block %s for the current MadWeight card. We are adding it' % (name,block),'$MG:color:BLACK')
    if value == 'default':
        # fetch the template value; local import keeps MadWeight optional
        import madgraph.madweight.Cards as mwcards
        mw_default = mwcards.Card(self.paths['MadWeight_default'])
        try:
            value = mw_default[block][name]
        except KeyError:
            # template has no such entry: drop it from the current card
            logger.info('removing id "%s" from Block "%s" '% (name, block),'$MG:color:BLACK')
            if name in self.mw_card[block]:
                del self.mw_card[block][name]
            return
    if value:
        # NOTE(review): this mixes lazy %-style args with the trailing
        # '$MG:color:BLACK' tag — relies on the project's custom logger;
        # confirm it formats as intended.
        logger.info('modify madweight_card information BLOCK "%s" with id "%s" set to %s',
                    block, name, value, '$MG:color:BLACK')
    else:
        logger.warning("Invalid command: No value. To set default value. Use \"default\" as value")
        return
    self.mw_card[block][name] = value
def setR(self, name, value):
    """Write *value* into the run_card under *name*, flagged as user-set."""
    # announce the change first, matching the other set* helpers
    msg = 'modify parameter %s of the run_card.dat to %s' % (name, value)
    logger.info(msg, '$MG:color:BLACK')
    self.run_card.set(name, value, user=True)
def setML(self, name, value, default=False):
    """Set *name* in the in-memory MadLoop card to *value*.

    If *default* is True the entry is un-marked as user-set afterwards
    (so it is written back as a commented default).  A failure to set the
    value is reported as a warning and the method returns silently.
    """
    try:
        self.MLcard.set(name, value, user=True)
    # FIX: use the 'except ... as ...' form already used elsewhere in this
    # file (see do_set) instead of the Python2-only comma form.
    except Exception as error:
        logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
        return
    logger.info('modify parameter %s of the MadLoopParam.dat to %s' % (name, value),'$MG:color:BLACK')
    if default and name.lower() in self.MLcard.user_set:
        self.MLcard.user_set.remove(name.lower())
def setPY8(self, name, value, default=False):
    """Set *name* in the in-memory Pythia8 card to *value*.

    If *default* is True the entry is un-marked as user-set afterwards.
    A failure to set the value is reported as a warning and the method
    returns silently.
    """
    try:
        self.PY8Card.userSet(name, value)
    # FIX: use the 'except ... as ...' form already used elsewhere in this
    # file (see do_set) instead of the Python2-only comma form.
    except Exception as error:
        logger.warning("Fail to change parameter. Please Retry. Reason: %s." % error)
        return
    logger.info('modify parameter %s of the pythia8_card.dat to %s' % (name, value), '$MG:color:BLACK')
    if default and name.lower() in self.PY8Card.user_set:
        self.PY8Card.user_set.remove(name.lower())
def setP(self, block, lhaid, value):
    """Set the param_card entry (*block*, *lhaid*) to *value* (in memory).

    String values are interpreted specially:
      * 'default'          -> template value from the default param_card
      * 'auto'/'auto@nlo'  -> 'Auto'/'Auto@NLO' (DECAY block only)
      * 'scan...'          -> kept verbatim (validated for 'tag:' syntax)
      * anything else      -> converted to float
    """
    if isinstance(value, str):
        value = value.lower()
        if value == 'default':
            default = check_param_card.ParamCard(self.paths['param_default'])
            value = default[block].param_dict[lhaid].value
        elif value in ['auto', 'auto@nlo']:
            if 'nlo' in value:
                value = 'Auto@NLO'
            else:
                value = 'Auto'
            if block != 'decay':
                logger.warning('Invalid input: \'Auto\' value only valid for DECAY')
                return
        elif value.startswith('scan'):
            if ':' not in value:
                logger.warning('Invalid input: \'scan\' mode requires a \':\' before the definition.')
                return
            # an optional integer tag may sit between 'scan' and ':'
            tag = value.split(':')[0]
            tag = tag[4:].strip()
            if tag and not tag.isdigit():
                logger.warning('Invalid input: scan tag need to be integer and not "%s"' % tag)
                return
        else:
            try:
                value = float(value)
            except ValueError:
                logger.warning('Invalid input: \'%s\' not valid intput.'% value)
                # BUGFIX: previously execution fell through here and the raw
                # (unparsed) string was still written into the param_card;
                # now an invalid number leaves the card untouched.
                return
    logger.info('modify param_card information BLOCK %s with id %s set to %s' %\
                (block, lhaid, value), '$MG:color:BLACK')
    self.param_card[block].param_dict[lhaid].value = value
def check_card_consistency(self):
    """This is run on quitting the class. Apply here all the self-consistency
    rule that you want. Do the modification via the set command."""

    # NLO reweighting needs the reweight information stored in the events
    if 'reweight' in self.allow_arg and 'run' in self.allow_arg and \
        isinstance(self.run_card, banner_mod.RunCardNLO) and \
        not self.run_card['store_rwgt_info']:
        # look in the reweight card for a NLO-type 'change mode' request
        re_pattern = re.compile(r'''^\s*change\s*mode\s* (LO\+NLO|LO|NLO|NLO_tree)\s*(?:#|$)''', re.M+re.I)
        text = open(self.paths['reweight']).read()
        options = re_pattern.findall(text)
        if any(o in ['NLO', 'LO+NLO'] for o in options):
            logger.info('NLO reweighting is on ON. Automatically set store_rwgt_info to True', '$MG:color:BLACK' )
            self.do_set('run_card store_rwgt_info True')

    # the 'systematics' program also requires the reweight information
    if 'run' in self.allow_arg and \
        self.run_card['systematics_program'] == 'systematics' and \
        isinstance(self.run_card, banner_mod.RunCardNLO) and \
        not self.run_card['store_rwgt_info']:
        logger.warning('To be able to run systematics program, we set store_rwgt_info to True')
        self.do_set('run_card store_rwgt_info True')

    # event normalisation expected by the attached shower
    if 'pythia_card.dat' in self.cards:
        if self.run_card['event_norm'] != 'sum':
            logger.info('Pythia6 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
            self.do_set('run_card event_norm sum')
    elif 'pythia8_card.dat' in self.cards:
        if self.run_card['event_norm'] == 'sum':
            logger.info('Pythia8 needs a specific normalisation of the events. We will change it accordingly.', '$MG:color:BLACK' )
            self.do_set('run_card event_norm average')

    if self.has_shower and isinstance(self.run_card, banner_mod.RunCardNLO):
        modify_extralibs, modify_extrapaths = False,False
        extralibs = self.shower_card['extralibs'].split()
        extrapaths = self.shower_card['extrapaths'].split()
        # showers reading hepmc directly do not need the stdhep libraries
        if self.run_card['parton_shower'] in ['PYTHIA8', 'HERWIGPP', 'HW7']:
            if 'stdhep' in self.shower_card['extralibs']:
                extralibs.remove('stdhep')
                modify_extralibs = True
            if 'Fmcfio' in self.shower_card['extralibs']:
                extralibs.remove('Fmcfio')
                modify_extralibs = True
        if self.run_card['parton_shower'] == 'PYTHIA8':
            # Pythia8: query pythia8-config for the libraries/paths to link
            if not self.mother_interface.options['pythia8_path']:
                # FIX: use the call form of raise (the 'raise X, msg' form is
                # Python2-only and inconsistent with the rest of the file)
                raise self.mother_interface.InvalidCmd('Pythia8 is not correctly specified to MadGraph5_aMC@NLO')
            executable = pjoin(self.mother_interface.options['pythia8_path'], 'bin', 'pythia8-config')
            if not os.path.exists(executable):
                # BUGFIX: was 'self.mother.InvalidCmd' — no such attribute,
                # so a missing executable raised AttributeError instead of
                # the intended InvalidCmd.
                raise self.mother_interface.InvalidCmd('Pythia8 is not correctly specified to MadGraph5_aMC@NLO')
            libs , paths = [], []
            p = misc.subprocess.Popen([executable, '--libs'], stdout=subprocess.PIPE)
            stdout, _ = p.communicate()
            # '-lX' tokens become libs; other tokens are routed into 'paths'
            # via the side effect of list.append (returns None, so they are
            # excluded from the comprehension result)
            libs = [x[2:] for x in stdout.split() if x.startswith('-l') or paths.append(x[2:])]
            # Check whether the libraries pythia8-config relies on are present
            p = misc.subprocess.Popen([executable, '--config'], stdout=subprocess.PIPE)
            stdout, _ = p.communicate()
            for lib in ['-ldl','-lstdc++','-lc++']:
                if lib in stdout:
                    libs.append(lib[2:])
            supports_HEPMCHACK = '-DHEPMC2HACK' in stdout  # currently unused
            # merge the detected libs/paths into the shower card lists
            for l in libs:
                if l not in extralibs:
                    modify_extralibs = True
                    extralibs.append(l)
            for L in paths:
                if L not in extrapaths:
                    modify_extrapaths = True
                    extrapaths.append(L)
        # write the modification(s) back through the regular set machinery
        if modify_extralibs:
            if extralibs:
                self.do_set('shower_card extralibs %s ' % ' '.join(extralibs))
            else:
                self.do_set('shower_card extralibs None ')
        if modify_extrapaths:
            if extrapaths:
                self.do_set('shower_card extrapaths %s ' % ' '.join(extrapaths))
            else:
                self.do_set('shower_card extrapaths None ')
def reask(self, *args, **opt):
    """Re-ask the question (delegates to cmd.OneLinePathCompletion) and, for
    MadWeight runs, warn if no transfer function has been defined yet."""
    cmd.OneLinePathCompletion.reask(self,*args, **opt)
    if self.has_mw and not os.path.exists(pjoin(self.me_dir,'Cards','transfer_card.dat')):
        logger.warning('No transfer function currently define. Please use the change_tf command to define one.')
# counter of consecutive param_card format failures (see postcmd below)
fail_due_to_format = 0
def postcmd(self, stop, line):
    """Hook run after each command: when the question is being closed,
    enforce card consistency and refresh dependent parameters."""
    ending_question = cmd.OneLinePathCompletion.postcmd(self,stop,line)
    if ending_question:
        self.check_card_consistency()
        try:
            self.do_update('dependent', timer=20)
        except MadGraph5Error, error:
            if 'Missing block:' in str(error):
                self.fail_due_to_format +=1
                # after 10 failures, report and re-ask instead of looping
                if self.fail_due_to_format == 10:
                    missing, unknow = str(error).split('\n')[-2:]
                    logger.warning("Invalid param_card:\n%s\n%s\n" % (missing, unknow))
                    logger.info("Type \"update missing\" to use default value.\n ", '$MG:color:BLACK')
                    self.value = False
                    return self.reask(True)
            else:
                raise
    return ending_question
def do_update(self, line, timer=0):
    """ syntax: update dependent: Change the mass/width of particles which are not free parameter for the model.
    update missing: add to the current param_card missing blocks/parameters.
    update to_slha1: pass SLHA2 card to SLHA1 convention. (beta)
    update to_slha2: pass SLHA1 card to SLHA2 convention. (beta)"""
    args = self.split_arg(line)
    if len(args)==0:
        logger.warning('miss an argument (dependent or missing). Please retry')
        return

    if args[0] == 'dependent':
        if not self.mother_interface:
            logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
        pattern_width = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
        pattern_scan = re.compile(r'''^(decay)?[\s\d]*scan''', re.I+re.M)
        param_text= open(self.paths['param']).read()
        if pattern_scan.search(param_text):
            # for a scan, the value is resolved later by the scheduler
            return
        elif pattern_width.search(param_text):
            # 'auto' widths: compute them, then reload the card
            self.do_compute_widths('')
            self.param_card = check_param_card.ParamCard(self.paths['param'])
        # delegate the actual work to the (static) update_dependent
        self.update_dependent(self.mother_interface, self.me_dir, self.param_card,
                              self.paths['param'], timer)
    elif args[0] == 'missing':
        self.update_missing()
        return
    elif args[0] == 'to_slha2':
        try:
            check_param_card.convert_to_mg5card(self.paths['param'])
            logger.info('card updated')
        # FIX: 'except ... as ...' form, consistent with do_set and valid in Py3
        except Exception as error:
            logger.warning('failed to update to slha2 due to %s' % error)
        self.param_card = check_param_card.ParamCard(self.paths['param'])
    elif args[0] == 'to_slha1':
        try:
            check_param_card.convert_to_slha1(self.paths['param'])
            logger.info('card updated')
        except Exception as error:
            logger.warning('failed to update to slha1 due to %s' % error)
        self.param_card = check_param_card.ParamCard(self.paths['param'])
@staticmethod
def update_dependent(mecmd, me_dir, param_card, path ,timer=0):
    """static method which can also be called from outside the class
    usefull in presence of scan.
    return if the param_card was updated or not

    Loads the model via *mecmd* under an optional SIGALRM timeout of
    *timer* seconds; on success, recomputes the dependent parameters of
    *param_card* and writes the result to *path*.
    """
    logger.info('Update the dependent parameter of the param_card.dat')
    modify = True
    # local exception used to abort a too-slow model load via SIGALRM
    class TimeOutError(Exception):
        pass
    def handle_alarm(signum, frame):
        raise TimeOutError
    signal.signal(signal.SIGALRM, handle_alarm)
    if timer:
        signal.alarm(timer)
        log_level=30
    else:
        log_level=20

    # Try to load the model
    try:
        model = mecmd.get_model()
        signal.alarm(0)
    except TimeOutError:
        logger.warning('The model takes too long to load so we bypass the updating of dependent parameter.\n'+\
                       'This might create trouble for external program (like MadSpin/shower/...)\n'+\
                       'The update can be forced without timer by typing \'update dependent\' at the time of the card edition')
        modify =False
    except Exception,error:
        logger.debug(str(error))
        logger.warning('Failed to update dependent parameter. This might create trouble for external program (like MadSpin/shower/...)')
        signal.alarm(0)
    else:
        # restriction rules (if any) constrain which parameters may change
        restrict_card = pjoin(me_dir,'Source','MODEL','param_card_rule.dat')
        if not os.path.exists(restrict_card):
            restrict_card = None
        if model:
            modify = param_card.update_dependent(model, restrict_card, log_level)
            if modify and path:
                param_card.write(path)
        else:
            logger.warning('missing MG5aMC code. Fail to update dependent parameter. This might create trouble for program like MadSpin/shower/...')
    if log_level==20:
        logger.info('param_card up to date.')
    return modify
def update_missing(self):
    """Add to the current param_card any block/parameter present in the
    default card but missing from it, then reload the card."""

    # closure: append to 'text' every entry of default block *blockname*
    # that is absent from 'input_in_block'; returns the number added.
    # NOTE(review): written as a nested def taking an explicit 'self' and
    # reading 'text'/'input_in_block' from the enclosing scope.
    def check_block(self, blockname):
        add_entry = 0
        if blockname.lower() not in self.param_card_default:
            logger.info('unknow block %s: block will be ignored', blockname)
            return add_entry
        block = self.param_card_default[blockname]
        for key in block.keys():
            if key not in input_in_block:
                param = block.get(key)
                # backticks are the Python2 repr() shorthand
                if blockname != 'decay':
                    text.append('\t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
                else:
                    text.append('DECAY \t%s\t%s # %s\n' % (' \t'.join([`i` for i in param.lhacode]), param.value, param.comment))
                add_entry += 1
        if add_entry:
            text.append('\n')
        if add_entry:
            logger.info("Adding %s parameter(s) to block %s", add_entry, blockname)
        return add_entry

    # scan the current card, recording which entries each block defines
    current_block = ''
    input_in_block = set()
    defined_blocks = set()
    decay = set()
    text = []
    add_entry = 0
    for line in open(self.paths['param']):
        new_block = re.findall(r'^\s*(block|decay)\s*(\w*)', line, re.I)
        if new_block:
            # entering a new block: flush completeness check of previous one
            new_block = new_block[0]
            defined_blocks.add(new_block[1].lower())
            if current_block:
                add_entry += check_block(self, current_block)
            current_block= new_block[1]
            input_in_block = set()
            if new_block[0].lower() == 'decay':
                # decay entries are collected separately (checked at the end)
                decay.add((int(new_block[1]),))
                current_block = ''
            if new_block[1].lower() == 'qnumbers':
                current_block = ''
        text.append(line)
        if not current_block:
            continue
        # record the lha code of this parameter line (comment stripped)
        line = line.split('#',1)[0]
        split = line.split()
        if not split:
            continue
        else:
            try:
                lhacode = [int(i) for i in split[:-1]]
            except:
                continue
            input_in_block.add(tuple(lhacode))
    # flush the last block of the file
    if current_block:
        add_entry += check_block(self, current_block)

    # add entirely missing blocks (decay/qnumbers handled separately)
    for block in self.param_card_default:
        if block.startswith(('qnumbers', 'decay')):
            continue
        if block not in defined_blocks:
            nb_entry = len(self.param_card_default[block])
            logger.info("Block %s was missing. Adding the %s associated parameter(s)", block,nb_entry)
            add_entry += nb_entry
            text.append(str(self.param_card_default[block]))

    # special check for missing decay entries
    input_in_block = decay
    add_entry += check_block(self, 'decay')

    if add_entry:
        logger.info('write new param_card with %s new parameter(s).', add_entry, '$MG:color:BLACK')
        open(self.paths['param'],'w').write(''.join(text))
        self.reload_card(self.paths['param'])
    else:
        logger.info('No missing parameter detected.', '$MG:color:BLACK')
def check_answer_consistency(self):
    """function called if the code reads a file

    Runs the same consistency pass as interactive exit: card cross-checks
    first, then an update of the dependent param_card parameters (20s cap).
    """
    self.check_card_consistency()
    self.do_update('dependent', timer=20)
def help_set(self):
    '''help message for set'''
    # keep the exact same lines as before, emitted from a data table
    banner = '********************* HELP SET ***************************'
    body = (
        "syntax: set [run_card|param_card|...] NAME [VALUE|default]",
        "syntax: set [param_card] BLOCK ID(s) [VALUE|default]",
        '',
        '-- Edit the param_card/run_card/... and replace the value of the',
        ' parameter by the value VALUE.',
        ' ',
        '-- Example:',
        ' set run_card ebeam1 4000',
        ' set ebeam2 4000',
        ' set lpp1 0',
        ' set ptj default',
        '',
        ' set param_card mass 6 175',
        ' set mass 25 125.3',
        ' set mass mh 125',
        ' set mh 125',
        ' set decay 25 0.004',
        ' set decay wh 0.004',
        ' set vmix 2 1 2.326612e-01',
        '',
        ' set param_card default #return all parameter to default',
        ' set run_card default',
    )
    logger.info(banner)
    for msg in body:
        logger.info(msg)
    logger.info(banner)
def default(self, line):
    """Default action if line is not recognized

    Tries, in order: accept the default answer, dispatch to a do_* method,
    import a local file, import a path relative to me_dir, open a known
    card, download a URL, or store the raw line as the answer.
    """
    line = line.strip()
    args = line.split()
    if line == '' and self.default_value is not None:
        self.value = self.default_value
    # NOTE(review): if line is empty and default_value is None, args[0]
    # below raises IndexError — confirm callers never hit this case.
    elif hasattr(self, 'do_%s' % args[0]):
        # NOTE(review): despite matching any do_<name>, this forwards to
        # do_set — presumably intentional shorthand; verify.
        self.do_set(' '.join(args[1:]))
    elif os.path.isfile(line):
        # a path to an existing file: import it as a card
        self.copy_file(line)
        self.value = 'repeat'
    elif self.me_dir and os.path.exists(pjoin(self.me_dir, line)):
        self.copy_file(pjoin(self.me_dir,line))
        self.value = 'repeat'
    elif line.strip() != '0' and line.strip() != 'done' and \
        str(line) != 'EOF' and line.strip() in self.allow_arg:
        # a known card name: open it in the editor
        self.open_file(line)
        self.value = 'repeat'
    elif line.strip().startswith(('http:','www')):
        # a URL: download to a temp file, import it, clean up
        self.value = 'repeat'
        import tempfile
        fsock, path = tempfile.mkstemp()
        try:
            text = urllib.urlopen(line.strip())
        except Exception:
            logger.error('fail to load the file')
        else:
            for line in text:
                os.write(fsock, line)
            os.close(fsock)
            self.copy_file(path)
            os.remove(path)
    else:
        self.value = line
    return line
def do_decay(self, line):
    """edit the madspin_card to define the decay of the associate particle

    'PROC' is of the form 'X > a b ...'.  With '-add' the decay line is
    appended; otherwise any previous decay of the same particle is first
    removed from the card.
    """
    signal.alarm(0)  # cancel any pending question timeout
    path = self.paths['madspin']
    if 'madspin_card.dat' not in self.cards or not os.path.exists(path):
        logger.warning("Command decay not valid. Since MadSpin is not available.")
        return
    if ">" not in line:
        logger.warning("invalid command for decay. Line ignored")
        return
    if "-add" in line:
        # additive mode: keep previous decays, insert this one before 'launch'
        particle = line.split('>')[0].strip()
        text = open(path).read()
        line = line.replace('--add', '').replace('-add','')
        logger.info("change madspin_card to add one decay to %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
        if 'launch' in text:
            text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
        else:
            text += '\ndecay %s\n launch \n' % line
    else:
        # replace mode: strip any existing 'decay PARTICLE > ...' line first
        particle = line.split('>')[0].strip()
        logger.info("change madspin_card to define the decay of %s: %s" %(particle, line.strip()), '$MG:color:BLACK')
        # escape +/- so the particle name is regex-safe
        particle = particle.replace('+','\+').replace('-','\-')
        decay_pattern = re.compile(r"^\s*decay\s+%s\s*>[\s\w+-~]*?$" % particle, re.I+re.M)
        text= open(path).read()
        text = decay_pattern.sub('', text)
        if 'launch' in text:
            text = text.replace('launch', "\ndecay %s\nlaunch\n" % line,1)
        else:
            text += '\ndecay %s\n launch \n' % line
    with open(path,'w') as fsock:
        fsock.write(text)
    self.reload_card(path)
def do_compute_widths(self, line):
    """Collect the particles whose width is set to 'auto'/'auto@NLO' in the
    param_card and forward a compute_widths command (with --path and,
    if needed, --nlo) to the mother interface."""
    signal.alarm(0)  # cancel any pending question timeout
    path = self.paths['param']
    pattern = re.compile(r'''decay\s+(\+?\-?\d+)\s+auto(@NLO|)''',re.I)
    text = open(path).read()
    pdg_info = pattern.findall(text)
    has_nlo = any("@nlo"==nlo.lower() for _, nlo in pdg_info)
    pdg = [p for p,_ in pdg_info]
    line = '%s %s' % (line, ' '.join(pdg))
    # FIX: idiomatic 'not in' (was: not '--path' in line)
    if '--path' not in line:
        line += ' --path=%s' % path
    if has_nlo:
        line += ' --nlo'
    try:
        return self.mother_interface.do_compute_widths(line)
    # FIX: 'except ... as ...' form, consistent with do_set and valid in Py3
    except InvalidCmd as error:
        logger.error("Invalid command: %s " % error)
def help_compute_widths(self):
    """Forward the compute_widths help text from the mother interface."""
    signal.alarm(0)  # cancel any pending question timeout before printing
    helper = self.mother_interface.help_compute_widths
    return helper()
def help_decay(self):
    """help for command decay which modifies MadSpin_card"""
    signal.alarm(0)  # cancel any pending question timeout
    # FIX: parenthesized single-argument form — identical output under the
    # Python2 print statement, and valid under Python3 as well.
    print('--syntax: decay PROC [--add]')
    print(' ')
    print(' modify the madspin_card to modify the decay of the associate particle.')
    print(' and define it to PROC.')
    print(' if --add is present, just add a new decay for the associate particle.')
def complete_compute_widths(self, *args, **opts):
    """autocompletion for compute_widths (delegated to the mother interface)."""
    prev_timer = signal.alarm(0)  # cancel any pending question timeout
    if prev_timer:
        # BUGFIX: the original referenced an undefined name 'line', raising
        # NameError whenever a timer was active. Completers are invoked as
        # (text, line, begidx, endidx), so the current input line is args[1].
        line = args[1] if len(args) > 1 else ''
        nb_back = len(line)
        self.stdout.write('\b'*nb_back + '[timer stopped]\n')
        self.stdout.write(line)
        self.stdout.flush()
    return self.mother_interface.complete_compute_widths(*args,**opts)
def help_add(self):
    """help for add command"""
    # same message lines as before, emitted from a data table
    banner = '********************* HELP ADD ***************************'
    body = (
        '-- syntax: add pythia8_card NAME VALUE',
        " add a definition of name in the pythia8_card with the given value",
        " Do not work for the param_card",
        '-- syntax: add filename [OPTION] line',
        ' add the given LINE to the end of the associate file (all file supportedd).',
        ' OPTION parameter allows to change the position where to write in the file',
        ' --after_line=banner : write the line at the end of the banner',
        ' --line_position=X : insert the line before line X (starts at 0)',
        ' --after_line="<regular-expression>" write the line after the first line matching the regular expression',
        ' --before_line="<regular-expression>" write the line before the first line matching the regular expression',
        '--clean remove all previously existing line in the file',
        ' example: change reweight --after_line="^\s*change mode" change model heft',
    )
    logger.info(banner)
    for msg in body:
        logger.info(msg)
    logger.info(banner)
    def complete_add(self, text, line, begidx, endidx, formatting=True):
        """ auto-completion for add command

        First argument completes to a known card name (without extension);
        second argument completes to one of the position options.
        """
        # Stop the auto-answer timer; if one was pending, erase the prompt
        # line, notify the user and re-print what was typed so far.
        prev_timer = signal.alarm(0)
        if prev_timer:
            nb_back = len(line)
            self.stdout.write('\b'*nb_back + '[timer stopped]\n')
            self.stdout.write(line)
            self.stdout.flush()
        split = line[:begidx].split()
        if len(split)==1:
            # Completing the card name: offer card names with the ".dat"
            # (or other) extension stripped.
            possibilities = {}
            cards = [c.rsplit('.',1)[0] for c in self.cards]
            possibilities['category of parameter (optional)'] = \
                          self.list_completion(text, cards)
        elif len(split) == 2:
            # Completing the optional position flag.
            possibilities = {}
            options = ['--line_position=','--after_line=banner', '--after_line="','--before_line="']
            possibilities['category of parameter (optional)'] = \
                          self.list_completion(text, options, line)
        else:
            # Beyond the option, the rest is free text: nothing to complete.
            return
        return self.deal_multiple_categories(possibilities, formatting)
    def do_add(self, line):
        """ syntax: add filename NAME VALUE
            syntax: add filename [OPTION] LINE

        First form (pythia8_card only): set parameter NAME to VALUE.
        Second form: write LINE into the given card; OPTION (--clean,
        --line_position=X, --after_line=..., --before_line=...) controls
        where the line is written (see help_add).
        """
        args = self.split_arg(line)
        if len(args) == 3 and args[0] in ['pythia8_card', 'pythia8_card.dat'] and self.has_PY8:
            # "add pythia8_card NAME VALUE": set the parameter and rewrite the card.
            name= args[1]
            value = args[2]
            self.PY8Card.userSet(name, value)
            self.PY8Card.write(pjoin(self.me_dir,'Cards','pythia8_card.dat'),
                               pjoin(self.me_dir,'Cards','pythia8_card_default.dat'),
                               print_only_visible=True)
            logger.info("add in the pythia8_card the parameter \"%s\" with value \"%s\"" % (name, value), '$MG:color:BLACK')
        elif len(args) > 0:
            # Resolve args[0] to a real card filename (allow "x", "x.dat",
            # "x_card.dat", and the special MadLoop card).
            if args[0] in self.cards:
                card = args[0]
            elif "%s.dat" % args[0] in self.cards:
                card = "%s.dat" % args[0]
            elif "%s_card.dat" % args[0] in self.cards:
                card = "%s_card.dat" % args[0]
            elif self.has_ml and args[0].lower() == "madloop":
                card = "MadLoopParams.dat"
            else:
                logger.error("unknow card %s. Please retry." % args[0])
                return
            if args[1] == '--clean':
                # Replace the whole card by a one-line header plus the new line.
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write("# %s \n" % card)
                ff.write("%s \n" % line.split(None,2)[2])
                ff.close()
                logger.info("writing the line in %s (empty file) the line: \"%s\"" %(card, line.split(None,2)[2] ),'$MG:color:BLACK')
            elif args[1].startswith('--line_position='):
                # Insert the new line before the given 0-based line number.
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                pos = int(args[1].split('=',1)[1])
                newline = line.split(None,2)[2]
                split.insert(pos, newline)
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                # NOTE(review): ff is not closed explicitly in this branch.
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(pos, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--after_line=banner'):
                # Insert just after the leading block of '#' comment lines.
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                for posline,l in enumerate(split):
                    if not l.startswith('#'):
                        break
                split.insert(posline, line.split(None,2)[2])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                # NOTE(review): ff is not closed explicitly in this branch.
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--before_line='):
                # Insert before the first line matching the quoted regular
                # expression; error out if no line matches.
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                search_pattern=r'''before_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
                pattern = re.search(search_pattern, line).group()[13:-1]
                for posline,l in enumerate(split):
                    if re.search(pattern, l):
                        break
                else:
                    raise Exception, 'invalid regular expression: not found in file'
                split.insert(posline, re.split(search_pattern,line)[-1])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                # NOTE(review): ff is not closed explicitly in this branch.
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            elif args[1].startswith('--after_line='):
                # catch the line/regular expression and write after that line
                # (appended at the end of the file if nothing matches).
                text = open(pjoin(self.me_dir,'Cards',card)).read()
                split = text.split('\n')
                search_pattern = r'''after_line=(?P<quote>["'])(?:(?=(\\?))\2.)*?\1'''
                pattern = re.search(search_pattern, line).group()[12:-1]
                for posline,l in enumerate(split):
                    if re.search(pattern, l):
                        break
                else:
                    posline=len(split)
                split.insert(posline+1, re.split(search_pattern,line)[-1])
                ff = open(pjoin(self.me_dir,'Cards',card),'w')
                ff.write('\n'.join(split))
                # NOTE(review): ff is not closed explicitly in this branch.
                logger.info("writting at line %d of the file %s the line: \"%s\"" %(posline, card, line.split(None,1)[1] ),'$MG:color:BLACK')
            else:
                # No option: append the raw line at the end of the card.
                ff = open(pjoin(self.me_dir,'Cards',card),'a')
                ff.write("%s \n" % line.split(None,1)[1])
                ff.close()
                logger.info("adding at the end of the file %s the line: \"%s\"" %(card, line.split(None,1)[1] ),'$MG:color:BLACK')
            # Re-parse the modified card so in-memory objects stay in sync.
            self.reload_card(pjoin(self.me_dir,'Cards',card))
def help_asperge(self):
"""Help associated to the asperge command"""
signal.alarm(0)
print '-- syntax: asperge [options]'
print ' Call ASperGe to diagonalize all mass matrices in the model.'
print ' This works only if the ASperGE module is part of the UFO model (a subdirectory).'
print ' If you specify some names after the command (i.e. asperge m1 m2) then ASperGe will only'
print ' diagonalize the associate mass matrices (here m1 and m2).'
def complete_asperge(self, text, line, begidx, endidx, formatting=True):
prev_timer = signal.alarm(0)
if prev_timer:
nb_back = len(line)
self.stdout.write('\b'*nb_back + '[timer stopped]\n')
self.stdout.write(line)
self.stdout.flush()
blockname = self.pname2block.keys()
wrong = ['decay', 'mass', 'sminput']
valid = [k for k in blockname if 'mix' in k]
potential = [k for k in blockname if k not in valid+wrong]
output = {'Mixing matrices': self.list_completion(text, valid, line),
'Other potential valid input': self.list_completion(text, potential, line)}
return self.deal_multiple_categories(output, formatting)
    def do_asperge(self, line):
        """Running ASperGe

        Runs the ASperGe mass-matrix diagonalization tool shipped inside the
        UFO model on the current param_card, compiling it first if needed.
        The previous card is kept as <card>.beforeasperge.
        """
        # Cancel the pending question timer before doing real work.
        signal.alarm(0)
        path = pjoin(self.me_dir,'bin','internal','ufomodel','ASperGE')
        if not os.path.exists(path):
            # The ASperGE subdirectory is simply absent from this model.
            logger.error('ASperge has not been detected in the current model, therefore it will not be run.')
            return
        elif not os.path.exists(pjoin(path,'ASperGe')):
            # Sources are present but the binary is missing: compile on the fly.
            logger.info('ASperGe has been detected but is not compiled. Running the compilation now.')
            try:
                misc.compile(cwd=path,shell=True)
            except MadGraph5Error, error:
                logger.error('''ASperGe failed to compile. Note that gsl is needed
     for this compilation to go trough. More information on how to install this package on
     http://www.gnu.org/software/gsl/
     Full compilation log is available at %s''' % pjoin(self.me_dir, 'ASperge_compilation.log'))
                open(pjoin(self.me_dir, 'ASperge_compilation.log'),'w').write(str(error))
                return
        # Any extra words on the command line are forwarded to ASperGe
        # (restricting which mass matrices get diagonalized).
        opts = line.split()
        card = self.paths['param']
        logger.info('running ASperGE')
        returncode = misc.call([pjoin(path,'ASperGe'), card, '%s.new' % card] + opts)
        if returncode:
            logger.error('ASperGE fails with status %s' % returncode)
        else:
            logger.info('AsPerGe creates the file succesfully')
        # Keep a backup of the original card, then promote the new one.
        # NOTE(review): this swap also happens when ASperGE reported a
        # failure above — confirm that is intentional.
        files.mv(card, '%s.beforeasperge' % card)
        files.mv('%s.new' % card, card)
def copy_file(self, path):
"""detect the type of the file and overwritte the current file"""
if path.endswith('.lhco'):
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
elif path.endswith('.lhco.gz'):
self.do_set('mw_run inputfile %s' % os.path.relpath(path, self.mother_interface.me_dir))
return
else:
card_name = CommonRunCmd.detect_card_type(path)
if card_name == 'unknown':
logger.warning('Fail to determine the type of the file. Not copied')
if card_name != 'banner':
logger.info('copy %s as %s' % (path, card_name))
files.cp(path, self.paths[card_name.split('_',1)[0]])
self.reload_card(self.paths[card_name.split('_',1)[0]])
elif card_name == 'banner':
banner_mod.split_banner(path, self.mother_interface.me_dir, proc_card=False)
logger.info('Splitting the banner in it\'s component')
if not self.mode == 'auto':
self.mother_interface.keep_cards(self.cards)
for card_name in self.cards:
self.reload_card(pjoin(self.me_dir, 'Cards', card_name))
    def open_file(self, answer):
        """open the file

        Resolves the user's answer (a menu number, a card nickname, or a
        path) to a real file path and asks the mother interface to open it,
        then reloads the corresponding in-memory card object.
        """
        try:
            me_dir = self.mother_interface.me_dir
        except:
            me_dir = None
        if answer.isdigit():
            # Menu selection: entry 9 is the plot card, otherwise the
            # 1-based index into self.cards.
            if answer == '9':
                answer = 'plot'
            else:
                answer = self.cards[int(answer)-1]
        # Normalise a few nicknames to their canonical card name/path.
        if 'madweight' in answer:
            answer = answer.replace('madweight', 'MadWeight')
        elif 'MadLoopParams' in answer:
            answer = self.paths['ML']
        elif 'pythia8_card' in answer:
            answer = self.paths['pythia8']
        if os.path.exists(answer):
            path = answer
        else:
            if not '.dat' in answer and not '.lhco' in answer:
                # Bare nickname: look it up in the known paths table
                # ('trigger' maps onto the delphes card).
                if answer != 'trigger':
                    path = self.paths[answer]
                else:
                    path = self.paths['delphes']
            elif not '.lhco' in answer:
                # A card filename: strip the trailing "_card.dat"-style
                # suffix to find its paths entry, else look under Cards/.
                if '_' in answer:
                    path = self.paths['_'.join(answer.split('_')[:-1])]
                else:
                    path = pjoin(me_dir, 'Cards', answer)
            else:
                # An lhco event file: use the location from the MW card,
                # falling back to the Events directory.
                path = pjoin(me_dir, self.mw_card['mw_run']['inputfile'])
                if not os.path.exists(path):
                    logger.info('Path in MW_card not existing')
                    path = pjoin(me_dir, 'Events', answer)
        #security
        path = path.replace('_card_card','_card')
        try:
            self.mother_interface.exec_cmd('open %s' % path)
        except InvalidCmd, error:
            if str(error) != 'No default path for this file':
                raise
            if answer == 'transfer_card.dat':
                logger.warning('You have to specify a transfer function first!')
            elif answer == 'input.lhco':
                # Create a placeholder lhco file and re-open it so the user
                # can paste a path or the event content directly.
                path = pjoin(me_dir,'Events', 'input.lhco')
                ff = open(path,'w')
                ff.write('''No LHCO information imported at current time.
To import a lhco file: Close this file and type the path of your file.
You can also copy/paste, your event file here.''')
                ff.close()
                self.open_file(path)
            else:
                raise
        # Re-parse the (possibly edited) file so memory stays in sync.
        self.reload_card(path)
    def reload_card(self, path):
        """reload object to have it in sync

        Re-parses the card located at ``path`` into the matching in-memory
        object (param/run/shower/MadLoop/pythia8/MadWeight card). Unknown
        paths are only logged. Returns ``path`` unchanged.
        """
        if path == self.paths['param']:
            try:
                self.param_card = check_param_card.ParamCard(path)
            except (check_param_card.InvalidParamCard, ValueError) as e:
                # Keep the previous (valid) object and tell the user to fix
                # the file by hand.
                logger.error('Current param_card is not valid. We are going to use the default one.')
                logger.error('problem detected: %s' % e)
                logger.error('Please re-open the file and fix the problem.')
                logger.warning('using the \'set\' command without opening the file will discard all your manual change')
        elif path == self.paths['run']:
            self.run_card = banner_mod.RunCard(path)
        elif path == self.paths['shower']:
            self.shower_card = shower_card_mod.ShowerCard(path)
        elif path == self.paths['ML']:
            self.MLcard = banner_mod.MadLoopParam(path)
        elif path == self.paths['pythia8']:
            # Use the read function so that modified/new parameters are correctly
            # set as 'user_set'
            if not self.PY8Card:
                self.PY8Card = banner_mod.PY8Card(self.paths['pythia8_default'])
            self.PY8Card.read(self.paths['pythia8'], setter='user')
            self.py8_vars = [k.lower() for k in self.PY8Card.keys()]
        elif path == self.paths['MadWeight']:
            # The MadWeight Cards module lives in a different package
            # depending on whether we run from madgraph or a process dir.
            try:
                import madgraph.madweight.Cards as mwcards
            except:
                import internal.madweight.Cards as mwcards
            self.mw_card = mwcards.Card(path)
        else:
            logger.debug('not keep in sync: %s', path)
        return path
class EditParamCard(AskforEditCard):
    """A dedicated editing interface restricted to the param_card."""

    # No "set" shortcuts are meaningful when only the param_card is editable.
    special_shortcut = {}

    def __init__(self, question, card=None, mode='auto', *args, **opt):
        """Initialise the editor for the param_card located at ``card[0]``.

        :param question: text of the question shown to the user
        :param card: one-element list holding the path of the param_card
        :param mode: kept for interface compatibility with AskforEditCard
        :raises Exception: if the given path does not exist
        """
        # Fix: avoid the mutable-default-argument pitfall; an empty list
        # preserves the historical IndexError when no card is given.
        if card is None:
            card = []
        self.load_default()
        cmd.OneLinePathCompletion.__init__(self, question, *args, **opt)
        if os.path.isfile(card[0]):
            self.param_card = check_param_card.ParamCard(card[0])
            self.paths['param'] = card[0]
            # Prefer a sibling *_default.dat file as the reference card.
            if os.path.isfile(card[0].replace('.dat', '_default.dat')):
                self.paths['param_default'] = card[0].replace('.dat', '_default.dat')
            else:
                self.paths['param_default'] = card[0]
        else:
            # Fix: Python3-compatible raise (was the py2-only "raise E, msg").
            raise Exception('path %s do not exists' % card[0])
        self.pname2block, self.restricted_value = self.param_card.analyze_param_card()
        self.cards = ['param']

    def do_asperge(self, *args, **opts):
        """Not available"""
        logger.warning("asperge not available in this mode")
| false | true |
f7faf4388477d5b39cad4364e1f806ea9360bb66 | 10,802 | py | Python | modin/engines/base/io/text/csv_reader.py | reshamas/modin | b01f91fb3a628ce374aa830964d094480f73afca | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/engines/base/io/text/csv_reader.py | reshamas/modin | b01f91fb3a628ce374aa830964d094480f73afca | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/engines/base/io/text/csv_reader.py | reshamas/modin | b01f91fb3a628ce374aa830964d094480f73afca | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from modin.engines.base.io.text.text_file_reader import TextFileReader
from modin.data_management.utils import compute_chunksize
from pandas.io.parsers import _validate_usecols_arg
import pandas
import csv
import sys
class CSVReader(TextFileReader):
    """Parallel CSV reader.

    Splits the file into byte ranges and parses each range in a separate
    worker, falling back to ``single_worker_read`` whenever the request
    cannot be safely partitioned.
    """

    @classmethod
    def _read(cls, filepath_or_buffer, **kwargs):
        """Read ``filepath_or_buffer`` as a CSV into a query compiler.

        Falls back to ``single_worker_read`` for cases the partitioned
        reader cannot handle (non-existing/buffer-like paths, unsupported
        compression, ``chunksize``, non-integer ``skiprows``); otherwise
        splits the file into byte ranges, parses each range remotely and
        assembles partitions, index and dtypes into a new frame.

        :param filepath_or_buffer: path or file-like object to read
        :param kwargs: keyword arguments of ``pandas.read_csv``
        :return: a query compiler wrapping the parsed data
        """
        if isinstance(filepath_or_buffer, str):
            if not cls.file_exists(filepath_or_buffer):
                return cls.single_worker_read(filepath_or_buffer, **kwargs)
            filepath_or_buffer = cls.get_path(filepath_or_buffer)
        elif not cls.pathlib_or_pypath(filepath_or_buffer):
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        compression_type = cls.infer_compression(
            filepath_or_buffer, kwargs.get("compression")
        )
        if compression_type is not None:
            if (
                compression_type == "gzip"
                or compression_type == "bz2"
                or compression_type == "xz"
            ):
                kwargs["compression"] = compression_type
            elif (
                compression_type == "zip"
                and sys.version_info[0] == 3
                and sys.version_info[1] >= 7
            ):
                # need python3.7 to .seek and .tell ZipExtFile
                kwargs["compression"] = compression_type
            else:
                return cls.single_worker_read(filepath_or_buffer, **kwargs)
        # ``chunksize`` and non-integer ``skiprows`` cannot be partitioned.
        chunksize = kwargs.get("chunksize")
        if chunksize is not None:
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        skiprows = kwargs.get("skiprows")
        if skiprows is not None and not isinstance(skiprows, int):
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        nrows = kwargs.pop("nrows", None)
        names = kwargs.get("names", None)
        index_col = kwargs.get("index_col", None)
        usecols = kwargs.get("usecols", None)
        if names is None:
            # For the sake of the empty df, we assume no `index_col` to get the correct
            # column names before we build the index. Because we pass `names` in, this
            # step has to happen without removing the `index_col` otherwise it will not
            # be assigned correctly
            names = pandas.read_csv(
                filepath_or_buffer,
                **dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
            ).columns
        elif index_col is None and not usecols:
            # When names is set to some list that is smaller than the number of columns
            # in the file, the first columns are built as a hierarchical index.
            empty_pd_df = pandas.read_csv(filepath_or_buffer, nrows=0)
            num_cols = len(empty_pd_df.columns)
            if num_cols > len(names):
                index_col = list(range(num_cols - len(names)))
                if len(index_col) == 1:
                    index_col = index_col[0]
                kwargs["index_col"] = index_col
        # Zero-row read to capture the final column metadata cheaply.
        empty_pd_df = pandas.read_csv(
            filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
        )
        column_names = empty_pd_df.columns
        skipfooter = kwargs.get("skipfooter", None)
        skiprows = kwargs.pop("skiprows", None)
        usecols_md = _validate_usecols_arg(usecols)
        if usecols is not None and usecols_md[1] != "integer":
            # Translate label-based usecols into positional indices so each
            # partition can apply them without knowing the labels.
            del kwargs["usecols"]
            all_cols = pandas.read_csv(
                cls.file_open(filepath_or_buffer, "rb"),
                **dict(kwargs, nrows=0, skipfooter=0),
            ).columns
            usecols = all_cols.get_indexer_for(list(usecols_md[0]))
        parse_dates = kwargs.pop("parse_dates", False)
        partition_kwargs = dict(
            kwargs,
            header=None,
            names=names,
            skipfooter=0,
            skiprows=None,
            parse_dates=parse_dates,
            usecols=usecols,
        )
        encoding = kwargs.get("encoding", None)
        quotechar = kwargs.get("quotechar", '"').encode(
            encoding if encoding is not None else "UTF-8"
        )
        is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
        with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
            # Skip the header since we already have the header information and skip the
            # rows we are told to skip.
            if isinstance(skiprows, int) or skiprows is None:
                if skiprows is None:
                    skiprows = 0
                header = kwargs.get("header", "infer")
                if header == "infer" and kwargs.get("names", None) is None:
                    skiprows += 1
                elif isinstance(header, int):
                    skiprows += header + 1
                elif hasattr(header, "__iter__") and not isinstance(header, str):
                    skiprows += max(header) + 1
            if kwargs.get("encoding", None) is not None:
                partition_kwargs["skiprows"] = 1
            # Launch tasks to read partitions
            partition_ids = []
            index_ids = []
            dtypes_ids = []
            # Max number of partitions available
            from modin.pandas import DEFAULT_NPARTITIONS
            num_partitions = DEFAULT_NPARTITIONS
            # This is the number of splits for the columns
            num_splits = min(len(column_names), num_partitions)
            # Metadata
            column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
            if column_chunksize > len(column_names):
                column_widths = [len(column_names)]
                # This prevents us from unnecessarily serializing a bunch of empty
                # objects.
                num_splits = 1
            else:
                column_widths = [
                    column_chunksize
                    if len(column_names) > (column_chunksize * (i + 1))
                    else 0
                    if len(column_names) < (column_chunksize * i)
                    else len(column_names) - (column_chunksize * i)
                    for i in range(num_splits)
                ]
            args = {
                "fname": filepath_or_buffer,
                "num_splits": num_splits,
                **partition_kwargs,
            }
            splits = cls.partitioned_file(
                f,
                num_partitions=num_partitions,
                nrows=nrows,
                skiprows=skiprows,
                quotechar=quotechar,
                is_quoting=is_quoting,
            )
            for start, end in splits:
                args.update({"start": start, "end": end})
                partition_id = cls.deploy(cls.parse, num_splits + 2, args)
                partition_ids.append(partition_id[:-2])
                index_ids.append(partition_id[-2])
                dtypes_ids.append(partition_id[-1])
        # Compute the index based on a sum of the lengths of each partition (by default)
        # or based on the column(s) that were requested.
        if index_col is None:
            row_lengths = cls.materialize(index_ids)
            new_index = pandas.RangeIndex(sum(row_lengths))
        else:
            index_objs = cls.materialize(index_ids)
            row_lengths = [len(o) for o in index_objs]
            new_index = index_objs[0].append(index_objs[1:])
            new_index.name = empty_pd_df.index.name
        # Compute dtypes by getting collecting and combining all of the partitions. The
        # reported dtypes from differing rows can be different based on the inference in
        # the limited data seen by each worker. We use pandas to compute the exact dtype
        # over the whole column for each column. The index is set below.
        dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
        partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
        # If parse_dates is present, the column names that we have might not be
        # the same length as the returned column names. If we do need to modify
        # the column names, we remove the old names from the column names and
        # insert the new one at the front of the Index.
        if parse_dates is not None:
            # We have to recompute the column widths if `parse_dates` is set because
            # we are not guaranteed to have the correct information regarding how many
            # columns are on each partition.
            column_widths = None
            # Check if is list of lists
            if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
                for group in parse_dates:
                    new_col_name = "_".join(group)
                    column_names = column_names.drop(group).insert(0, new_col_name)
            # Check if it is a dictionary
            elif isinstance(parse_dates, dict):
                for new_col_name, group in parse_dates.items():
                    column_names = column_names.drop(group).insert(0, new_col_name)
        # Set the index for the dtypes to the column names
        if isinstance(dtypes, pandas.Series):
            dtypes.index = column_names
        else:
            dtypes = pandas.Series(dtypes, index=column_names)
        new_frame = cls.frame_cls(
            partition_ids,
            new_index,
            column_names,
            row_lengths,
            column_widths,
            dtypes=dtypes,
        )
        new_query_compiler = cls.query_compiler_cls(new_frame)
        if skipfooter:
            # ``skipfooter`` rows were parsed anyway; drop them from the end.
            new_query_compiler = new_query_compiler.drop(
                new_query_compiler.index[-skipfooter:]
            )
        if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
            return new_query_compiler[new_query_compiler.columns[0]]
        if index_col is None:
            new_query_compiler._modin_frame._apply_index_objs(axis=0)
        return new_query_compiler
| 46.360515 | 88 | 0.60063 |
from modin.engines.base.io.text.text_file_reader import TextFileReader
from modin.data_management.utils import compute_chunksize
from pandas.io.parsers import _validate_usecols_arg
import pandas
import csv
import sys
class CSVReader(TextFileReader):
    """Parallel CSV reader.

    Splits the file into byte ranges and parses each range in a separate
    worker, falling back to ``single_worker_read`` whenever the request
    cannot be safely partitioned.
    """

    @classmethod
    def _read(cls, filepath_or_buffer, **kwargs):
        """Read ``filepath_or_buffer`` as a CSV into a query compiler.

        Falls back to ``single_worker_read`` for cases the partitioned
        reader cannot handle (non-existing/buffer-like paths, unsupported
        compression, ``chunksize``, non-integer ``skiprows``); otherwise
        splits the file into byte ranges, parses each range remotely and
        assembles partitions, index and dtypes into a new frame.

        :param filepath_or_buffer: path or file-like object to read
        :param kwargs: keyword arguments of ``pandas.read_csv``
        :return: a query compiler wrapping the parsed data
        """
        if isinstance(filepath_or_buffer, str):
            if not cls.file_exists(filepath_or_buffer):
                return cls.single_worker_read(filepath_or_buffer, **kwargs)
            filepath_or_buffer = cls.get_path(filepath_or_buffer)
        elif not cls.pathlib_or_pypath(filepath_or_buffer):
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        compression_type = cls.infer_compression(
            filepath_or_buffer, kwargs.get("compression")
        )
        if compression_type is not None:
            if (
                compression_type == "gzip"
                or compression_type == "bz2"
                or compression_type == "xz"
            ):
                kwargs["compression"] = compression_type
            elif (
                compression_type == "zip"
                and sys.version_info[0] == 3
                and sys.version_info[1] >= 7
            ):
                # need python3.7 to .seek and .tell ZipExtFile
                kwargs["compression"] = compression_type
            else:
                return cls.single_worker_read(filepath_or_buffer, **kwargs)
        # ``chunksize`` and non-integer ``skiprows`` cannot be partitioned.
        chunksize = kwargs.get("chunksize")
        if chunksize is not None:
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        skiprows = kwargs.get("skiprows")
        if skiprows is not None and not isinstance(skiprows, int):
            return cls.single_worker_read(filepath_or_buffer, **kwargs)
        nrows = kwargs.pop("nrows", None)
        names = kwargs.get("names", None)
        index_col = kwargs.get("index_col", None)
        usecols = kwargs.get("usecols", None)
        if names is None:
            # Zero-row read (with index_col disabled) to learn the real
            # column names before the index is built.
            names = pandas.read_csv(
                filepath_or_buffer,
                **dict(kwargs, usecols=None, nrows=0, skipfooter=0, index_col=None),
            ).columns
        elif index_col is None and not usecols:
            # When ``names`` is shorter than the number of columns in the
            # file, the leading columns become a hierarchical index.
            empty_pd_df = pandas.read_csv(filepath_or_buffer, nrows=0)
            num_cols = len(empty_pd_df.columns)
            if num_cols > len(names):
                index_col = list(range(num_cols - len(names)))
                if len(index_col) == 1:
                    index_col = index_col[0]
                kwargs["index_col"] = index_col
        # Zero-row read to capture the final column metadata cheaply.
        empty_pd_df = pandas.read_csv(
            filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
        )
        column_names = empty_pd_df.columns
        skipfooter = kwargs.get("skipfooter", None)
        skiprows = kwargs.pop("skiprows", None)
        usecols_md = _validate_usecols_arg(usecols)
        if usecols is not None and usecols_md[1] != "integer":
            # Translate label-based usecols into positional indices so each
            # partition can apply them without knowing the labels.
            del kwargs["usecols"]
            all_cols = pandas.read_csv(
                cls.file_open(filepath_or_buffer, "rb"),
                **dict(kwargs, nrows=0, skipfooter=0),
            ).columns
            usecols = all_cols.get_indexer_for(list(usecols_md[0]))
        parse_dates = kwargs.pop("parse_dates", False)
        partition_kwargs = dict(
            kwargs,
            header=None,
            names=names,
            skipfooter=0,
            skiprows=None,
            parse_dates=parse_dates,
            usecols=usecols,
        )
        encoding = kwargs.get("encoding", None)
        quotechar = kwargs.get("quotechar", '"').encode(
            encoding if encoding is not None else "UTF-8"
        )
        is_quoting = kwargs.get("quoting", "") != csv.QUOTE_NONE
        with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
            # Skip the header since we already have the header information and skip the
            # rows we are told to skip.
            if isinstance(skiprows, int) or skiprows is None:
                if skiprows is None:
                    skiprows = 0
                header = kwargs.get("header", "infer")
                if header == "infer" and kwargs.get("names", None) is None:
                    skiprows += 1
                elif isinstance(header, int):
                    skiprows += header + 1
                elif hasattr(header, "__iter__") and not isinstance(header, str):
                    skiprows += max(header) + 1
            if kwargs.get("encoding", None) is not None:
                partition_kwargs["skiprows"] = 1
            # Launch tasks to read partitions
            partition_ids = []
            index_ids = []
            dtypes_ids = []
            # Max number of partitions available
            from modin.pandas import DEFAULT_NPARTITIONS
            num_partitions = DEFAULT_NPARTITIONS
            # This is the number of splits for the columns
            num_splits = min(len(column_names), num_partitions)
            # Metadata
            column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
            if column_chunksize > len(column_names):
                column_widths = [len(column_names)]
                # This prevents us from unnecessarily serializing a bunch of empty
                # objects.
                num_splits = 1
            else:
                column_widths = [
                    column_chunksize
                    if len(column_names) > (column_chunksize * (i + 1))
                    else 0
                    if len(column_names) < (column_chunksize * i)
                    else len(column_names) - (column_chunksize * i)
                    for i in range(num_splits)
                ]
            args = {
                "fname": filepath_or_buffer,
                "num_splits": num_splits,
                **partition_kwargs,
            }
            splits = cls.partitioned_file(
                f,
                num_partitions=num_partitions,
                nrows=nrows,
                skiprows=skiprows,
                quotechar=quotechar,
                is_quoting=is_quoting,
            )
            for start, end in splits:
                args.update({"start": start, "end": end})
                partition_id = cls.deploy(cls.parse, num_splits + 2, args)
                partition_ids.append(partition_id[:-2])
                index_ids.append(partition_id[-2])
                dtypes_ids.append(partition_id[-1])
        # Compute the index based on a sum of the lengths of each partition (by default)
        # or based on the column(s) that were requested.
        if index_col is None:
            row_lengths = cls.materialize(index_ids)
            new_index = pandas.RangeIndex(sum(row_lengths))
        else:
            index_objs = cls.materialize(index_ids)
            row_lengths = [len(o) for o in index_objs]
            new_index = index_objs[0].append(index_objs[1:])
            new_index.name = empty_pd_df.index.name
        # Compute dtypes by getting collecting and combining all of the partitions. The
        # reported dtypes from differing rows can be different based on the inference in
        # the limited data seen by each worker. We use pandas to compute the exact dtype
        # over the whole column for each column. The index is set below.
        dtypes = cls.get_dtypes(dtypes_ids) if len(dtypes_ids) > 0 else None
        partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
        # If parse_dates is present, the column names that we have might not be
        # the same length as the returned column names. If we do need to modify
        # the column names, we remove the old names from the column names and
        # insert the new one at the front of the Index.
        if parse_dates is not None:
            # We have to recompute the column widths if `parse_dates` is set because
            # we are not guaranteed to have the correct information regarding how many
            # columns are on each partition.
            column_widths = None
            # Check if is list of lists
            if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
                for group in parse_dates:
                    new_col_name = "_".join(group)
                    column_names = column_names.drop(group).insert(0, new_col_name)
            # Check if it is a dictionary
            elif isinstance(parse_dates, dict):
                for new_col_name, group in parse_dates.items():
                    column_names = column_names.drop(group).insert(0, new_col_name)
        # Set the index for the dtypes to the column names
        if isinstance(dtypes, pandas.Series):
            dtypes.index = column_names
        else:
            dtypes = pandas.Series(dtypes, index=column_names)
        new_frame = cls.frame_cls(
            partition_ids,
            new_index,
            column_names,
            row_lengths,
            column_widths,
            dtypes=dtypes,
        )
        new_query_compiler = cls.query_compiler_cls(new_frame)
        if skipfooter:
            # ``skipfooter`` rows were parsed anyway; drop them from the end.
            new_query_compiler = new_query_compiler.drop(
                new_query_compiler.index[-skipfooter:]
            )
        if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
            return new_query_compiler[new_query_compiler.columns[0]]
        if index_col is None:
            new_query_compiler._modin_frame._apply_index_objs(axis=0)
        return new_query_compiler
| true | true |
f7faf5d2d4bf2b5616444dc7a52cdfb5ea475ac7 | 950 | py | Python | galileo_screenshots/urls.py | wasmitnetzen/galileo-screenshots | dbac61382b25d82dc413d928f1cd40448173b06f | [
"MIT"
] | null | null | null | galileo_screenshots/urls.py | wasmitnetzen/galileo-screenshots | dbac61382b25d82dc413d928f1cd40448173b06f | [
"MIT"
] | null | null | null | galileo_screenshots/urls.py | wasmitnetzen/galileo-screenshots | dbac61382b25d82dc413d928f1cd40448173b06f | [
"MIT"
] | null | null | null | """galileo_screenshots URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Route the admin site under /admin/ and delegate every other URL to the
# frontend app; static() additionally serves user-uploaded media files
# (MEDIA_URL -> MEDIA_ROOT) during development.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('frontend.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38 | 77 | 0.727368 | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# URL table: Django admin at /admin/, all remaining routes handled by the
# frontend app; media files are served from MEDIA_ROOT under MEDIA_URL.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('frontend.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f7faf690dc165a423e33f81289c0d0f2103d27ed | 12,860 | py | Python | capsulenet.py | bretthandrews/CapsNet-Keras | d9bb39688a048b965bed92114e2836c38e2a960b | [
"MIT"
] | null | null | null | capsulenet.py | bretthandrews/CapsNet-Keras | d9bb39688a048b965bed92114e2836c38e2a960b | [
"MIT"
] | 4 | 2019-05-20T18:01:46.000Z | 2019-05-23T01:36:16.000Z | capsulenet.py | bretthandrews/CapsNet-Keras | d9bb39688a048b965bed92114e2836c38e2a960b | [
"MIT"
] | null | null | null | """
Keras implementation of CapsNet in Hinton's paper Dynamic Routing
Between Capsules. The current version maybe only works for TensorFlow
backend. Actually it will be straightforward to re-write to TF code.
Adopting to other backends should be easy, but I have not tested this.
Usage:
python capsulenet.py
python capsulenet.py --epochs 50
python capsulenet.py --epochs 50 --routings 3
... ...
Result:
Validation accuracy > 99.5% after 20 epochs. Converge to 99.66%
after 50 epochs. About 110 seconds per epoch on a single GTX1070
GPU card.
Author: Xifeng Guo, E-mail: `guoxifeng1990@163.com`,
Github: `https://github.com/XifengGuo/CapsNet-Keras`
"""
from keras import layers, models, optimizers
from keras import backend as K
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
from utils import combine_images, TimeHistory
# The whole model assumes NHWC tensors: [batch, height, width, channels].
K.set_image_data_format("channels_last")
def CapsNet(input_shape, n_class, routings):
    """Build the Capsule Network architecture for CIFAR-10.

    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: three Keras models — the training model, the evaluation
        model, and a manipulation model that perturbs the digit capsules.
        ``eval_model`` can also be used for training.
    """
    image = layers.Input(shape=input_shape)

    # Layer 1: an ordinary convolution producing 256 feature maps.
    conv_out = layers.Conv2D(
        filters=256,
        kernel_size=9,
        strides=1,
        padding="valid",
        activation="relu",
        name="conv1",
    )(image)

    # Layer 2: convolutional capsules with `squash` activation, reshaped to
    # [None, num_capsule, dim_capsule].
    primary = PrimaryCap(
        conv_out, dim_capsule=8, n_channels=64, kernel_size=9, strides=2, padding="valid"
    )

    # Layer 3: digit capsules; the dynamic routing algorithm runs here.
    digit_caps = CapsuleLayer(
        num_capsule=n_class, dim_capsule=16, routings=routings, name="digitcaps"
    )(primary)

    # Layer 4: replace each capsule by its length so the output matches the
    # one-hot label shape (only needed because of the Keras API).
    out_caps = Length(name="capsnet")(digit_caps)

    # The true label masks the capsules during training, while the capsule
    # with the maximal length is used at prediction time.
    true_label = layers.Input(shape=(n_class,))
    masked_train = Mask()([digit_caps, true_label])
    masked_eval = Mask()(digit_caps)

    # Reconstruction decoder shared by training and prediction.
    decoder = models.Sequential(name="decoder")
    decoder.add(layers.Dense(512, activation="relu", input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation="relu"))
    decoder.add(layers.Dense(np.prod(input_shape), activation="sigmoid"))
    decoder.add(layers.Reshape(target_shape=input_shape, name="out_recon"))

    train_model = models.Model([image, true_label], [out_caps, decoder(masked_train)])
    eval_model = models.Model(image, [out_caps, decoder(masked_eval)])

    # Manipulation model: add noise to the digit capsules to probe what each
    # capsule dimension encodes.
    noise = layers.Input(shape=(n_class, 16))
    noisy_caps = layers.Add()([digit_caps, noise])
    masked_noisy = Mask()([noisy_caps, true_label])
    manipulate_model = models.Model([image, true_label, noise], decoder(masked_noisy))
    return train_model, eval_model, manipulate_model
def margin_loss(y_true, y_pred):
    """Margin loss from Eq.(4) of the CapsNet paper.

    Expected to also work when ``y_true[i, :]`` contains more than a
    single ``1`` (untested).

    :param y_true: one-hot labels, shape [None, n_classes]
    :param y_pred: capsule lengths, shape [None, num_capsule]
    :return: a scalar loss value.
    """
    # Penalty when the class is present but its capsule is shorter than 0.9.
    present_term = y_true * K.square(K.maximum(0., 0.9 - y_pred))
    # Down-weighted penalty when the class is absent but its capsule exceeds 0.1.
    absent_term = 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    per_class_loss = present_term + absent_term
    return K.mean(K.sum(per_class_loss, 1))
def train(model, data, args):
    """
    Training a CapsuleNet
    :param model: the CapsuleNet training model (inputs ``[image, label]``,
                  outputs ``[capsule lengths, reconstruction]``)
    :param data: a tuple containing training and testing data, like
                 `((x_train, y_train), (x_test, y_test))`
    :param args: parsed command-line arguments (epochs, batch_size, lr,
                 lr_decay, lam_recon, save_dir, debug, data_augmentation,
                 shift_fraction)
    :return: The trained model
    """
    # unpacking the data
    (x_train, y_train), (x_test, y_test) = data
    # callbacks
    log = callbacks.CSVLogger(args.save_dir + "/log.csv")
    tb = callbacks.TensorBoard(
        log_dir=args.save_dir + "/tensorboard-logs",
        batch_size=args.batch_size,
        histogram_freq=int(args.debug),
    )
    # Save the best weights only, judged by validation capsnet accuracy.
    checkpoint = callbacks.ModelCheckpoint(
        args.save_dir + "/weights-{epoch:02d}.h5",
        monitor="val_capsnet_acc",
        save_best_only=True,
        save_weights_only=True,
        verbose=1,
    )
    # Exponential decay: lr * lr_decay ** epoch.
    lr_decay = callbacks.LearningRateScheduler(
        schedule=lambda epoch: args.lr * (args.lr_decay ** epoch)
    )
    timing = TimeHistory()
    # compile the model
    # Two losses: margin loss on the class capsules and MSE on the decoder
    # reconstruction, weighted by lam_recon.
    model.compile(
        optimizer=optimizers.Adam(lr=args.lr),
        loss=[margin_loss, "mse"],
        loss_weights=[1., args.lam_recon],
        metrics={"capsnet": "accuracy"},
    )

    if args.data_augmentation:

        def train_generator(x, y, batch_size, shift_fraction=0.):
            # shift up to 2 pixel for MNIST
            train_datagen = ImageDataGenerator(
                width_shift_range=shift_fraction, height_shift_range=shift_fraction
            )
            generator = train_datagen.flow(x, y, batch_size=batch_size)
            while 1:
                x_batch, y_batch = generator.next()
                # Inputs are [image, label]; targets are [label, image]
                # because the decoder reconstructs the input image.
                yield ([x_batch, y_batch], [y_batch, x_batch])

        assert args.shift_fraction != 0, "No data augmentation if ``shift_fraction`` == 0."
        model.fit_generator(
            generator=train_generator(
                x_train, y_train, args.batch_size, args.shift_fraction
            ),
            steps_per_epoch=int(y_train.shape[0] / args.batch_size),
            epochs=args.epochs,
            validation_data=[[x_test, y_test], [y_test, x_test]],
            callbacks=[log, tb, checkpoint, lr_decay, timing],
        )
    else:
        assert args.shift_fraction == 0, "Set ``data_augmentation`` flag to shift pixels."
        model.fit(
            [x_train, y_train],
            [y_train, x_train],
            batch_size=args.batch_size,
            epochs=args.epochs,
            validation_data=[[x_test, y_test], [y_test, x_test]],
            callbacks=[log, tb, checkpoint, lr_decay, timing],
        )
    print("Time per epoch", timing.times)
    model.save_weights(args.save_dir + "/trained_model.h5")
    print("Trained model saved to '%s/trained_model.h5'" % args.save_dir)

    from utils import plot_log

    plot_log(args.save_dir + "/log.csv", show=True)
    return model
def test(model, data, args):
    """Evaluate a trained CapsNet on the test set.

    Prints the classification accuracy, saves a montage of the first 50
    test images next to their reconstructions, and displays it.

    :param model: the ``eval_model`` returned by ``CapsNet``
    :param data: ``(x_test, y_test)`` test images and one-hot labels
    :param args: parsed CLI arguments; uses ``args.save_dir``
    """
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print("-" * 30 + "Begin: test" + "-" * 30)
    # Accuracy = fraction of samples whose predicted class matches the label.
    accuracy = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0]
    print("Test acc:", accuracy)
    # Montage: 50 real images followed by their 50 reconstructions.
    montage = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    pixels = (montage * 255).astype(np.uint8)
    Image.fromarray(pixels).save(args.save_dir + "/real_and_recon.png")
    print()
    print("Reconstructed images are saved to %s/real_and_recon.png" % args.save_dir)
    print("-" * 30 + "End: test" + "-" * 30)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
def manipulate_latent(model, data, args):
    """Visualize the effect of perturbing digit-capsule dimensions.

    Picks a random test sample of class ``args.digit``, adds a constant
    offset to each of the 16 capsule dimensions in turn, reconstructs an
    image for each perturbation, and saves the results as a montage.

    :param model: the ``manipulate_model`` returned by ``CapsNet``
        (inputs: image, one-hot label, noise)
    :param data: ``(x_test, y_test)`` test images and one-hot labels
    :param args: parsed CLI arguments; uses ``args.digit`` and ``args.save_dir``
    """
    print("-" * 30 + "Begin: manipulate" + "-" * 30)
    x_test, y_test = data
    # Boolean mask over test samples belonging to the requested class.
    index = np.argmax(y_test, 1) == args.digit
    # BUG FIX: high is exclusive in np.random.randint, so the original
    # ``high=sum(index) - 1`` could never pick the last matching sample and
    # crashed when exactly one sample matched.
    number = np.random.randint(low=0, high=int(np.sum(index)))
    x, y = x_test[index][number], y_test[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    # BUG FIX: size the noise tensor from the label width instead of the
    # hard-coded 10, so this also works for datasets with != 10 classes.
    noise = np.zeros([1, y_test.shape[1], 16])
    x_recons = []
    for dim in range(16):
        for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
            tmp = np.copy(noise)
            tmp[:, :, dim] = r
            x_recons.append(model.predict([x, y, tmp]))
    x_recons = np.concatenate(x_recons)
    montage = combine_images(x_recons, height=16)
    image = montage * 255
    Image.fromarray(image.astype(np.uint8)).save(
        args.save_dir + "/manipulate-%d.png" % args.digit
    )
    print(
        "manipulated result saved to %s/manipulate-%d.png" % (args.save_dir, args.digit)
    )
    print("-" * 30 + "End: manipulate" + "-" * 30)
def load_mnist():
    """Load MNIST with pixel values scaled to [0, 1] and one-hot labels.

    :return: ``((x_train, y_train), (x_test, y_test))`` where images have
        shape (N, 28, 28, 1) and labels are one-hot float32 arrays.
    """
    # the data, shuffled and split between train and test sets
    from keras.datasets import mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    def _prep(images, labels):
        # Add a channel axis, normalize to [0, 1], one-hot encode labels.
        images = images.reshape(-1, 28, 28, 1).astype("float32") / 255.
        return images, to_categorical(labels.astype("float32"))

    x_train, y_train = _prep(x_train, y_train)
    x_test, y_test = _prep(x_test, y_test)
    return (x_train, y_train), (x_test, y_test)
def load_cifar10():
    """Load CIFAR-10 with pixel values scaled to [0, 1] and one-hot labels.

    :return: ``((x_train, y_train), (x_test, y_test))`` where images have
        shape (N, 32, 32, 3) and labels are one-hot float32 arrays.
    """
    from keras.datasets import cifar10

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    def _prep(images, labels):
        # Normalize to [0, 1] and one-hot encode the integer labels.
        images = images.reshape(-1, 32, 32, 3).astype("float32") / 255.
        return images, to_categorical(labels.astype("float32"))

    x_train, y_train = _prep(x_train, y_train)
    x_test, y_test = _prep(x_test, y_test)
    return (x_train, y_train), (x_test, y_test)
if __name__ == "__main__":
    import os
    import argparse
    from keras.preprocessing.image import ImageDataGenerator
    from keras import callbacks

    # setting the hyper parameters
    parser = argparse.ArgumentParser(description="Capsule Network on CIFAR-10.")
    parser.add_argument("--epochs", default=50, type=int)
    parser.add_argument("--batch_size", default=100, type=int)
    parser.add_argument("--lr", default=0.001, type=float, help="Initial learning rate")
    parser.add_argument(
        "--lr_decay",
        default=0.9,
        type=float,
        help="The value multiplied by lr at each epoch. Set a larger value for larger epochs",
    )
    parser.add_argument(
        "--lam_recon",
        default=0.392,
        type=float,
        help="The coefficient for the loss of decoder",
    )
    parser.add_argument(
        "-r",
        "--routings",
        default=3,
        type=int,
        help="Number of iterations used in routing algorithm. should > 0",
    )
    parser.add_argument(
        "--shift_fraction",
        default=0,
        type=float,
        help="Fraction of pixels to shift at most in each direction.",
    )
    parser.add_argument(
        "--debug", action="store_true", help="Save weights by TensorBoard"
    )
    parser.add_argument("--save_dir", default="./result")
    parser.add_argument(
        "-t",
        "--testing",
        action="store_true",
        help="Test the trained model on testing dataset",
    )
    parser.add_argument("--digit", default=5, type=int, help="Digit to manipulate")
    parser.add_argument(
        "-w",
        "--weights",
        default=None,
        help="The path of the saved weights. Should be specified when testing",
    )
    parser.add_argument('--data_augmentation', dest='data_augmentation', action='store_true')
    parser.add_argument('--no-data_augmentation', dest='data_augmentation', action='store_false')
    parser.set_defaults(data_augmentation=False)
    parser.add_argument(
        "--dataset", default="mnist", help="Available datasets: {'mnist'}, 'cifar10'."
    )
    args = parser.parse_args()
    print(f"\nargs: {args}\n")
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # load data
    if args.dataset == "mnist":
        (x_train, y_train), (x_test, y_test) = load_mnist()
    elif args.dataset == "cifar10":
        (x_train, y_train), (x_test, y_test) = load_cifar10()
    else:
        raise ValueError("Available datasets: 'mnist', 'cifar10'.")
    # BUG FIX: removed an unconditional load_cifar10() call that used to
    # follow this branch and silently overrode the --dataset selection.

    # define model
    model, eval_model, manipulate_model = CapsNet(
        input_shape=x_train.shape[1:],
        n_class=len(np.unique(np.argmax(y_train, 1))),
        routings=args.routings,
    )
    model.summary()

    # train or test
    if args.weights is not None:  # init the model weights with provided one
        model.load_weights(args.weights)
    if not args.testing:
        train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)
    else:  # as long as weights are given, will run testing
        if args.weights is None:
            print(
                "No weights are provided. Will test using random initialized weights."
            )
        manipulate_latent(manipulate_model, (x_test, y_test), args)
        test(model=eval_model, data=(x_test, y_test), args=args)
| 34.569892 | 97 | 0.639813 |
from keras import layers, models, optimizers
from keras import backend as K
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
from utils import combine_images, TimeHistory
K.set_image_data_format("channels_last")
def CapsNet(input_shape, n_class, routings):
    """Build the Capsule Network.

    :param input_shape: data shape, e.g. [width, height, channels]
    :param n_class: number of output classes
    :param routings: number of dynamic-routing iterations
    :return: three Keras models: ``train_model`` (inputs [image, label]),
        ``eval_model`` (image only; can also be used for training), and
        ``manipulate_model`` (adds a noise input to perturb digit capsules).
    """
    x = layers.Input(shape=input_shape)
    # Layer 1: conventional Conv2D feature extraction.
    conv1 = layers.Conv2D(
        filters=256,
        kernel_size=9,
        strides=1,
        padding="valid",
        activation="relu",
        name="conv1",
    )(x)
    # Layer 2: primary capsules, 64 channels of 8-D capsules.
    primarycaps = PrimaryCap(
        conv1, dim_capsule=8, n_channels=64, kernel_size=9, strides=2, padding="valid"
    )
    # Layer 3: one 16-D capsule per class; dynamic routing happens here.
    digitcaps = CapsuleLayer(
        num_capsule=n_class, dim_capsule=16, routings=routings, name="digitcaps"
    )(primarycaps)
    # If using tensorflow, this will not be necessary. :)
    # Replace each capsule by its length to match the label shape.
    out_caps = Length(name="capsnet")(digitcaps)
    # Decoder network.
    y = layers.Input(shape=(n_class,))
    masked_by_y = Mask()(
        [digitcaps, y]
    )  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(
        digitcaps
    )  # Mask using the capsule with maximal length. For prediction
    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name="decoder")
    decoder.add(layers.Dense(512, activation="relu", input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation="relu"))
    decoder.add(layers.Dense(np.prod(input_shape), activation="sigmoid"))
    decoder.add(layers.Reshape(target_shape=input_shape, name="out_recon"))
    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])
    # manipulate model
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
def margin_loss(y_true, y_pred):
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + 0.5 * (1 - y_true) * K.square(
K.maximum(0., y_pred - 0.1)
)
return K.mean(K.sum(L, 1))
def train(model, data, args):
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
log = callbacks.CSVLogger(args.save_dir + "/log.csv")
tb = callbacks.TensorBoard(
log_dir=args.save_dir + "/tensorboard-logs",
batch_size=args.batch_size,
histogram_freq=int(args.debug),
)
checkpoint = callbacks.ModelCheckpoint(
args.save_dir + "/weights-{epoch:02d}.h5",
monitor="val_capsnet_acc",
save_best_only=True,
save_weights_only=True,
verbose=1,
)
lr_decay = callbacks.LearningRateScheduler(
schedule=lambda epoch: args.lr * (args.lr_decay ** epoch)
)
timing = TimeHistory()
# compile the model
model.compile(
optimizer=optimizers.Adam(lr=args.lr),
loss=[margin_loss, "mse"],
loss_weights=[1., args.lam_recon],
metrics={"capsnet": "accuracy"},
)
if args.data_augmentation:
def train_generator(x, y, batch_size, shift_fraction=0.):
# shift up to 2 pixel for MNIST
train_datagen = ImageDataGenerator(
width_shift_range=shift_fraction, height_shift_range=shift_fraction
)
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
assert args.shift_fraction != 0, "No data augmentation if ``shift_fraction`` == 0."
model.fit_generator(
generator=train_generator(
x_train, y_train, args.batch_size, args.shift_fraction
),
steps_per_epoch=int(y_train.shape[0] / args.batch_size),
epochs=args.epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint, lr_decay, timing],
)
else:
assert args.shift_fraction == 0, "Set ``data_augmentation`` flag to shift pixels."
model.fit(
[x_train, y_train],
[y_train, x_train],
batch_size=args.batch_size,
epochs=args.epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint, lr_decay, timing],
)
print("Time per epoch", timing.times)
model.save_weights(args.save_dir + "/trained_model.h5")
print("Trained model saved to '%s/trained_model.h5'" % args.save_dir)
from utils import plot_log
plot_log(args.save_dir + "/log.csv", show=True)
return model
def test(model, data, args):
x_test, y_test = data
y_pred, x_recon = model.predict(x_test, batch_size=100)
print("-" * 30 + "Begin: test" + "-" * 30)
print(
"Test acc:",
np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0],
)
img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
print()
print("Reconstructed images are saved to %s/real_and_recon.png" % args.save_dir)
print("-" * 30 + "End: test" + "-" * 30)
plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
plt.show()
def manipulate_latent(model, data, args):
print("-" * 30 + "Begin: manipulate" + "-" * 30)
x_test, y_test = data
index = np.argmax(y_test, 1) == args.digit
number = np.random.randint(low=0, high=sum(index) - 1)
x, y = x_test[index][number], y_test[index][number]
x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
noise = np.zeros([1, 10, 16])
x_recons = []
for dim in range(16):
for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
tmp = np.copy(noise)
tmp[:, :, dim] = r
x_recon = model.predict([x, y, tmp])
x_recons.append(x_recon)
x_recons = np.concatenate(x_recons)
img = combine_images(x_recons, height=16)
image = img * 255
Image.fromarray(image.astype(np.uint8)).save(
args.save_dir + "/manipulate-%d.png" % args.digit
)
print(
"manipulated result saved to %s/manipulate-%d.png" % (args.save_dir, args.digit)
)
print("-" * 30 + "End: manipulate" + "-" * 30)
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.
y_train = to_categorical(y_train.astype("float32"))
y_test = to_categorical(y_test.astype("float32"))
return (x_train, y_train), (x_test, y_test)
def load_cifar10():
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(-1, 32, 32, 3).astype("float32") / 255.
x_test = x_test.reshape(-1, 32, 32, 3).astype("float32") / 255.
y_train = to_categorical(y_train.astype("float32"))
y_test = to_categorical(y_test.astype("float32"))
return (x_train, y_train), (x_test, y_test)
if __name__ == "__main__":
import os
import argparse
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Capsule Network on CIFAR-10.")
parser.add_argument("--epochs", default=50, type=int)
parser.add_argument("--batch_size", default=100, type=int)
parser.add_argument("--lr", default=0.001, type=float, help="Initial learning rate")
parser.add_argument(
"--lr_decay",
default=0.9,
type=float,
help="The value multiplied by lr at each epoch. Set a larger value for larger epochs",
)
parser.add_argument(
"--lam_recon",
default=0.392,
type=float,
help="The coefficient for the loss of decoder",
)
parser.add_argument(
"-r",
"--routings",
default=3,
type=int,
help="Number of iterations used in routing algorithm. should > 0",
)
parser.add_argument(
"--shift_fraction",
default=0,
type=float,
help="Fraction of pixels to shift at most in each direction.",
)
parser.add_argument(
"--debug", action="store_true", help="Save weights by TensorBoard"
)
parser.add_argument("--save_dir", default="./result")
parser.add_argument(
"-t",
"--testing",
action="store_true",
help="Test the trained model on testing dataset",
)
parser.add_argument("--digit", default=5, type=int, help="Digit to manipulate")
parser.add_argument(
"-w",
"--weights",
default=None,
help="The path of the saved weights. Should be specified when testing",
)
parser.add_argument('--data_augmentation', dest='data_augmentation', action='store_true')
parser.add_argument('--no-data_augmentation', dest='data_augmentation', action='store_false')
parser.set_defaults(data_augmentation=False)
parser.add_argument(
"--dataset", default="mnist", help="Available datasets: {'mnist'}, 'cifar10'."
)
args = parser.parse_args()
print(f"\nargs: {args}\n")
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# load data
if args.dataset == "mnist":
(x_train, y_train), (x_test, y_test) = load_mnist()
elif args.dataset == "cifar10":
(x_train, y_train), (x_test, y_test) = load_cifar10()
else:
raise ValueError("Available datasets: 'mnist', 'cifar10'.")
(x_train, y_train), (x_test, y_test) = load_cifar10()
# define model
model, eval_model, manipulate_model = CapsNet(
input_shape=x_train.shape[1:],
n_class=len(np.unique(np.argmax(y_train, 1))),
routings=args.routings,
)
model.summary()
# train or test
if args.weights is not None: # init the model weights with provided one
model.load_weights(args.weights)
if not args.testing:
train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)
else: # as long as weights are given, will run testing
if args.weights is None:
print(
"No weights are provided. Will test using random initialized weights."
)
manipulate_latent(manipulate_model, (x_test, y_test), args)
test(model=eval_model, data=(x_test, y_test), args=args)
| true | true |
f7faf7e06e3194dff39b62b86ef6da183085945d | 1,254 | py | Python | test/test_sma.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | 1 | 2021-07-09T15:37:58.000Z | 2021-07-09T15:37:58.000Z | test/test_sma.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | 1 | 2021-07-20T22:22:44.000Z | 2021-07-20T22:23:04.000Z | test/test_sma.py | Yifei-Liu/uts | 64c137d59fcd0c7c016082018d67a56abac0b28e | [
"MIT"
] | 1 | 2021-06-29T20:13:18.000Z | 2021-06-29T20:13:18.000Z | import unittest
import numpy as np
import numpy.testing as npt
from uts import sma
class TestEMA(unittest.TestCase):
    """Unit tests for the uts.sma moving-average variants.

    NOTE(review): the class name says EMA but the tests cover SMA; kept
    as-is so test discovery and external references are unchanged.
    """

    # Shared irregular time series: column 0 is time, column 1 is value.
    # Deduplicated from the three test methods that each repeated it.
    VALUES = np.array([[0.0, 0.0], [1.0, 2.0],
                       [1.2, 4.0], [2.3, 6], [2.9, 8], [5, 10]])

    def _check(self, sma_func, desired):
        """Run *sma_func* over VALUES with window 2.5, tau 1.0 and compare."""
        result = sma_func(self.VALUES, 2.5, 1.0)
        npt.assert_almost_equal(result, np.array(desired), decimal=2)

    def test_sma_last(self):
        self._check(sma.last, [[0.0, 0.0], [1.0, 1.03],
                               [1.2, 1.26], [2.3, 3.31], [2.9, 4.69], [5, 8.34]])

    def test_sma_next(self):
        self._check(sma.next, [[0.0, 0.0], [1.0, 1.71],
                               [1.2, 1.94], [2.3, 4.97], [2.9, 6.11], [5, 9.77]])

    def test_sma_linear(self):
        self._check(sma.linear, [[0.0, 0.0], [1.0, 1.54],
                                 [1.2, 1.86], [2.3, 4.16], [2.9, 5.60], [5, 9.10]])
if __name__ == '__main__':
    # Allow running this test module directly: python test_sma.py
    unittest.main()
import numpy as np
import numpy.testing as npt
from uts import sma
class TestEMA(unittest.TestCase):
def test_sma_last(self):
values = np.array([[0.0, 0.0], [1.0, 2.0],
[1.2, 4.0], [2.3, 6], [2.9, 8], [5, 10]])
result = sma.last(values, 2.5, 1.0)
desired = np.array([[0.0, 0.0], [1.0, 1.03],
[1.2, 1.26], [2.3, 3.31], [2.9, 4.69], [5, 8.34]])
npt.assert_almost_equal(result, desired, decimal=2)
def test_sma_next(self):
values = np.array([[0.0, 0.0], [1.0, 2.0],
[1.2, 4.0], [2.3, 6], [2.9, 8], [5, 10]])
result = sma.next(values, 2.5, 1.0)
desired = np.array([[0.0, 0.0], [1.0, 1.71],
[1.2, 1.94], [2.3, 4.97], [2.9, 6.11], [5, 9.77]])
npt.assert_almost_equal(result, desired, decimal=2)
def test_sma_linear(self):
values = np.array([[0.0, 0.0], [1.0, 2.0],
[1.2, 4.0], [2.3, 6], [2.9, 8], [5, 10]])
result = sma.linear(values, 2.5, 1.0)
desired = np.array([[0.0, 0.0], [1.0, 1.54],
[1.2, 1.86], [2.3, 4.16], [2.9, 5.60], [5, 9.10]])
npt.assert_almost_equal(result, desired, decimal=2)
if __name__ == '__main__':
unittest.main() | true | true |
f7faf81985c717e19ab8a28fe75d869200b41a32 | 3,057 | bzl | Python | pw_env_setup/bazel/cipd_setup/cipd_rules.bzl | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 86 | 2021-03-09T23:49:40.000Z | 2022-03-30T08:14:51.000Z | pw_env_setup/bazel/cipd_setup/cipd_rules.bzl | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 4 | 2021-07-27T20:32:03.000Z | 2022-03-08T10:39:07.000Z | pw_env_setup/bazel/cipd_setup/cipd_rules.bzl | curtin-space/pigweed | fe2e1743e03fabd2676f01d9de0ac9d34a426076 | [
"Apache-2.0"
] | 22 | 2021-03-11T15:15:47.000Z | 2022-02-09T06:16:36.000Z | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Bazel rules for downloading CIPD packages."""
load(
"//pw_env_setup/bazel/cipd_setup/internal:cipd_internal.bzl",
_cipd_client_impl = "cipd_client_impl",
_cipd_deps_impl = "cipd_deps_impl",
_cipd_repository_impl = "cipd_repository_impl",
)
# Private repository rule that fetches the CIPD client binary, pinned by the
# version and digest files shipped with Pigweed.  Use the
# cipd_client_repository() macro below instead of invoking this directly.
_cipd_client_repository = repository_rule(
    _cipd_client_impl,
    attrs = {
        # Pinned CIPD client version to download.
        "_cipd_version_file": attr.label(default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/.cipd_version"),
        # Expected digests for the pinned client, per platform.
        "_cipd_digest_file": attr.label(default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/.cipd_version.digests"),
    },
    doc = """
Fetches the cipd client.
This rule should not be used directly and instead should be called via
the cipd_client_repository macro.
""",
)
def cipd_client_repository():
    """Fetches the cipd client.

    Fetches the cipd client to the prescribed remote repository target
    prefix 'cipd_client'. This rule should be called before a
    cipd_repository rule is instantiated.
    """
    # The name is fixed: cipd_repository's _cipd_client attribute resolves
    # "@cipd_client//:cipd".
    _cipd_client_repository(
        name = "cipd_client",
    )
# Public repository rule: downloads one CIPD package (identified by `path`
# at version `tag`) into the root of an external repository.
cipd_repository = repository_rule(
    _cipd_repository_impl,
    attrs = {
        # CIPD client fetched by cipd_client_repository(); must be set up first.
        "_cipd_client": attr.label(default = "@cipd_client//:cipd"),
        # CIPD package path; may contain ${os}/${arch} expansion variables.
        "path": attr.string(),
        # Package version tag or instance ID to fetch.
        "tag": attr.string(),
    },
    doc = """
Downloads a singular CIPD dependency to the root of a remote repository.
Example:
    load(
        "//pw_env_setup/bazel/cipd_setup:cipd_rules.bzl",
        "cipd_client_repository",
        "cipd_repository",
    )
    # Must be called before cipd_repository
    cipd_client_repository()
    cipd_repository(
        name = "bloaty",
        path = "pigweed/third_party/bloaty-embedded/${os=linux,mac}-${arch=amd64}",
        tag = "git_revision:2d87d204057b419f5290f8d38b61b9c2c5b4fb52-2",
    )
""",
)
# Private rule backing pigweed_deps(): reads Pigweed's CIPD package manifests
# and generates the @cipd_deps repository.
_pigweed_deps = repository_rule(
    _cipd_deps_impl,
    attrs = {
        # JSON manifests listing the CIPD packages Pigweed depends on.
        "_pigweed_packages_json": attr.label(
            default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/pigweed.json",
        ),
        "_python_packages_json": attr.label(
            default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/python.json",
        ),
    },
)
def pigweed_deps():
    """Configures Pigweeds Bazel dependencies

    Example:
        load("@pigweed//pw_env_setup:pigweed_deps.bzl", "pigweed_deps")
        pigweed_deps()
        load("@cipd_deps//:cipd_init.bzl", "cipd_init")
        cipd_init()
    """
    # Instantiates @cipd_deps; callers must then load and run cipd_init().
    _pigweed_deps(
        name = "cipd_deps",
    )
| 29.394231 | 125 | 0.687275 |
load(
"//pw_env_setup/bazel/cipd_setup/internal:cipd_internal.bzl",
_cipd_client_impl = "cipd_client_impl",
_cipd_deps_impl = "cipd_deps_impl",
_cipd_repository_impl = "cipd_repository_impl",
)
_cipd_client_repository = repository_rule(
_cipd_client_impl,
attrs = {
"_cipd_version_file": attr.label(default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/.cipd_version"),
"_cipd_digest_file": attr.label(default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/.cipd_version.digests"),
},
doc = """
Fetches the cipd client.
This rule should not be used directly and instead should be called via
the cipd_client_repository macro.
""",
)
def cipd_client_repository():
_cipd_client_repository(
name = "cipd_client",
)
cipd_repository = repository_rule(
_cipd_repository_impl,
attrs = {
"_cipd_client": attr.label(default = "@cipd_client//:cipd"),
"path": attr.string(),
"tag": attr.string(),
},
doc = """
Downloads a singular CIPD dependency to the root of a remote repository.
Example:
load(
"//pw_env_setup/bazel/cipd_setup:cipd_rules.bzl",
"cipd_client_repository",
"cipd_repository",
)
# Must be called before cipd_repository
cipd_client_repository()
cipd_repository(
name = "bloaty",
path = "pigweed/third_party/bloaty-embedded/${os=linux,mac}-${arch=amd64}",
tag = "git_revision:2d87d204057b419f5290f8d38b61b9c2c5b4fb52-2",
)
""",
)
_pigweed_deps = repository_rule(
_cipd_deps_impl,
attrs = {
"_pigweed_packages_json": attr.label(
default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/pigweed.json",
),
"_python_packages_json": attr.label(
default = "@pigweed//pw_env_setup:py/pw_env_setup/cipd_setup/python.json",
),
},
)
def pigweed_deps():
_pigweed_deps(
name = "cipd_deps",
)
| true | true |
f7faf9654fe3b687b52dbedb0cf2494bb4e739dc | 1,782 | py | Python | tests/unitary/LiquidityGauge/test_deposit_for.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 217 | 2020-06-24T14:01:21.000Z | 2022-03-29T08:35:24.000Z | tests/unitary/LiquidityGauge/test_deposit_for.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 25 | 2020-06-24T09:39:02.000Z | 2022-03-22T17:03:00.000Z | tests/unitary/LiquidityGauge/test_deposit_for.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 110 | 2020-07-10T22:45:49.000Z | 2022-03-29T02:51:08.000Z | import brownie
def test_deposit_for(accounts, liquidity_gauge, mock_lp_token):
    """Depositing on behalf of an approved account credits that account."""
    amount = 100000
    depositor, beneficiary = accounts[0], accounts[1]
    mock_lp_token.approve(liquidity_gauge, 2 ** 256 - 1, {"from": depositor})
    initial_balance = mock_lp_token.balanceOf(depositor)
    liquidity_gauge.set_approve_deposit(depositor, True, {"from": beneficiary})
    liquidity_gauge.deposit(amount, beneficiary, {"from": depositor})

    assert mock_lp_token.balanceOf(liquidity_gauge) == amount
    assert mock_lp_token.balanceOf(depositor) == initial_balance - amount
    assert liquidity_gauge.totalSupply() == amount
    assert liquidity_gauge.balanceOf(beneficiary) == amount
def test_set_approve_deposit_initial(accounts, liquidity_gauge):
    """Approval defaults to False before set_approve_deposit is called."""
    is_approved = liquidity_gauge.approved_to_deposit(accounts[0], accounts[1])
    assert is_approved is False
def test_set_approve_deposit_true(accounts, liquidity_gauge):
    """Granting approval flips approved_to_deposit to True."""
    liquidity_gauge.set_approve_deposit(accounts[0], True, {"from": accounts[1]})
    is_approved = liquidity_gauge.approved_to_deposit(accounts[0], accounts[1])
    assert is_approved is True
def test_set_approve_deposit_false(accounts, liquidity_gauge):
    """Explicitly revoking approval leaves approved_to_deposit False."""
    liquidity_gauge.set_approve_deposit(accounts[0], False, {"from": accounts[1]})
    is_approved = liquidity_gauge.approved_to_deposit(accounts[0], accounts[1])
    assert is_approved is False
def test_set_approve_deposit_toggle(accounts, liquidity_gauge):
    """Approval always tracks the most recent set_approve_deposit value."""
    for flag in (True, True, False, False, True, False, True):
        liquidity_gauge.set_approve_deposit(accounts[0], flag, {"from": accounts[1]})
        assert liquidity_gauge.approved_to_deposit(accounts[0], accounts[1]) is flag
def test_not_approved(accounts, liquidity_gauge, mock_lp_token):
    """Depositing for an account that never granted approval reverts."""
    mock_lp_token.approve(liquidity_gauge, 2 ** 256 - 1, {"from": accounts[0]})
    # accounts[1] did not call set_approve_deposit for accounts[0].
    with brownie.reverts("Not approved"):
        liquidity_gauge.deposit(100000, accounts[1], {"from": accounts[0]})
| 44.55 | 86 | 0.755892 | import brownie
def test_deposit_for(accounts, liquidity_gauge, mock_lp_token):
mock_lp_token.approve(liquidity_gauge, 2 ** 256 - 1, {"from": accounts[0]})
balance = mock_lp_token.balanceOf(accounts[0])
liquidity_gauge.set_approve_deposit(accounts[0], True, {"from": accounts[1]})
liquidity_gauge.deposit(100000, accounts[1], {"from": accounts[0]})
assert mock_lp_token.balanceOf(liquidity_gauge) == 100000
assert mock_lp_token.balanceOf(accounts[0]) == balance - 100000
assert liquidity_gauge.totalSupply() == 100000
assert liquidity_gauge.balanceOf(accounts[1]) == 100000
def test_set_approve_deposit_initial(accounts, liquidity_gauge):
assert liquidity_gauge.approved_to_deposit(accounts[0], accounts[1]) is False
def test_set_approve_deposit_true(accounts, liquidity_gauge):
liquidity_gauge.set_approve_deposit(accounts[0], True, {"from": accounts[1]})
assert liquidity_gauge.approved_to_deposit(accounts[0], accounts[1]) is True
def test_set_approve_deposit_false(accounts, liquidity_gauge):
liquidity_gauge.set_approve_deposit(accounts[0], False, {"from": accounts[1]})
assert liquidity_gauge.approved_to_deposit(accounts[0], accounts[1]) is False
def test_set_approve_deposit_toggle(accounts, liquidity_gauge):
for value in [True, True, False, False, True, False, True]:
liquidity_gauge.set_approve_deposit(accounts[0], value, {"from": accounts[1]})
assert liquidity_gauge.approved_to_deposit(accounts[0], accounts[1]) is value
def test_not_approved(accounts, liquidity_gauge, mock_lp_token):
mock_lp_token.approve(liquidity_gauge, 2 ** 256 - 1, {"from": accounts[0]})
with brownie.reverts("Not approved"):
liquidity_gauge.deposit(100000, accounts[1], {"from": accounts[0]})
| true | true |
f7fafb8904510061e7264b0efd26be396be5c8ea | 10,018 | py | Python | AutoWorkup/logismosb/utils/fs_thickness_measurements.py | pnlbwh/BRAINSTools | a2fe63ab5b795f03da140a4081d1fef6314dab95 | [
"Apache-2.0"
] | 89 | 2015-02-09T16:47:09.000Z | 2022-02-21T07:19:27.000Z | AutoWorkup/logismosb/utils/fs_thickness_measurements.py | pnlbwh/BRAINSTools | a2fe63ab5b795f03da140a4081d1fef6314dab95 | [
"Apache-2.0"
] | 166 | 2015-01-07T22:14:05.000Z | 2021-12-26T06:58:00.000Z | AutoWorkup/logismosb/utils/fs_thickness_measurements.py | BRAINSia/BRAINSTools | f09f74bd28ad07cd2347c2528921b1a43b97fa1d | [
"Apache-2.0"
] | 80 | 2015-01-05T17:18:07.000Z | 2022-01-06T12:46:29.000Z | """
fs_thickness_measurements.py
==============================
Description:
Author:
Usage:
"""
import vtk
import SimpleITK as sitk
import numpy as np
from scipy.spatial import distance
from nipype.interfaces.freesurfer import MRIsConvert
import os
import sys
def read_poly_data(filename):
    """Read a VTK PolyData surface file.

    Picks the reader from the file extension: legacy ``.vtk`` files use
    ``vtkPolyDataReader``; XML ``.vtp`` files use ``vtkXMLPolyDataReader``.

    :param filename: path to a ``.vtk`` or ``.vtp`` file
    :return: the loaded ``vtkPolyData``
    :raises IOError: if the extension is not ``.vtk`` or ``.vtp``
    """
    # Match the actual extension; the original substring test (".vtk" in
    # filename) also matched names that merely contain ".vtk".
    if filename.endswith(".vtk"):
        reader = vtk.vtkPolyDataReader()
    elif filename.endswith(".vtp"):
        reader = vtk.vtkXMLPolyDataReader()
    else:
        # Raise instead of sys.exit() so library callers can handle it.
        raise IOError("ERROR: Failed to read in polydata: %s" % filename)
    reader.SetFileName(filename)
    reader.Update()
    return reader.GetOutput()
def ras_to_lps(point):
    """Convert a 3-D point from RAS to LPS coordinates.

    VTK surfaces are in RAS while ITK images use LPS, so the first two
    components are negated.

    :param point: ``(x, y, z)`` coordinates in RAS
    :return: ``(x, y, z)`` coordinates in LPS
    """
    x, y, z = point
    return (-x, -y, z)
def vtk_point_to_label(point, labelmap):
    """Look up the label value under a VTK surface point.

    The RAS point is converted to LPS, mapped into voxel index space, and
    the label image is sampled at that index.

    :param point: ``(x, y, z)`` surface point in RAS (VTK) coordinates
    :param labelmap: SimpleITK label image to sample
    :return: label value at the corresponding voxel
    """
    lps_point = ras_to_lps(point)
    index = labelmap.TransformPhysicalPointToIndex(lps_point)
    i, j, k = int(index[0]), int(index[1]), int(index[2])
    return labelmap.GetPixel(i, j, k)
def build_kd_tree(mesh):
    """Build a k-d tree locator over *mesh* for closest-point queries.

    :param mesh: ``vtkPolyData`` whose points should be indexed
    :return: a ready-to-use ``vtkKdTreePointLocator``
    """
    locator = vtk.vtkKdTreePointLocator()
    locator.SetDataSet(mesh)
    locator.BuildLocator()
    return locator
def convert_fs_surface(in_surf, out_surf, to_scanner=True):
    """Convert a FreeSurfer surface with ``mris_convert``.

    Acts as a cache: if *out_surf* already exists, no conversion is run and
    its absolute path is returned directly.

    :param in_surf: input FreeSurfer surface file
    :param out_surf: output surface file to create
    :param to_scanner: convert coordinates to scanner space
    :return: absolute path of the converted (or pre-existing) surface
    """
    out_path = os.path.abspath(out_surf)
    if os.path.isfile(out_path):
        return out_path
    converter = MRIsConvert()
    converter.inputs.in_file = in_surf
    converter.inputs.out_file = out_path
    converter.inputs.to_scanner = to_scanner
    return converter.run().outputs.converted
def get_vtk_file_name(fs_file_name):
    """Derive the VTK output file name for a FreeSurfer surface.

    ``surf/lh.white`` becomes ``surf/lh_white.vtk``: dots in the base name
    are replaced so the only extension is ``.vtk``.

    :param fs_file_name: path to the FreeSurfer surface
    :return: path of the matching ``.vtk`` file in the same directory
    """
    directory, base = os.path.split(fs_file_name)
    vtk_base = "{0}.vtk".format(base.replace(".", "_"))
    return os.path.join(directory, vtk_base)
def fs_to_vtk(fs_surface):
    """Convert a FreeSurfer surface to a VTK file next to the original.

    :param fs_surface: path to the FreeSurfer surface file
    :return: path of the converted ``.vtk`` file
    """
    return convert_fs_surface(fs_surface, get_vtk_file_name(fs_surface))
def get_surf(surf_dir, hemisphere, surf):
"""
This function..
:param surf_dir:
:param hemisphere:
:param surf:
:return:
"""
return os.path.join(surf_dir, "{0}.{1}".format(hemisphere, surf))
def get_white(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
return get_surf(surf_dir, hemisphere, "white")
def get_pial(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
return get_surf(surf_dir, hemisphere, "pial")
def get_white_and_pial_fs_files(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
fs_white = get_white(surf_dir, hemisphere)
fs_pial = get_pial(surf_dir, hemisphere)
return fs_white, fs_pial
def get_white_and_pial_vtk_files(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
fs_white, fs_pial = get_white_and_pial_fs_files(surf_dir, hemisphere)
return fs_to_vtk(fs_white), fs_to_vtk(fs_pial)
def get_white_and_pial(surf_dir, hemisphere):
"""
This function..
:param surf_dir:
:param hemisphere:
:return:
"""
vtk_white, vtk_pial = get_white_and_pial_vtk_files(surf_dir, hemisphere)
white = read_poly_data(vtk_white)
pial = read_poly_data(vtk_pial)
return white, pial
def compute_thickness(wmP, kdTreegm, kdTreewm):
"""
This function..
:param wmP:
:param kdTreegm:
:param kdTreewm:
:return:
"""
# Find the closest point to the gray matter surface point
gmIndex = kdTreegm.FindClosestPoint(wmP)
gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)
# compute the distance
# distance from wm point to gm point
dst1 = distance.euclidean(wmP, gmP)
wmIndex = kdTreewm.FindClosestPoint(gmP)
wmP2 = kdTreegm.GetDataSet().GetPoint(wmIndex)
# distnace from gm to closest wm point
dst2 = distance.euclidean(gmP, wmP2)
# average the two distances
thickness = (dst1 + dst2) / float(2)
return thickness
def create_thickness_array():
"""
This function..
:return:
"""
thicknesses = vtk.vtkFloatArray()
thicknesses.SetName("thickness")
return thicknesses
def calculate_distance(white, pial):
"""
This function..
:param white:
:param pial:
:return:
"""
# setup KdTrees for each surface
# this will help in finding the closest points
kd_tree_white = build_kd_tree(white)
kd_tree_pial = build_kd_tree(pial)
white_points = white.GetPoints()
white_count = white.GetNumberOfPoints()
white_point_data = white.GetPointData()
thicknesses = create_thickness_array()
for i in range(0, white_count):
white_matter_point = white_points.GetPoint(i)
# compute the thickness
thickness = compute_thickness(white_matter_point, kd_tree_pial, kd_tree_white)
thicknesses.InsertNextValue(thickness)
white_point_data.AddArray(thicknesses)
return white
def get_surf_dir(subjects_dir, subject_id):
"""
This function..
:param subjects_dir:
:param subject_id:
:return:
"""
return os.path.join(subjects_dir, subject_id, "surf")
def write_vtk_file(polydata, file_name):
"""
This function..
:param polydata:
:param file_name:
:return:
"""
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(file_name)
writer.SetInputData(polydata)
writer.Update()
return os.path.abspath(writer.GetFileName())
def get_thickness_file(subjects_dir, subject_id, hemisphere):
"""
This function..
:param subjects_dir:
:param subjects_id:
:param hemisphere:
:return:
"""
surf_dir = get_surf_dir(subjects_dir, subject_id)
white, pial = get_white_and_pial(surf_dir, hemisphere)
thickness = calculate_distance(white, pial)
return write_vtk_file(
thickness, os.path.join(surf_dir, "{0}_thickness.vtk".format(hemisphere))
)
def get_thickness_files_for_both_hemispheres(subjects_dir, subject_id):
"""
This function..
:param subjects_dir:
:param subjects_id:
:return:
"""
lh_thickness = get_thickness_file(subjects_dir, subject_id, "lh")
rh_thickness = get_thickness_file(subjects_dir, subject_id, "rh")
return lh_thickness, rh_thickness
def masked_thickness_values(thickness_file, mask_image_file, array_index=None):
"""
This function..
:param thickness_file:
:param mask_file:
:param array_index:
:return:
"""
thickness = read_poly_data(thickness_file)
mask = sitk.ReadImage(mask_image_file)
inside_mask_values = list()
outside_mask_values = list()
thickness_point_data = thickness.GetPointData()
if not array_index:
# set the array index to the last array added to the poly data
array_index = thickness_point_data.GetNumberOfArrays() - 1
thickness_values = thickness.GetPointData().GetArray(array_index)
for point_index in range(thickness.GetNumberOfPoints()):
point = thickness.GetPoint(point_index)
mask_value = vtk_point_to_label(point, mask)
thickness_value = thickness_values.GetValue(point_index)
if mask_value == 1:
inside_mask_values.append(thickness_value)
else:
outside_mask_values.append(thickness_value)
return inside_mask_values, outside_mask_values
def calculate_stats(values):
"""
This function..
:param values:
:return:
"""
if values:
values_array = np.array(values)
return dict(
mean=values_array.mean(),
std=values_array.std(),
min=values_array.min(),
max=values_array.max(),
)
else:
return dict(mean=None, std=None, min=None, max=None)
def masked_thickness_stats(thickness_file, mask_image_file):
"""
This function..
:param thickness_file:
:param mask_image_file:
:return:
"""
inside_mask_values, outside_mask_values = masked_thickness_values(
thickness_file, mask_image_file
)
stats = dict()
stats["inside"] = calculate_stats(inside_mask_values)
stats["outside"] = calculate_stats(outside_mask_values)
return stats
def get_thickness_stats_for_both_hemispheres(subjects_dir, subject_id, mask_file):
"""
This function..
:param subject_id:
:param subjects_dir:
:param mask_file:
:return:
"""
stats = dict()
lh_thickness, rh_thickness = get_thickness_files_for_both_hemispheres(
subjects_dir, subject_id
)
stats["lh"] = masked_thickness_stats(lh_thickness, mask_file)
stats["rh"] = masked_thickness_stats(rh_thickness, mask_file)
return stats
def main():
"""
This function..
"""
os.environ[
"PATH"
] += ":/Shared/sinapse/sharedopt/apps/freesurfer/Darwin/x86_64/6.0-beta/20150915/bin/"
mask_file = "/Shared/sinapse/CACHE/20160712_AtrophySimulation_Results/2559/58661/simulation_1/atrophy_regions.nii.gz"
subj_dir = "/Shared/sinapse/CACHE/20160713_AtrophySimulation_BAW_base_Results/PHD_024/2559_58661/79/"
print(get_thickness_stats_for_both_hemispheres(subj_dir, "FreeSurfer", mask_file))
print("done")
if __name__ == "__main__":
main()
| 23.909308 | 121 | 0.669595 | import vtk
import SimpleITK as sitk
import numpy as np
from scipy.spatial import distance
from nipype.interfaces.freesurfer import MRIsConvert
import os
import sys
def read_poly_data(filename):
if ".vtk" in filename:
reader = vtk.vtkPolyDataReader()
reader.SetFileName(filename)
reader.Update()
return reader.GetOutput()
elif ".vtp" in filename:
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(filename)
reader.Update()
return reader.GetOutput()
else:
print("ERROR: Failed to read in polydata")
return sys.exit(os.EX_IOERR)
def ras_to_lps(point):
surf_x, surf_y, surf_z = point
point = (-surf_x, -surf_y, surf_z)
return point
def vtk_point_to_label(point, labelmap):
point = ras_to_lps(point)
index = labelmap.TransformPhysicalPointToIndex(point)
x = int(index[0])
y = int(index[1])
z = int(index[2])
return labelmap.GetPixel(x, y, z)
def build_kd_tree(mesh):
kd_tree = vtk.vtkKdTreePointLocator()
kd_tree.SetDataSet(mesh)
kd_tree.BuildLocator()
return kd_tree
def convert_fs_surface(in_surf, out_surf, to_scanner=True):
if os.path.isfile(os.path.abspath(out_surf)):
return os.path.abspath(out_surf)
mris_convert = MRIsConvert()
mris_convert.inputs.in_file = in_surf
mris_convert.inputs.out_file = os.path.abspath(out_surf)
mris_convert.inputs.to_scanner = to_scanner
result = mris_convert.run()
return result.outputs.converted
def get_vtk_file_name(fs_file_name):
fs_dir, fs_basename = os.path.split(fs_file_name)
return os.path.join(fs_dir, fs_basename.replace(".", "_") + ".vtk")
def fs_to_vtk(fs_surface):
output_file = get_vtk_file_name(fs_surface)
return convert_fs_surface(fs_surface, output_file)
def get_surf(surf_dir, hemisphere, surf):
return os.path.join(surf_dir, "{0}.{1}".format(hemisphere, surf))
def get_white(surf_dir, hemisphere):
return get_surf(surf_dir, hemisphere, "white")
def get_pial(surf_dir, hemisphere):
return get_surf(surf_dir, hemisphere, "pial")
def get_white_and_pial_fs_files(surf_dir, hemisphere):
fs_white = get_white(surf_dir, hemisphere)
fs_pial = get_pial(surf_dir, hemisphere)
return fs_white, fs_pial
def get_white_and_pial_vtk_files(surf_dir, hemisphere):
fs_white, fs_pial = get_white_and_pial_fs_files(surf_dir, hemisphere)
return fs_to_vtk(fs_white), fs_to_vtk(fs_pial)
def get_white_and_pial(surf_dir, hemisphere):
vtk_white, vtk_pial = get_white_and_pial_vtk_files(surf_dir, hemisphere)
white = read_poly_data(vtk_white)
pial = read_poly_data(vtk_pial)
return white, pial
def compute_thickness(wmP, kdTreegm, kdTreewm):
gmIndex = kdTreegm.FindClosestPoint(wmP)
gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)
dst1 = distance.euclidean(wmP, gmP)
wmIndex = kdTreewm.FindClosestPoint(gmP)
wmP2 = kdTreegm.GetDataSet().GetPoint(wmIndex)
dst2 = distance.euclidean(gmP, wmP2)
thickness = (dst1 + dst2) / float(2)
return thickness
def create_thickness_array():
thicknesses = vtk.vtkFloatArray()
thicknesses.SetName("thickness")
return thicknesses
def calculate_distance(white, pial):
kd_tree_white = build_kd_tree(white)
kd_tree_pial = build_kd_tree(pial)
white_points = white.GetPoints()
white_count = white.GetNumberOfPoints()
white_point_data = white.GetPointData()
thicknesses = create_thickness_array()
for i in range(0, white_count):
white_matter_point = white_points.GetPoint(i)
thickness = compute_thickness(white_matter_point, kd_tree_pial, kd_tree_white)
thicknesses.InsertNextValue(thickness)
white_point_data.AddArray(thicknesses)
return white
def get_surf_dir(subjects_dir, subject_id):
return os.path.join(subjects_dir, subject_id, "surf")
def write_vtk_file(polydata, file_name):
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(file_name)
writer.SetInputData(polydata)
writer.Update()
return os.path.abspath(writer.GetFileName())
def get_thickness_file(subjects_dir, subject_id, hemisphere):
surf_dir = get_surf_dir(subjects_dir, subject_id)
white, pial = get_white_and_pial(surf_dir, hemisphere)
thickness = calculate_distance(white, pial)
return write_vtk_file(
thickness, os.path.join(surf_dir, "{0}_thickness.vtk".format(hemisphere))
)
def get_thickness_files_for_both_hemispheres(subjects_dir, subject_id):
lh_thickness = get_thickness_file(subjects_dir, subject_id, "lh")
rh_thickness = get_thickness_file(subjects_dir, subject_id, "rh")
return lh_thickness, rh_thickness
def masked_thickness_values(thickness_file, mask_image_file, array_index=None):
thickness = read_poly_data(thickness_file)
mask = sitk.ReadImage(mask_image_file)
inside_mask_values = list()
outside_mask_values = list()
thickness_point_data = thickness.GetPointData()
if not array_index:
array_index = thickness_point_data.GetNumberOfArrays() - 1
thickness_values = thickness.GetPointData().GetArray(array_index)
for point_index in range(thickness.GetNumberOfPoints()):
point = thickness.GetPoint(point_index)
mask_value = vtk_point_to_label(point, mask)
thickness_value = thickness_values.GetValue(point_index)
if mask_value == 1:
inside_mask_values.append(thickness_value)
else:
outside_mask_values.append(thickness_value)
return inside_mask_values, outside_mask_values
def calculate_stats(values):
if values:
values_array = np.array(values)
return dict(
mean=values_array.mean(),
std=values_array.std(),
min=values_array.min(),
max=values_array.max(),
)
else:
return dict(mean=None, std=None, min=None, max=None)
def masked_thickness_stats(thickness_file, mask_image_file):
inside_mask_values, outside_mask_values = masked_thickness_values(
thickness_file, mask_image_file
)
stats = dict()
stats["inside"] = calculate_stats(inside_mask_values)
stats["outside"] = calculate_stats(outside_mask_values)
return stats
def get_thickness_stats_for_both_hemispheres(subjects_dir, subject_id, mask_file):
stats = dict()
lh_thickness, rh_thickness = get_thickness_files_for_both_hemispheres(
subjects_dir, subject_id
)
stats["lh"] = masked_thickness_stats(lh_thickness, mask_file)
stats["rh"] = masked_thickness_stats(rh_thickness, mask_file)
return stats
def main():
os.environ[
"PATH"
] += ":/Shared/sinapse/sharedopt/apps/freesurfer/Darwin/x86_64/6.0-beta/20150915/bin/"
mask_file = "/Shared/sinapse/CACHE/20160712_AtrophySimulation_Results/2559/58661/simulation_1/atrophy_regions.nii.gz"
subj_dir = "/Shared/sinapse/CACHE/20160713_AtrophySimulation_BAW_base_Results/PHD_024/2559_58661/79/"
print(get_thickness_stats_for_both_hemispheres(subj_dir, "FreeSurfer", mask_file))
print("done")
if __name__ == "__main__":
main()
| true | true |
f7fafbee44a5e3be184f9e36104920b5900173a0 | 2,122 | py | Python | isiscb/isisdata/operations.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | [
"MIT"
] | 4 | 2016-01-25T20:35:33.000Z | 2020-04-07T15:39:52.000Z | isiscb/isisdata/operations.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | [
"MIT"
] | 41 | 2015-08-19T17:34:41.000Z | 2022-03-11T23:19:01.000Z | isiscb/isisdata/operations.py | bgopalachary/IsisCB | c28e3f504eea60ebeff38318d8bb2071abb28ebb | [
"MIT"
] | 2 | 2020-11-25T20:18:18.000Z | 2021-06-24T15:15:41.000Z | from __future__ import unicode_literals
from builtins import filter
from django.db.models import Q
from isisdata.models import *
def filter_queryset(user, queryset, do=CRUDRule.VIEW):
"""
Limit a :class:`.QuerySet` to what ``user`` has permission to ``do``.
"""
roles = user.isiscbrole_set.all()
if user.is_superuser: # Superusers are super users.
return queryset
_not_null = lambda obj: obj is not None and obj != ''
# Identify roles that explicitly grant or implicitly deny permission to
# ``do``.
do_pks = roles.filter(Q(accessrule__crudrule__crud_action=do)).values_list('id', flat=True)
dont_pks = roles.filter(~Q(accessrule__crudrule__crud_action=do)).values_list('id', flat=True)
# We need a separate query here, since those above are inner joins and
# we want access to adjacent accessrule entries rooted in the same role.
include = list(roles.filter(pk__in=do_pks, accessrule__datasetrule__isnull=False).values_list('accessrule__datasetrule__dataset', flat=True))
exclude = list(roles.filter(pk__in=dont_pks, accessrule__datasetrule__isnull=False).values_list('accessrule__datasetrule__dataset', flat=True))
# Some citations and authorities are not assigned to a dataset. So if the
# dataset is not set, then the rule applies to records without a dataset.
include_isnull = '' in include or None in include
exclude_isnull = '' in exclude or None in exclude
# We can't use null values when filtering, below.
include = list(filter(_not_null, include))
exclude = list(filter(_not_null, exclude))
if exclude or exclude_isnull:
query = Q(belongs_to__in=exclude)
if exclude_isnull:
query |= Q(belongs_to__isnull=True)
queryset = queryset.exclude(query)
# If ``include`` is empty, this will have the effect of excluding all
# records, unless ``include_isnull`` is True and the record has no dataset.
query = Q(belongs_to__in=include)
if include_isnull:
query |= Q(belongs_to__isnull=True)
queryset = queryset.filter(query)
return queryset
| 41.607843 | 147 | 0.717719 | from __future__ import unicode_literals
from builtins import filter
from django.db.models import Q
from isisdata.models import *
def filter_queryset(user, queryset, do=CRUDRule.VIEW):
roles = user.isiscbrole_set.all()
if user.is_superuser:
return queryset
_not_null = lambda obj: obj is not None and obj != ''
do_pks = roles.filter(Q(accessrule__crudrule__crud_action=do)).values_list('id', flat=True)
dont_pks = roles.filter(~Q(accessrule__crudrule__crud_action=do)).values_list('id', flat=True)
include = list(roles.filter(pk__in=do_pks, accessrule__datasetrule__isnull=False).values_list('accessrule__datasetrule__dataset', flat=True))
exclude = list(roles.filter(pk__in=dont_pks, accessrule__datasetrule__isnull=False).values_list('accessrule__datasetrule__dataset', flat=True))
include_isnull = '' in include or None in include
exclude_isnull = '' in exclude or None in exclude
include = list(filter(_not_null, include))
exclude = list(filter(_not_null, exclude))
if exclude or exclude_isnull:
query = Q(belongs_to__in=exclude)
if exclude_isnull:
query |= Q(belongs_to__isnull=True)
queryset = queryset.exclude(query)
# If ``include`` is empty, this will have the effect of excluding all
# records, unless ``include_isnull`` is True and the record has no dataset.
query = Q(belongs_to__in=include)
if include_isnull:
query |= Q(belongs_to__isnull=True)
queryset = queryset.filter(query)
return queryset
| true | true |
f7fafc3eca2a0d5f684ce78dbf8d565f8e0da8a0 | 787 | py | Python | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 19 | 2019-11-25T09:02:15.000Z | 2021-07-24T12:05:28.000Z | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 1 | 2019-11-25T09:06:08.000Z | 2019-11-25T09:06:08.000Z | craw/modules/trail/trails/feeds/urlvir.py | xuluhang/DomainBlockList | e9e69138ffdba6a73741fe204306f1f0b66eff19 | [
"MIT"
] | 10 | 2019-11-26T02:42:02.000Z | 2021-08-28T07:16:08.000Z | #!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from craw.modules.trail.plugins.util import wget_content
__url__ = "http://www.urlvir.com/export-hosts/"
__check__ = "Updated on"
__info__ = "malware"
__reference__ = "urlvir.com"
maintainer_url = __reference__
maintainer = "urlvir"
list_source_url = __url__
category = __info__
def fetch():
retval = {}
content = wget_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line.strip()] = (__info__, __reference__)
return retval
| 23.848485 | 83 | 0.66709 |
from craw.modules.trail.plugins.util import wget_content
__url__ = "http://www.urlvir.com/export-hosts/"
__check__ = "Updated on"
__info__ = "malware"
__reference__ = "urlvir.com"
maintainer_url = __reference__
maintainer = "urlvir"
list_source_url = __url__
category = __info__
def fetch():
retval = {}
content = wget_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line.strip()] = (__info__, __reference__)
return retval
| true | true |
f7fafc50779885f0c4bda2ae7f84ef26ecdce3aa | 2,298 | py | Python | dummy.py | ShashkovS/poddavki_solver | 3062442fdbf65a5e0abe40bb3088d1d5e5d84bd6 | [
"MIT"
] | 5 | 2017-12-21T13:06:44.000Z | 2018-04-16T19:12:28.000Z | dummy.py | ShashkovS/poddavki_solver | 3062442fdbf65a5e0abe40bb3088d1d5e5d84bd6 | [
"MIT"
] | null | null | null | dummy.py | ShashkovS/poddavki_solver | 3062442fdbf65a5e0abe40bb3088d1d5e5d84bd6 | [
"MIT"
] | null | null | null | from copy import deepcopy
EMPTY, WHITE, BLACK = '.', 'W', 'B'
def free_steps(myboard, fig, need_coordinates=False):
board = deepcopy(myboard)
nextlin, free_steps = (1 if fig == 'B' else -1), []
for lin in range(0, 8):
for col in range(int(lin % 2 == 0), 8, 2):
for i in (1, -1):
try:
if board[lin][col] == fig:
if board[lin + nextlin][col + i] not in (fig, '*', '.') and board[lin + nextlin * 2][col + i * 2] == '.':
board[lin + nextlin * 2][col + i * 2] = '*'
free_steps.append([lin + nextlin * 2, col + i * 2])
elif board[lin - nextlin][col + i] not in (fig, '*', '.') and board[lin - nextlin * 2][col + i * 2] == '.':
board[lin - nextlin * 2][col + i * 2] = '*'
free_steps.append([lin - nextlin * 2, col + i * 2])
elif board[lin + nextlin][col + i] == '.' and len(free_steps) == 0:
board[lin + nextlin][col + i] = '*'
free_steps.append([lin + nextlin, col + i])
except:
None
if need_coordinates:
return board, free_steps
else:
return board
def initial_board():
board = [[EMPTY for i in range(8)] for j in range(8)]
black_cells = [[0, 1], [0, 3], [0, 5], [0, 7],
[1, 0], [1, 2], [1, 4], [1, 6],
[2, 1], [2, 3], [2, 5], [2, 7]]
white_cells = [[7, 0], [7, 2], [7, 4], [7, 6],
[6, 1], [6, 3], [6, 5], [6, 7],
[5, 0], [5, 2], [5, 4], [5, 6]]
for bc in black_cells:
board[bc[0]][bc[1]] = BLACK
for wc in white_cells:
board[wc[0]][wc[1]] = WHITE
return board
def print_board(board):
rep = ' ' + ' '.join(list(map(str, [i for i in range(1, 9)]))) + '\n'
for i in range(1, 9):
rep += '\n' + str(i) + ' '
rep += ' '.join(list(map(str, board[i - 1])))
return rep
# TEST:
board = initial_board()
board[3][2] = 'W'
board[4][3] = 'B'
board[6][5] = 'B'
board[7][4] = '.'
newboard, steps = free_steps(board, 'W', True)
print(print_board(newboard))
print(steps)
# ALL WORK! LIFE IS GOOD! | 37.064516 | 131 | 0.437337 | from copy import deepcopy
EMPTY, WHITE, BLACK = '.', 'W', 'B'
def free_steps(myboard, fig, need_coordinates=False):
board = deepcopy(myboard)
nextlin, free_steps = (1 if fig == 'B' else -1), []
for lin in range(0, 8):
for col in range(int(lin % 2 == 0), 8, 2):
for i in (1, -1):
try:
if board[lin][col] == fig:
if board[lin + nextlin][col + i] not in (fig, '*', '.') and board[lin + nextlin * 2][col + i * 2] == '.':
board[lin + nextlin * 2][col + i * 2] = '*'
free_steps.append([lin + nextlin * 2, col + i * 2])
elif board[lin - nextlin][col + i] not in (fig, '*', '.') and board[lin - nextlin * 2][col + i * 2] == '.':
board[lin - nextlin * 2][col + i * 2] = '*'
free_steps.append([lin - nextlin * 2, col + i * 2])
elif board[lin + nextlin][col + i] == '.' and len(free_steps) == 0:
board[lin + nextlin][col + i] = '*'
free_steps.append([lin + nextlin, col + i])
except:
None
if need_coordinates:
return board, free_steps
else:
return board
def initial_board():
board = [[EMPTY for i in range(8)] for j in range(8)]
black_cells = [[0, 1], [0, 3], [0, 5], [0, 7],
[1, 0], [1, 2], [1, 4], [1, 6],
[2, 1], [2, 3], [2, 5], [2, 7]]
white_cells = [[7, 0], [7, 2], [7, 4], [7, 6],
[6, 1], [6, 3], [6, 5], [6, 7],
[5, 0], [5, 2], [5, 4], [5, 6]]
for bc in black_cells:
board[bc[0]][bc[1]] = BLACK
for wc in white_cells:
board[wc[0]][wc[1]] = WHITE
return board
def print_board(board):
rep = ' ' + ' '.join(list(map(str, [i for i in range(1, 9)]))) + '\n'
for i in range(1, 9):
rep += '\n' + str(i) + ' '
rep += ' '.join(list(map(str, board[i - 1])))
return rep
board = initial_board()
board[3][2] = 'W'
board[4][3] = 'B'
board[6][5] = 'B'
board[7][4] = '.'
newboard, steps = free_steps(board, 'W', True)
print(print_board(newboard))
print(steps)
| true | true |
f7fafd7987fff653ffb6447991b1b1bf940fc87e | 257 | py | Python | app/recipe/urls.py | Chahat001/Recipe-APP-API | 62123248256bd287f35884520efe720baa56e7f3 | [
"MIT"
] | null | null | null | app/recipe/urls.py | Chahat001/Recipe-APP-API | 62123248256bd287f35884520efe720baa56e7f3 | [
"MIT"
] | null | null | null | app/recipe/urls.py | Chahat001/Recipe-APP-API | 62123248256bd287f35884520efe720baa56e7f3 | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| 17.133333 | 48 | 0.754864 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| true | true |
f7fafd8fa883aa04cf113a1397eddc0257eddf91 | 5,284 | py | Python | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | [
"Apache-2.0",
"MIT"
] | 1 | 2015-05-04T12:19:05.000Z | 2015-05-04T12:19:05.000Z | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | [
"Apache-2.0",
"MIT"
] | null | null | null | ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapred_service_check.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-01-07T08:55:01.000Z | 2021-01-07T08:55:01.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class MapReduce2ServiceCheck(Script):
def service_check(self, env):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
component_type = 'hs'
if params.hadoop_ssl_enabled:
component_address = params.hs_webui_address
else:
component_address = params.hs_webui_address
validateStatusFileName = "validateYarnComponentStatusWindows.py"
validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
python_executable = sys.executable
validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
if params.security_enabled:
kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
smoke_cmd = kinit_cmd + validateStatusCmd
else:
smoke_cmd = validateStatusCmd
File(validateStatusFilePath,
content=StaticFile(validateStatusFileName)
)
Execute(smoke_cmd,
tries=3,
try_sleep=5,
logoutput=True
)
# hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
#
# tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
# jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
# input_file = format("/user/hadoop/mapredsmokeinput")
# output_file = format("/user/hadoop/mapredsmokeoutput")
# cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
# create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
# run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
# test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
#
# if params.security_enabled:
# kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
# Execute(kinit_cmd)
#
# Execute(cleanup_cmd,
# tries=1,
# try_sleep=5,
# logoutput=True,
# user=params.hdfs_user
# )
#
# Execute(create_file_cmd,
# tries=1,
# try_sleep=5,
# logoutput=True,
# user=params.hdfs_user
# )
#
# Execute(run_wordcount_job,
# tries=1,
# try_sleep=5,
# logoutput=True,
# user=params.hdfs_user
# )
#
# Execute(test_cmd,
# logoutput=True,
# user=params.hdfs_user
# )
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
input_file = format("/user/{smokeuser}/mapredsmokeinput")
output_file = format("/user/{smokeuser}/mapredsmokeoutput")
test_cmd = format("fs -test -e {output_file}")
run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
params.HdfsResource(output_file,
action = "delete_on_execute",
type = "directory",
)
params.HdfsResource(input_file,
action = "create_on_execute",
type = "file",
source = "/etc/passwd",
)
params.HdfsResource(None, action="execute")
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
Execute(kinit_cmd,
user=params.smokeuser
)
ExecuteHadoop(run_wordcount_job,
tries=1,
try_sleep=5,
user=params.smokeuser,
bin_dir=params.execute_path,
conf_dir=params.hadoop_conf_dir,
logoutput=True
)
ExecuteHadoop(test_cmd,
user=params.smokeuser,
bin_dir=params.execute_path,
conf_dir=params.hadoop_conf_dir
)
if __name__ == "__main__":
MapReduce2ServiceCheck().execute()
| 33.232704 | 112 | 0.661998 |
import sys
from resource_management import *
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class MapReduce2ServiceCheck(Script):
def service_check(self, env):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
component_type = 'hs'
if params.hadoop_ssl_enabled:
component_address = params.hs_webui_address
else:
component_address = params.hs_webui_address
validateStatusFileName = "validateYarnComponentStatusWindows.py"
validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
python_executable = sys.executable
validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
if params.security_enabled:
kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
smoke_cmd = kinit_cmd + validateStatusCmd
else:
smoke_cmd = validateStatusCmd
File(validateStatusFilePath,
content=StaticFile(validateStatusFileName)
)
Execute(smoke_cmd,
tries=3,
try_sleep=5,
logoutput=True
)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
input_file = format("/user/{smokeuser}/mapredsmokeinput")
output_file = format("/user/{smokeuser}/mapredsmokeoutput")
test_cmd = format("fs -test -e {output_file}")
run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
params.HdfsResource(output_file,
action = "delete_on_execute",
type = "directory",
)
params.HdfsResource(input_file,
action = "create_on_execute",
type = "file",
source = "/etc/passwd",
)
params.HdfsResource(None, action="execute")
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
Execute(kinit_cmd,
user=params.smokeuser
)
ExecuteHadoop(run_wordcount_job,
tries=1,
try_sleep=5,
user=params.smokeuser,
bin_dir=params.execute_path,
conf_dir=params.hadoop_conf_dir,
logoutput=True
)
ExecuteHadoop(test_cmd,
user=params.smokeuser,
bin_dir=params.execute_path,
conf_dir=params.hadoop_conf_dir
)
if __name__ == "__main__":
MapReduce2ServiceCheck().execute()
| true | true |
f7fafeb94afdcb33f5ad2d84cc3435cfee3e418b | 3,432 | py | Python | mailpy/response.py | dn0/mailpy | db382d9d67a34444b8492bb0766b7320e46a0ac0 | [
"BSD-3-Clause"
] | 1 | 2015-10-05T12:01:19.000Z | 2015-10-05T12:01:19.000Z | mailpy/response.py | dn0/mailpy | db382d9d67a34444b8492bb0766b7320e46a0ac0 | [
"BSD-3-Clause"
] | 3 | 2015-08-20T08:47:08.000Z | 2015-10-17T13:52:57.000Z | mailpy/response.py | dn0/mailpy | db382d9d67a34444b8492bb0766b7320e46a0ac0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .utils import send_mail, decode_header
from .request import MailRequest
__all__ = ('MailResponse', 'TextMailResponse', 'HtmlMailResponse')
class MailResponse(object):
    """
    Mail response (message wrapper).

    Wraps an :class:`email.message.Message` that answers a
    :class:`MailRequest`, fills in the standard reply headers
    (Subject/From/To/In-Reply-To) plus the custom ``X-mailpy-*`` diagnostic
    headers, and hands the result to a sendmail function.
    """
    status_code = 200  # HTTP-like status; subclasses/callers may override

    def __init__(self, request, message, sender=None, recipients=None, subject=None, status_code=None):
        """
        :param request: the MailRequest being answered.
        :param message: the MIME message to send back.
        :param sender: envelope sender; defaults to the request's recipient.
        :param recipients: envelope recipient list; defaults to [request.sender].
        :param subject: reply subject; defaults to "Re: " + the original subject.
        :param status_code: overrides the class-level status code when set.
        """
        assert isinstance(request, MailRequest), 'request must be an instance of %s' % MailRequest
        assert isinstance(message, Message), 'message must be an instance of %s' % Message

        if sender is None:
            sender = request.recipient
        if recipients is None:
            recipients = [request.sender]
        if subject is None:
            subject = 'Re: ' + request.get('Subject', '')  # Use original (encoded) header

        # Fixed grammar in the assertion message ("must a" -> "must be a").
        assert isinstance(recipients, (tuple, list)), 'recipients must be a tuple or list'

        self._sent = False
        self._subject = subject
        self.request = request
        self.message = message
        self.status_code = status_code or self.status_code
        # SMTP envelope headers
        self.sender = sender
        self.recipients = recipients
        # Message headers
        self._add_message_headers()

    def _add_message_headers(self):
        """Add basic message headers and custom mailpy headers"""
        message = self.message
        # Do not clobber headers the caller already set on the message.
        if 'Subject' not in message:
            message['Subject'] = self._subject
        if 'From' not in message:
            message['From'] = self.sender
        if 'To' not in message:
            message['To'] = ','.join(self.recipients)
        # Threading + diagnostic headers are always set by mailpy.
        message['In-Reply-To'] = self.request.message_id
        message['X-mailpy-resource'] = self.request.resource
        message['X-mailpy-method'] = self.request.method
        message['X-mailpy-status-code'] = str(self.status_code)

    def __repr__(self):
        return '%s(status=%s, from="%s", to="%s", subject="%s")' % (self.__class__.__name__, self.status_code,
                                                                    self.sender, self.recipient,
                                                                    self.message.get('Subject', ''))

    def __str__(self):
        return '%s' % self.message

    @property
    def recipient(self):
        """All envelope recipients joined into one comma-separated string."""
        return ','.join(self.recipients)

    @property
    def subject(self):
        """The message subject, decoded from its RFC 2047 encoded form."""
        return decode_header(self.message.get('Subject', ''))

    def send(self, sendmail_fun=send_mail):
        """Deliver the message via *sendmail_fun* and mark it as sent.

        Bug fix: the sent flag is now set only after the sendmail call
        returns, so a delivery failure no longer leaves _sent == True.
        """
        result = sendmail_fun(self.sender, self.recipients, self.message.as_string())
        self._sent = True
        return result
class TextMailResponse(MailResponse):
    """Mail response whose body is a single plain-text MIME part."""

    def __init__(self, request, text, charset='utf-8', **kwargs):
        body = MIMEText(text, _charset=charset)
        super(TextMailResponse, self).__init__(request, body, **kwargs)
class HtmlMailResponse(MailResponse):
    """
    HTML mail response.

    Builds a multipart/alternative message: an optional plain-text part is
    attached first (the least-preferred alternative per RFC 2046), followed
    by the HTML part that capable clients will render.
    """
    def __init__(self, request, html, text=None, charset='utf-8', **kwargs):
        msg = MIMEMultipart('alternative')
        if text is not None:
            # Bug fix: the MIME subtype for plain text is 'plain', not 'text'.
            # The old value produced an invalid "text/text" Content-Type.
            msg.attach(MIMEText(text, 'plain', _charset=charset))
        msg.attach(MIMEText(html, 'html', _charset=charset))
        super(HtmlMailResponse, self).__init__(request, msg, **kwargs)
| 32.685714 | 110 | 0.615967 |
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .utils import send_mail, decode_header
from .request import MailRequest
__all__ = ('MailResponse', 'TextMailResponse', 'HtmlMailResponse')
class MailResponse(object):
    """Mail response wrapper: prepares a reply Message for a MailRequest.

    Sets reply headers (Subject/From/To/In-Reply-To) and the custom
    ``X-mailpy-*`` diagnostic headers, and delivers via a sendmail function.
    """
    status_code = 200  # HTTP-like status; subclasses/callers may override

    def __init__(self, request, message, sender=None, recipients=None, subject=None, status_code=None):
        """Build a response; defaults mirror the request (reply semantics)."""
        assert isinstance(request, MailRequest), 'request must be an instance of %s' % MailRequest
        assert isinstance(message, Message), 'message must be an instance of %s' % Message
        if sender is None:
            sender = request.recipient
        if recipients is None:
            recipients = [request.sender]
        if subject is None:
            # Reuse the original (still-encoded) Subject header verbatim.
            subject = 'Re: ' + request.get('Subject', '')
        assert isinstance(recipients, (tuple, list)), 'recipients must a tuple or list'
        self._sent = False
        self._subject = subject
        self.request = request
        self.message = message
        self.status_code = status_code or self.status_code
        # SMTP envelope addresses (distinct from the message headers below).
        self.sender = sender
        self.recipients = recipients
        self._add_message_headers()

    def _add_message_headers(self):
        """Fill in reply and mailpy diagnostic headers on the message."""
        message = self.message
        # Do not clobber headers the caller already set on the message.
        if 'Subject' not in message:
            message['Subject'] = self._subject
        if 'From' not in message:
            message['From'] = self.sender
        if 'To' not in message:
            message['To'] = ','.join(self.recipients)
        # Threading + diagnostic headers are always set.
        message['In-Reply-To'] = self.request.message_id
        message['X-mailpy-resource'] = self.request.resource
        message['X-mailpy-method'] = self.request.method
        message['X-mailpy-status-code'] = str(self.status_code)

    def __repr__(self):
        return '%s(status=%s, from="%s", to="%s", subject="%s")' % (self.__class__.__name__, self.status_code,
                                                                    self.sender, self.recipient,
                                                                    self.message.get('Subject', ''))

    def __str__(self):
        return '%s' % self.message

    @property
    def recipient(self):
        """All envelope recipients joined into one comma-separated string."""
        return ','.join(self.recipients)

    @property
    def subject(self):
        """The message subject, decoded from its RFC 2047 encoded form."""
        return decode_header(self.message.get('Subject', ''))

    def send(self, sendmail_fun=send_mail):
        """Deliver the message via *sendmail_fun*; returns its result.

        NOTE(review): _sent is set before delivery, so it stays True even if
        sendmail_fun raises — confirm whether that is intended.
        """
        self._sent = True
        return sendmail_fun(self.sender, self.recipients, self.message.as_string())
class TextMailResponse(MailResponse):
    """Mail response whose body is a single plain-text MIME part."""
    def __init__(self, request, text, charset='utf-8', **kwargs):
        super(TextMailResponse, self).__init__(request, MIMEText(text, _charset=charset), **kwargs)
class HtmlMailResponse(MailResponse):
    """HTML mail response built as multipart/alternative.

    The optional plain-text part is attached first (least preferred per
    RFC 2046), followed by the HTML part.
    """
    def __init__(self, request, html, text=None, charset='utf-8', **kwargs):
        msg = MIMEMultipart('alternative')
        if text is not None:
            # Bug fix: the MIME subtype for plain text is 'plain', not 'text'.
            # The old value produced an invalid "text/text" Content-Type.
            msg.attach(MIMEText(text, 'plain', _charset=charset))
        msg.attach(MIMEText(html, 'html', _charset=charset))
        super(HtmlMailResponse, self).__init__(request, msg, **kwargs)
| true | true |
f7faff234c8be6121743b94002053f32c4faf470 | 1,174 | py | Python | tests/real_tests/test_real_sqlite_cache.py | guionardo/py-cache-guiosoft | 4f9b6805d4b20c2583648bb9b8c43c8cfa73ebdf | [
"MIT"
] | null | null | null | tests/real_tests/test_real_sqlite_cache.py | guionardo/py-cache-guiosoft | 4f9b6805d4b20c2583648bb9b8c43c8cfa73ebdf | [
"MIT"
] | 11 | 2020-04-03T17:18:44.000Z | 2021-02-10T20:02:53.000Z | tests/real_tests/test_real_sqlite_cache.py | guionardo/py-cache-guiosoft | 4f9b6805d4b20c2583648bb9b8c43c8cfa73ebdf | [
"MIT"
] | null | null | null | import os
import unittest
from time import sleep
from cache_gs import CacheGS
from cache_gs.utils.filesystem import remove_tree
class TestRealSQLiteCache(unittest.TestCase):
    """Integration tests for CacheGS backed by an on-disk SQLite store."""

    @classmethod
    def setUpClass(cls):
        # Use a throwaway .cache directory so the test never touches real data.
        cls.cache_file = '.cache'
        if not os.path.isdir(cls.cache_file):
            os.mkdir(cls.cache_file)
        cls.cache = CacheGS('sqlite://' + cls.cache_file)
        # Echo every SQL statement, which helps diagnose failures.
        cls.cache._cache.conn.set_trace_callback(print)
        # Seed a value with a ~1 ms TTL, then wait so it is already expired
        # by the time test_z_purge runs.
        cls.cache.set_value('sec', 'purged', '1234', 0.001)
        sleep(1)
        return super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Drop the cache object first so the SQLite file is released before
        # the directory is removed.
        del (cls.cache)
        remove_tree(cls.cache_file)

    def test_init(self):
        self.assertIsInstance(self.cache, CacheGS)

    def test_get_set_delete(self):
        # Round-trip: set, read back, then delete.
        self.assertTrue(self.cache.set_value(
            'sec', 'key', '1234', ttl=100000))
        value = self.cache.get_value('sec', 'key')
        self.assertEqual(value, '1234')
        self.assertTrue(self.cache.delete_value('sec', 'key'))

    def test_z_purge(self):
        # 'z' prefix so alphabetical test ordering runs this last, after the
        # expired value seeded in setUpClass. First purge removes it; a
        # second purge finds nothing left.
        self.assertGreater(self.cache.purge_expired(), 0)
        self.assertEqual(self.cache.purge_expired(), 0)
| 28.634146 | 62 | 0.651618 | import os
import unittest
from time import sleep
from cache_gs import CacheGS
from cache_gs.utils.filesystem import remove_tree
class TestRealSQLiteCache(unittest.TestCase):
    """Integration tests for CacheGS backed by an on-disk SQLite store."""

    @classmethod
    def setUpClass(cls):
        # Throwaway .cache directory; SQL tracing enabled for debugging.
        cls.cache_file = '.cache'
        if not os.path.isdir(cls.cache_file):
            os.mkdir(cls.cache_file)
        cls.cache = CacheGS('sqlite://' + cls.cache_file)
        cls.cache._cache.conn.set_trace_callback(print)
        # Seed a ~1 ms TTL value and wait so it is expired for test_z_purge.
        cls.cache.set_value('sec', 'purged', '1234', 0.001)
        sleep(1)
        return super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Release the SQLite handle before removing the directory.
        del (cls.cache)
        remove_tree(cls.cache_file)

    def test_init(self):
        self.assertIsInstance(self.cache, CacheGS)

    def test_get_set_delete(self):
        # Round-trip: set, read back, then delete.
        self.assertTrue(self.cache.set_value(
            'sec', 'key', '1234', ttl=100000))
        value = self.cache.get_value('sec', 'key')
        self.assertEqual(value, '1234')
        self.assertTrue(self.cache.delete_value('sec', 'key'))

    def test_z_purge(self):
        # 'z' prefix -> runs last; first purge removes the expired seed,
        # second purge finds nothing.
        self.assertGreater(self.cache.purge_expired(), 0)
        self.assertEqual(self.cache.purge_expired(), 0)
| true | true |
f7faff737b0487d907ec123f283b8b9be0268457 | 716 | py | Python | QueueDB/extmodels/GoogleDrive.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 33 | 2017-03-12T16:26:45.000Z | 2021-04-30T05:37:35.000Z | QueueDB/extmodels/GoogleDrive.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 6 | 2017-04-21T08:44:47.000Z | 2018-11-11T16:20:22.000Z | QueueDB/extmodels/GoogleDrive.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 13 | 2017-03-12T16:26:56.000Z | 2020-04-20T05:35:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Li Yao
# @Date: 12/29/20
#
# BioQueue is free for personal use and is licensed under
# the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
from django.db import models
from QueueDB.models import _OwnerModel
class GoogleDriveConnection(_OwnerModel):
    """Per-user Google Drive binding (owner comes from _OwnerModel)."""
    # Path of the pickled Google OAuth credential used to authenticate.
    credential_pickle = models.CharField(max_length=255)
    # ID of the Drive folder used as the transfer target.
    folder_id = models.CharField(max_length=255)
| 32.545455 | 65 | 0.75838 |
from django.db import models
from QueueDB.models import _OwnerModel
class GoogleDriveConnection(_OwnerModel):
    """Per-user Google Drive binding (owner comes from _OwnerModel)."""
    # Path of the pickled Google OAuth credential used to authenticate.
    credential_pickle = models.CharField(max_length=255)
    # ID of the Drive folder used as the transfer target.
    folder_id = models.CharField(max_length=255)
| true | true |
f7fb0060d5774b4a635cdaa2b52838f02eb1225c | 1,155 | py | Python | jdaviz/configs/cubeviz/plugins/tests/test_data_retrieval.py | check-spelling/jdaviz | bfd0514d13bdc6fa0b8c8536a603293409270337 | [
"MIT",
"BSD-3-Clause"
] | 55 | 2019-05-24T18:53:05.000Z | 2022-03-14T08:45:52.000Z | jdaviz/configs/cubeviz/plugins/tests/test_data_retrieval.py | check-spelling/jdaviz | bfd0514d13bdc6fa0b8c8536a603293409270337 | [
"MIT",
"BSD-3-Clause"
] | 1,105 | 2019-05-09T15:17:35.000Z | 2022-03-31T21:22:18.000Z | jdaviz/configs/cubeviz/plugins/tests/test_data_retrieval.py | rosteen/jdaviz | e02c08d68ef71c5e40600785f46e65e5ae95e236 | [
"MIT",
"BSD-3-Clause"
] | 49 | 2019-05-07T18:05:42.000Z | 2022-03-22T15:15:34.000Z | import pytest
import numpy as np
from astropy.utils.data import download_file
from jdaviz.app import Application
# This file is originally from
# https://data.sdss.org/sas/dr14/manga/spectro/redux/v2_1_2/7495/stack/manga-7495-12704-LOGCUBE.fits.gz
URL = 'https://stsci.box.com/shared/static/28a88k1qfipo4yxc4p4d40v4axtlal8y.fits'
""" The purpose of this test is to check that both methods:
- app.get_viewer('spectrum-viewer').data()
- app.get_data_from_viewer("spectrum-viewer")
return the same spectrum values.
"""
@pytest.fixture
def jdaviz_app():
    """Return a fresh Jdaviz Application configured for Cubeviz."""
    return Application(configuration='cubeviz')
@pytest.mark.filterwarnings('ignore')
@pytest.mark.remote_data
def test_data_retrieval(jdaviz_app):
    """Both spectrum-retrieval APIs must agree on the loaded cube.

    ``get_viewer('spectrum-viewer').data()`` and
    ``get_data_from_viewer('spectrum-viewer')`` should return the same
    spectral values.
    """
    fn = download_file(URL, cache=True)  # MaNGA cube; cached after first run
    jdaviz_app.load_data(fn)

    # two ways of retrieving data from the viewer.
    # They should return the same spectral values
    a1 = jdaviz_app.get_viewer('spectrum-viewer').data()
    a2 = jdaviz_app.get_data_from_viewer("spectrum-viewer")

    test_value_1 = a1[0].data
    test_value_2 = list(a2.values())[0].data

    assert np.allclose(test_value_1, test_value_2, atol=1e-5)
| 26.860465 | 103 | 0.741126 | import pytest
import numpy as np
from astropy.utils.data import download_file
from jdaviz.app import Application
URL = 'https://stsci.box.com/shared/static/28a88k1qfipo4yxc4p4d40v4axtlal8y.fits'
@pytest.fixture
def jdaviz_app():
    """Return a fresh Jdaviz Application configured for Cubeviz."""
    return Application(configuration='cubeviz')
@pytest.mark.filterwarnings('ignore')
@pytest.mark.remote_data
def test_data_retrieval(jdaviz_app):
    """Both spectrum-retrieval APIs must return the same spectral values."""
    fn = download_file(URL, cache=True)  # MaNGA cube; cached after first run
    jdaviz_app.load_data(fn)

    # Retrieve the spectrum through both public APIs.
    a1 = jdaviz_app.get_viewer('spectrum-viewer').data()
    a2 = jdaviz_app.get_data_from_viewer("spectrum-viewer")

    test_value_1 = a1[0].data
    test_value_2 = list(a2.values())[0].data

    assert np.allclose(test_value_1, test_value_2, atol=1e-5)
| true | true |
f7fb00815d89ab463175d1883628cdede1d26c1b | 11,396 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20190801/express_route_port.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20190801/express_route_port.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20190801/express_route_port.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRoutePort']
class ExpressRoutePort(pulumi.CustomResource):
    # NOTE: generated by the Pulumi SDK generator for Azure network API
    # version 2019-08-01 — do not edit by hand.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bandwidth_in_gbps: Optional[pulumi.Input[int]] = None,
                 encapsulation: Optional[pulumi.Input[str]] = None,
                 express_route_port_name: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peering_location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_guid: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        ExpressRoutePort resource definition.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] bandwidth_in_gbps: Bandwidth of procured ports in Gbps.
        :param pulumi.Input[str] encapsulation: Encapsulation method on physical ports.
        :param pulumi.Input[str] express_route_port_name: The name of the ExpressRoutePort resource.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The identity of ExpressRoutePort, if configured.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]] links: The set of physical links of the ExpressRoutePort resource.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] peering_location: The name of the peering location that the ExpressRoutePort is mapped to physically.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] resource_guid: The resource GUID property of the express route port resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        # __name__/__opts__ are legacy aliases for resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (opts.id absent): validate required
            # inputs and build the property bag from the arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['bandwidth_in_gbps'] = bandwidth_in_gbps
            __props__['encapsulation'] = encapsulation
            if express_route_port_name is None:
                raise TypeError("Missing required property 'express_route_port_name'")
            __props__['express_route_port_name'] = express_route_port_name
            __props__['id'] = id
            __props__['identity'] = identity
            __props__['links'] = links
            __props__['location'] = location
            __props__['peering_location'] = peering_location
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['resource_guid'] = resource_guid
            __props__['tags'] = tags
            # Output-only properties: initialized to None here and populated
            # by the provider once the resource exists.
            __props__['allocation_date'] = None
            __props__['circuits'] = None
            __props__['etag'] = None
            __props__['ether_type'] = None
            __props__['mtu'] = None
            __props__['name'] = None
            __props__['provisioned_bandwidth_in_gbps'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases let Pulumi track the same resource across other API versions
        # of this type without recreating it.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRoutePort")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ExpressRoutePort, __self__).__init__(
            'azure-nextgen:network/v20190801:ExpressRoutePort',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRoutePort':
        """
        Get an existing ExpressRoutePort resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        return ExpressRoutePort(resource_name, opts=opts, __props__=__props__)

    # ------------------------------------------------------------------
    # Resource properties (read-only outputs resolved by the provider).
    # ------------------------------------------------------------------

    @property
    @pulumi.getter(name="allocationDate")
    def allocation_date(self) -> pulumi.Output[str]:
        """
        Date of the physical port allocation to be used in Letter of Authorization.
        """
        return pulumi.get(self, "allocation_date")

    @property
    @pulumi.getter(name="bandwidthInGbps")
    def bandwidth_in_gbps(self) -> pulumi.Output[Optional[int]]:
        """
        Bandwidth of procured ports in Gbps.
        """
        return pulumi.get(self, "bandwidth_in_gbps")

    @property
    @pulumi.getter
    def circuits(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        Reference the ExpressRoute circuit(s) that are provisioned on this ExpressRoutePort resource.
        """
        return pulumi.get(self, "circuits")

    @property
    @pulumi.getter
    def encapsulation(self) -> pulumi.Output[Optional[str]]:
        """
        Encapsulation method on physical ports.
        """
        return pulumi.get(self, "encapsulation")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="etherType")
    def ether_type(self) -> pulumi.Output[str]:
        """
        Ether type of the physical port.
        """
        return pulumi.get(self, "ether_type")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        """
        The identity of ExpressRoutePort, if configured.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def links(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteLinkResponse']]]:
        """
        The set of physical links of the ExpressRoutePort resource.
        """
        return pulumi.get(self, "links")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def mtu(self) -> pulumi.Output[str]:
        """
        Maximum transmission unit of the physical port pair(s).
        """
        return pulumi.get(self, "mtu")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peeringLocation")
    def peering_location(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the peering location that the ExpressRoutePort is mapped to physically.
        """
        return pulumi.get(self, "peering_location")

    @property
    @pulumi.getter(name="provisionedBandwidthInGbps")
    def provisioned_bandwidth_in_gbps(self) -> pulumi.Output[float]:
        """
        Aggregate Gbps of associated circuit bandwidths.
        """
        return pulumi.get(self, "provisioned_bandwidth_in_gbps")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the express route port resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[Optional[str]]:
        """
        The resource GUID property of the express route port resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    # Map between Python snake_case and the provider's camelCase names.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 43.830769 | 1,274 | 0.657687 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRoutePort']
class ExpressRoutePort(pulumi.CustomResource):
    """ExpressRoutePort resource definition (Azure network API v2019-08-01).

    Generated Pulumi SDK wrapper — do not edit by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bandwidth_in_gbps: Optional[pulumi.Input[int]] = None,
                 encapsulation: Optional[pulumi.Input[str]] = None,
                 express_route_port_name: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peering_location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_guid: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """Create an ExpressRoutePort; express_route_port_name and
        resource_group_name are required, everything else optional."""
        # __name__/__opts__ are legacy aliases for resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate inputs, build the property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['bandwidth_in_gbps'] = bandwidth_in_gbps
            __props__['encapsulation'] = encapsulation
            if express_route_port_name is None:
                raise TypeError("Missing required property 'express_route_port_name'")
            __props__['express_route_port_name'] = express_route_port_name
            __props__['id'] = id
            __props__['identity'] = identity
            __props__['links'] = links
            __props__['location'] = location
            __props__['peering_location'] = peering_location
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['resource_guid'] = resource_guid
            __props__['tags'] = tags
            # Output-only properties, populated by the provider after create.
            __props__['allocation_date'] = None
            __props__['circuits'] = None
            __props__['etag'] = None
            __props__['ether_type'] = None
            __props__['mtu'] = None
            __props__['name'] = None
            __props__['provisioned_bandwidth_in_gbps'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases keep the resource tracked across other API versions of the type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRoutePort")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ExpressRoutePort, __self__).__init__(
            'azure-nextgen:network/v20190801:ExpressRoutePort',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRoutePort':
        """Look up an existing ExpressRoutePort by provider ID."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        return ExpressRoutePort(resource_name, opts=opts, __props__=__props__)

    # ------------------------------------------------------------------
    # Resource properties (read-only outputs resolved by the provider).
    # ------------------------------------------------------------------

    @property
    @pulumi.getter(name="allocationDate")
    def allocation_date(self) -> pulumi.Output[str]:
        return pulumi.get(self, "allocation_date")

    @property
    @pulumi.getter(name="bandwidthInGbps")
    def bandwidth_in_gbps(self) -> pulumi.Output[Optional[int]]:
        return pulumi.get(self, "bandwidth_in_gbps")

    @property
    @pulumi.getter
    def circuits(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        return pulumi.get(self, "circuits")

    @property
    @pulumi.getter
    def encapsulation(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "encapsulation")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="etherType")
    def ether_type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "ether_type")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def links(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteLinkResponse']]]:
        return pulumi.get(self, "links")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def mtu(self) -> pulumi.Output[str]:
        return pulumi.get(self, "mtu")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peeringLocation")
    def peering_location(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "peering_location")

    @property
    @pulumi.getter(name="provisionedBandwidthInGbps")
    def provisioned_bandwidth_in_gbps(self) -> pulumi.Output[float]:
        return pulumi.get(self, "provisioned_bandwidth_in_gbps")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "type")

    # Map between Python snake_case and the provider's camelCase names.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7fb00a96ec13f9c034fdb798ce90475d4e94a0f | 2,647 | py | Python | data/p3BR/R1/benchmark/startPyquil401.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startPyquil401.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startPyquil401.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=72
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed benchmark circuit on wires 0-2.

    The gate sequence is auto-generated; the trailing '# number=N' comments
    are generator bookkeeping, and the order of gates is significant.
    """
    prog = Program() # circuit begin

    prog += H(0) # number=1
    prog += RX(-0.09738937226128368,2) # number=2
    prog += H(1) # number=33
    prog += Y(2) # number=56
    prog += CZ(2,1) # number=34
    prog += H(1) # number=35
    prog += H(1) # number=3
    prog += H(0) # number=45
    prog += H(1) # number=69
    prog += CZ(2,1) # number=70
    prog += H(1) # number=71
    prog += CZ(1,0) # number=46
    prog += H(0) # number=47
    prog += Y(1) # number=15
    prog += H(0) # number=66
    prog += CZ(1,0) # number=67
    prog += H(0) # number=68
    prog += H(1) # number=19
    prog += CZ(0,1) # number=20
    prog += RX(-0.6000441968356504,1) # number=28
    prog += H(1) # number=21
    prog += H(1) # number=30
    prog += CZ(0,1) # number=31
    prog += H(1) # number=32
    prog += H(1) # number=57
    prog += CZ(0,1) # number=58
    prog += H(1) # number=59
    prog += CNOT(0,1) # number=51
    prog += X(1) # number=52
    prog += CNOT(0,1) # number=53
    prog += CNOT(0,1) # number=50
    prog += H(2) # number=29
    prog += H(1) # number=36
    prog += X(1) # number=64
    prog += CZ(0,1) # number=37
    prog += Y(2) # number=44
    prog += H(1) # number=38
    prog += Z(1) # number=55
    prog += H(1) # number=61
    prog += CZ(0,1) # number=62
    prog += Z(2) # number=65
    prog += H(1) # number=63
    prog += Z(1) # number=11
    prog += RX(-1.1780972450961724,2) # number=54
    prog += H(1) # number=42
    prog += H(0) # number=39
    prog += CZ(1,0) # number=40
    prog += H(0) # number=41
    prog += CNOT(2,1) # number=26
    prog += Y(1) # number=14
    prog += CNOT(1,0) # number=5
    prog += X(1) # number=6
    prog += Z(1) # number=8
    prog += X(1) # number=7
    prog += H(2) # number=43
    prog += RX(-2.42845112122491,1) # number=25
    # circuit end

    return prog
def summrise_results(bitstrings) -> dict:
    """Count how many times each measured bitstring occurred.

    :param bitstrings: iterable of bitstring labels (e.g. '01').
    :return: mapping of bitstring -> occurrence count.
    """
    # (Name kept as-is, despite the 'summarise' typo, so callers keep working.)
    counts = {}
    for bits in bitstrings:
        # dict.get with a default replaces the original None-check branch.
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the circuit 1024 times on a QVM and record the bitstring histogram.
    prog = make_circuit()
    qvm = get_qc('1q-qvm')

    results = qvm.run_and_measure(prog,1024)
    # Stack per-qubit measurement arrays into per-shot rows, then join each
    # row of bits into a bitstring like '01'.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil401.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| 26.737374 | 64 | 0.55119 |
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program()
prog += H(0)
prog += RX(-0.09738937226128368,2)
prog += H(1)
prog += Y(2)
prog += CZ(2,1)
prog += H(1)
prog += H(1)
prog += H(0)
prog += H(1)
prog += CZ(2,1)
prog += H(1)
prog += CZ(1,0)
prog += H(0)
prog += Y(1)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += H(1)
prog += CZ(0,1)
prog += RX(-0.6000441968356504,1)
prog += H(1)
prog += H(1)
prog += CZ(0,1)
prog += H(1)
prog += H(1)
prog += CZ(0,1)
prog += H(1)
prog += CNOT(0,1)
prog += X(1)
prog += CNOT(0,1)
prog += CNOT(0,1)
prog += H(2)
prog += H(1)
prog += X(1)
prog += CZ(0,1)
prog += Y(2)
prog += H(1)
prog += Z(1)
prog += H(1)
prog += CZ(0,1)
prog += Z(2)
prog += H(1)
prog += Z(1)
prog += RX(-1.1780972450961724,2)
prog += H(1)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += CNOT(2,1)
prog += Y(1)
prog += CNOT(1,0)
prog += X(1)
prog += Z(1)
prog += X(1)
prog += H(2)
prog += RX(-2.42845112122491,1)
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil401.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| true | true |
f7fb00c053b4385a3bee025e96670730d996acab | 227 | py | Python | apps/project/urls.py | FeiChaiCom/django-netdisk | f9ef4f46a065527f0ace0a65f2b3210d1657b80f | [
"MIT"
] | 6 | 2021-05-22T12:23:29.000Z | 2022-01-01T01:38:29.000Z | apps/project/urls.py | gaomugong/django-netdisk | 5b2204f5087579bb6a26c6b92972a9f53bd05a7c | [
"MIT"
] | null | null | null | apps/project/urls.py | gaomugong/django-netdisk | 5b2204f5087579bb6a26c6b92972a9f53bd05a7c | [
"MIT"
] | 1 | 2021-05-22T15:25:33.000Z | 2021-05-22T15:25:33.000Z | from rest_framework import routers
from apps.project.views import ProjectViewSet
app_name = 'project'
router = routers.DefaultRouter()
router.register('project', ProjectViewSet, base_name='project')
urlpatterns = router.urls
| 25.222222 | 63 | 0.810573 | from rest_framework import routers
from apps.project.views import ProjectViewSet
app_name = 'project'
router = routers.DefaultRouter()
router.register('project', ProjectViewSet, base_name='project')
urlpatterns = router.urls
| true | true |
f7fb00f9c1bd6ed9ba70af36b34cda97083dc27d | 1,889 | py | Python | learningAgentKeras.py | kaiobarb/solarescape | 18f2c432a48e4b2fe9dc116ec7b9190ee5637401 | [
"MIT"
] | 1 | 2019-01-28T04:38:05.000Z | 2019-01-28T04:38:05.000Z | learningAgentKeras.py | kaiobarb/solarescape | 18f2c432a48e4b2fe9dc116ec7b9190ee5637401 | [
"MIT"
] | null | null | null | learningAgentKeras.py | kaiobarb/solarescape | 18f2c432a48e4b2fe9dc116ec7b9190ee5637401 | [
"MIT"
] | null | null | null | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from solarescape_env import SolarescapeEnv
import pygame
from ple import PLE
import random
# Get the environment and extract the number of actions.
game = SolarescapeEnv(width=856, height=856, dt=1)
game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
p = PLE(game, fps=30, frame_skip = 3, num_steps = 1,
force_fps = False, display_screen=False)
nb_actions = list(game.getActions())
print(nb_actions)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + (16, 4)))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=True, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=5, visualize=True) | 33.140351 | 93 | 0.761779 | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from solarescape_env import SolarescapeEnv
import pygame
from ple import PLE
import random
game = SolarescapeEnv(width=856, height=856, dt=1)
game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
p = PLE(game, fps=30, frame_skip = 3, num_steps = 1,
force_fps = False, display_screen=False)
nb_actions = list(game.getActions())
print(nb_actions)
model = Sequential()
model.add(Flatten(input_shape=(1,) + (16, 4)))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=True, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=5, visualize=True) | true | true |
f7fb0310a3caace3fb403a1f221632972c04c435 | 12,657 | py | Python | webshell_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | webshell_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | webshell_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: workload.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from webshell_sdk.model.container import container_pb2 as webshell__sdk_dot_model_dot_container_dot_container__pb2
from webshell_sdk.model.container import volume_pb2 as webshell__sdk_dot_model_dot_container_dot_volume__pb2
from webshell_sdk.model.container import deployment_strategy_pb2 as webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2
from webshell_sdk.model.container import local_object_reference_pb2 as webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2
from webshell_sdk.model.container import deployment_status_pb2 as webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='workload.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0eworkload.proto\x12\tcontainer\x1a,webshell_sdk/model/container/container.proto\x1a)webshell_sdk/model/container/volume.proto\x1a\x36webshell_sdk/model/container/deployment_strategy.proto\x1a\x39webshell_sdk/model/container/local_object_reference.proto\x1a\x34webshell_sdk/model/container/deployment_status.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe3\x04\n\x08Workload\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04kind\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\t\x12\x14\n\x0cresourceName\x18\x05 \x01(\t\x12.\n\ncontainers\x18\x06 \x03(\x0b\x32\x1a.container.ContainerConfig\x12\x10\n\x08replicas\x18\x07 \x01(\x05\x12\"\n\x07volumes\x18\x08 \x03(\x0b\x32\x11.container.Volume\x12,\n\x0b\x61nnotations\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06labels\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdnsPolicy\x18\x0b \x01(\t\x12\x15\n\rrestartPolicy\x18\x0c \x01(\t\x12\x39\n\x12\x64\x65ploymentStrategy\x18\r \x01(\x0b\x32\x1d.container.DeploymentStrategy\x12\x39\n\x10imagePullSecrets\x18\x0e \x03(\x0b\x32\x1f.container.LocalObjectReference\x12\x35\n\x10\x64\x65ploymentStatus\x18\x0f \x01(\x0b\x32\x1b.container.DeploymentStatus\x12\x14\n\x0cresourceSpec\x18\x10 \x01(\t\x12\x0f\n\x07\x63reator\x18\x11 \x01(\t\x12\x19\n\x11\x63reationTimestamp\x18\x12 \x01(\t\x12\r\n\x05state\x18\x13 \x01(\t\x12\x19\n\x11transitionMessage\x18\x14 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[webshell__sdk_dot_model_dot_container_dot_container__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_volume__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_WORKLOAD = _descriptor.Descriptor(
name='Workload',
full_name='container.Workload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='container.Workload.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kind', full_name='container.Workload.kind', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='container.Workload.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='container.Workload.namespace', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceName', full_name='container.Workload.resourceName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containers', full_name='container.Workload.containers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replicas', full_name='container.Workload.replicas', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='volumes', full_name='container.Workload.volumes', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotations', full_name='container.Workload.annotations', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='container.Workload.labels', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dnsPolicy', full_name='container.Workload.dnsPolicy', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restartPolicy', full_name='container.Workload.restartPolicy', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStrategy', full_name='container.Workload.deploymentStrategy', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='imagePullSecrets', full_name='container.Workload.imagePullSecrets', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStatus', full_name='container.Workload.deploymentStatus', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceSpec', full_name='container.Workload.resourceSpec', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='container.Workload.creator', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creationTimestamp', full_name='container.Workload.creationTimestamp', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='container.Workload.state', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transitionMessage', full_name='container.Workload.transitionMessage', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=929,
)
_WORKLOAD.fields_by_name['containers'].message_type = webshell__sdk_dot_model_dot_container_dot_container__pb2._CONTAINERCONFIG
_WORKLOAD.fields_by_name['volumes'].message_type = webshell__sdk_dot_model_dot_container_dot_volume__pb2._VOLUME
_WORKLOAD.fields_by_name['annotations'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['labels'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['deploymentStrategy'].message_type = webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2._DEPLOYMENTSTRATEGY
_WORKLOAD.fields_by_name['imagePullSecrets'].message_type = webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2._LOCALOBJECTREFERENCE
_WORKLOAD.fields_by_name['deploymentStatus'].message_type = webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2._DEPLOYMENTSTATUS
DESCRIPTOR.message_types_by_name['Workload'] = _WORKLOAD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Workload = _reflection.GeneratedProtocolMessageType('Workload', (_message.Message,), {
'DESCRIPTOR' : _WORKLOAD,
'__module__' : 'workload_pb2'
# @@protoc_insertion_point(class_scope:container.Workload)
})
_sym_db.RegisterMessage(Workload)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 57.794521 | 1,533 | 0.771273 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from webshell_sdk.model.container import container_pb2 as webshell__sdk_dot_model_dot_container_dot_container__pb2
from webshell_sdk.model.container import volume_pb2 as webshell__sdk_dot_model_dot_container_dot_volume__pb2
from webshell_sdk.model.container import deployment_strategy_pb2 as webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2
from webshell_sdk.model.container import local_object_reference_pb2 as webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2
from webshell_sdk.model.container import deployment_status_pb2 as webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='workload.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0eworkload.proto\x12\tcontainer\x1a,webshell_sdk/model/container/container.proto\x1a)webshell_sdk/model/container/volume.proto\x1a\x36webshell_sdk/model/container/deployment_strategy.proto\x1a\x39webshell_sdk/model/container/local_object_reference.proto\x1a\x34webshell_sdk/model/container/deployment_status.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe3\x04\n\x08Workload\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04kind\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\t\x12\x14\n\x0cresourceName\x18\x05 \x01(\t\x12.\n\ncontainers\x18\x06 \x03(\x0b\x32\x1a.container.ContainerConfig\x12\x10\n\x08replicas\x18\x07 \x01(\x05\x12\"\n\x07volumes\x18\x08 \x03(\x0b\x32\x11.container.Volume\x12,\n\x0b\x61nnotations\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06labels\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdnsPolicy\x18\x0b \x01(\t\x12\x15\n\rrestartPolicy\x18\x0c \x01(\t\x12\x39\n\x12\x64\x65ploymentStrategy\x18\r \x01(\x0b\x32\x1d.container.DeploymentStrategy\x12\x39\n\x10imagePullSecrets\x18\x0e \x03(\x0b\x32\x1f.container.LocalObjectReference\x12\x35\n\x10\x64\x65ploymentStatus\x18\x0f \x01(\x0b\x32\x1b.container.DeploymentStatus\x12\x14\n\x0cresourceSpec\x18\x10 \x01(\t\x12\x0f\n\x07\x63reator\x18\x11 \x01(\t\x12\x19\n\x11\x63reationTimestamp\x18\x12 \x01(\t\x12\r\n\x05state\x18\x13 \x01(\t\x12\x19\n\x11transitionMessage\x18\x14 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[webshell__sdk_dot_model_dot_container_dot_container__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_volume__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2.DESCRIPTOR,webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_WORKLOAD = _descriptor.Descriptor(
name='Workload',
full_name='container.Workload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='container.Workload.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kind', full_name='container.Workload.kind', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='container.Workload.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='container.Workload.namespace', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceName', full_name='container.Workload.resourceName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containers', full_name='container.Workload.containers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replicas', full_name='container.Workload.replicas', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='volumes', full_name='container.Workload.volumes', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotations', full_name='container.Workload.annotations', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='container.Workload.labels', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dnsPolicy', full_name='container.Workload.dnsPolicy', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restartPolicy', full_name='container.Workload.restartPolicy', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStrategy', full_name='container.Workload.deploymentStrategy', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='imagePullSecrets', full_name='container.Workload.imagePullSecrets', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStatus', full_name='container.Workload.deploymentStatus', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceSpec', full_name='container.Workload.resourceSpec', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='container.Workload.creator', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creationTimestamp', full_name='container.Workload.creationTimestamp', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='container.Workload.state', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transitionMessage', full_name='container.Workload.transitionMessage', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=929,
)
_WORKLOAD.fields_by_name['containers'].message_type = webshell__sdk_dot_model_dot_container_dot_container__pb2._CONTAINERCONFIG
_WORKLOAD.fields_by_name['volumes'].message_type = webshell__sdk_dot_model_dot_container_dot_volume__pb2._VOLUME
_WORKLOAD.fields_by_name['annotations'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['labels'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['deploymentStrategy'].message_type = webshell__sdk_dot_model_dot_container_dot_deployment__strategy__pb2._DEPLOYMENTSTRATEGY
_WORKLOAD.fields_by_name['imagePullSecrets'].message_type = webshell__sdk_dot_model_dot_container_dot_local__object__reference__pb2._LOCALOBJECTREFERENCE
_WORKLOAD.fields_by_name['deploymentStatus'].message_type = webshell__sdk_dot_model_dot_container_dot_deployment__status__pb2._DEPLOYMENTSTATUS
DESCRIPTOR.message_types_by_name['Workload'] = _WORKLOAD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Workload = _reflection.GeneratedProtocolMessageType('Workload', (_message.Message,), {
'DESCRIPTOR' : _WORKLOAD,
'__module__' : 'workload_pb2'
# @@protoc_insertion_point(class_scope:container.Workload)
})
_sym_db.RegisterMessage(Workload)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7fb03e162ec6cae2b6a91be535496fba35f02f5 | 3,080 | py | Python | examples/example_rlm.py | toobaz/statsmodels | 5286dd713a809b0630232508bf9ad5104aae1980 | [
"BSD-3-Clause"
] | 10 | 2016-05-18T11:46:33.000Z | 2018-12-23T04:52:27.000Z | examples/example_rlm.py | AnaMP/statsmodels | 2d4aad9a14619ce0c84d4c7bca9dacd66b2be566 | [
"BSD-3-Clause"
] | null | null | null | examples/example_rlm.py | AnaMP/statsmodels | 2d4aad9a14619ce0c84d4c7bca9dacd66b2be566 | [
"BSD-3-Clause"
] | 3 | 2015-04-01T08:26:54.000Z | 2020-02-14T14:34:10.000Z | """
Robust Linear Models
Notes
-----
The syntax for the arguments will be shortened to accept string arguments
in the future.
"""
# NOTE(review): legacy Python 2 statsmodels example (bare ``print`` statements,
# old sandbox imports) — must be run under Python 2 with a matching statsmodels.
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#Estimating RLM
#--------------
# Load data
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print hub_results.params
print hub_results.bse
varnames = ['var_%d' % i for i in range(len(hub_results.params))]
print hub_results.summary(yname='y', xname=varnames)
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print hub_results2.params
print hub_results2.bse
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(),
        cov='H3')
print andrew_results.params
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for
# scale options
#Comparing OLS and RLM
#---------------------
#Artificial data
#^^^^^^^^^^^^^^^
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [0.5, -0.0, 5.]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig * 1. * np.random.normal(size=nsample)
y2[[39, 41, 43, 45, 48]] -= 5 # add some outliers (10% of nsample)
#Example: quadratic function with linear truth
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print res.params
print res.bse
# NOTE(review): this prints the bound ``predict`` method object, not
# predictions — probably meant ``res.predict()`` or ``res.fittedvalues``.
print res.predict
# Estimate RLM
resrlm = sm.RLM(y2, X).fit()
print resrlm.params
print resrlm.bse
# Draw a plot to compare OLS estimates to the robust estimates
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
prstd, iv_l, iv_u = wls_prediction_std(res);
plt.plot(x1, res.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm.fittedvalues, 'g.-');
#@savefig rlm_ols_0.png
plt.title('blue: true, red: OLS, green: RLM');
#Example: linear function with linear truth
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Fit a new OLS model using only the linear term and the constant
X2 = X[:, [0, 2]]
res2 = sm.OLS(y2, X2).fit()
print res2.params
print res2.bse
# Estimate RLM
resrlm2 = sm.RLM(y2, X2).fit()
print resrlm2.params
print resrlm2.bse
# Draw a plot to compare OLS estimates to the robust estimates
prstd, iv_l, iv_u = wls_prediction_std(res2)
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
plt.plot(x1, res2.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm2.fittedvalues, 'g.-');
#@savefig rlm_ols_1.png
plt.title('blue: true, red: OLS, green: RLM');
| 29.333333 | 79 | 0.675 | """
Robust Linear Models
Notes
-----
The syntax for the arguments will be shortened to accept string arguments
in the future.
"""
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print hub_results.params
print hub_results.bse
varnames = ['var_%d' % i for i in range(len(hub_results.params))]
print hub_results.summary(yname='y', xname=varnames)
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print hub_results2.params
print hub_results2.bse
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(),
cov='H3')
print andrew_results.params
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
sig = 0.3
beta = [0.5, -0.0, 5.]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig * 1. * np.random.normal(size=nsample)
y2[[39, 41, 43, 45, 48]] -= 5
res = sm.OLS(y2, X).fit()
print res.params
print res.bse
print res.predict
resrlm = sm.RLM(y2, X).fit()
print resrlm.params
print resrlm.bse
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
prstd, iv_l, iv_u = wls_prediction_std(res);
plt.plot(x1, res.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm.fittedvalues, 'g.-');
plt.title('blue: true, red: OLS, green: RLM');
X2 = X[:, [0, 2]]
res2 = sm.OLS(y2, X2).fit()
print res2.params
print res2.bse
resrlm2 = sm.RLM(y2, X2).fit()
print resrlm2.params
print resrlm2.bse
prstd, iv_l, iv_u = wls_prediction_std(res2)
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
plt.plot(x1, res2.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm2.fittedvalues, 'g.-');
plt.title('blue: true, red: OLS, green: RLM');
| false | true |
f7fb04d71da48b4b0a5c3652d8746dc8f37be144 | 4,880 | py | Python | NLU/pet/config.py | jinfeihu-stan/IPL | 7c45e122ccaa0b41c94f56ee58206c7468bfa5ef | [
"MIT"
] | 5 | 2022-02-02T01:53:49.000Z | 2022-03-10T07:30:04.000Z | NLU/pet/config.py | jinfeihu-stan/IPL | 7c45e122ccaa0b41c94f56ee58206c7468bfa5ef | [
"MIT"
] | null | null | null | NLU/pet/config.py | jinfeihu-stan/IPL | 7c45e122ccaa0b41c94f56ee58206c7468bfa5ef | [
"MIT"
] | null | null | null | import json
from abc import ABC
from typing import List
class PetConfig(ABC):
    """Base class for PET configurations; instances round-trip through JSON files."""
    def __repr__(self):
        # A config is fully described by its attribute dict.
        return repr(self.__dict__)
    def save(self, path: str):
        """Write every attribute of this config to ``path`` as one JSON object."""
        with open(path, 'w', encoding='utf8') as handle:
            json.dump(self.__dict__, handle)
    @classmethod
    def load(cls, path: str):
        """Rebuild a ``cls`` instance from the JSON object stored at ``path``.

        ``__init__`` is bypassed on purpose; the attribute dict is restored
        verbatim from the file.
        """
        instance = cls.__new__(cls)
        with open(path, 'r', encoding='utf8') as handle:
            instance.__dict__ = json.load(handle)
        return instance
class TrainConfig(PetConfig):
    """Configuration for training a model.

    Arguments map one-to-one onto attributes:
        device: device to train on ('cpu' or 'gpu')
        per_gpu_train_batch_size: labeled training examples per batch and gpu
        n_gpu: number of gpus to use
        num_train_epochs: number of epochs to train for
        max_steps: maximum number of steps (overrides ``num_train_epochs``)
        gradient_accumulation_steps: steps to accumulate gradients for before
            performing an update
        weight_decay: weight decay to use
        learning_rate: maximum learning rate to use
        adam_epsilon: epsilon value for Adam
        warmup_steps: warmup steps before reaching the maximum learning rate
        max_grad_norm: maximum norm for the gradient
        alpha: alpha parameter for auxiliary language modeling
    """
    def __init__(self,
                 device: str = None,
                 per_gpu_train_batch_size: int = 8,
                 n_gpu: int = 1,
                 num_train_epochs: int = 3,
                 max_steps: int = -1,
                 gradient_accumulation_steps: int = 1,
                 weight_decay: float = 0.0,
                 learning_rate: float = 5e-5,
                 adam_epsilon: float = 1e-8,
                 warmup_steps: int = 0,
                 max_grad_norm: float = 1,
                 alpha: float = 0.9999):
        # Hardware placement.
        self.device = device
        self.n_gpu = n_gpu
        # Schedule / batching.
        self.per_gpu_train_batch_size = per_gpu_train_batch_size
        self.num_train_epochs = num_train_epochs
        self.max_steps = max_steps
        self.gradient_accumulation_steps = gradient_accumulation_steps
        # Optimizer hyper-parameters.
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.adam_epsilon = adam_epsilon
        self.warmup_steps = warmup_steps
        self.max_grad_norm = max_grad_norm
        # Auxiliary language-modeling weight.
        self.alpha = alpha
class EvalConfig(PetConfig):
    """Configuration for evaluating a model.

    Arguments map one-to-one onto attributes:
        device: device to evaluate on ('cpu' or 'gpu')
        n_gpu: number of gpus to use
        per_gpu_eval_batch_size: evaluation examples per batch and gpu
        metrics: evaluation metric names (``None`` means accuracy only)
    """
    def __init__(self,
                 device: str = None,
                 n_gpu: int = 1,
                 per_gpu_eval_batch_size: int = 8,
                 metrics: List[str] = None):
        self.device = device
        self.n_gpu = n_gpu
        self.per_gpu_eval_batch_size = per_gpu_eval_batch_size
        self.metrics = metrics
class WrapperConfig(object):
    """A configuration for a :class:`TransformerModelWrapper`.

    Arguments map one-to-one onto attributes:
        model_type: transformer model family identifier (e.g. 'bert')
        model_name_or_path: pretrained checkpoint name or local path
        task_name: name of the task the wrapper handles
        max_seq_length: maximum tokenized sequence length
        label_list: all labels of the task
        pattern_id: id of the pattern to use
        cache_dir: optional cache directory for pretrained weights
        output_dir: optional output directory
        prompt_length: number of prompt tokens
        embedding_dim: dimensionality of the prompt embeddings
        prompt_encoder_type: encoder used for the prompt (default 'mlp')
        prompt_tuning: whether prompt tuning is enabled
        prompt_head_size: size of the prompt head
        eval_every_step: evaluation frequency in training steps
    """
    def __init__(self,
                 model_type: str,
                 model_name_or_path: str,
                 task_name: str,
                 max_seq_length: int,
                 label_list: List[str],
                 pattern_id: int = 0,
                 cache_dir: str = None,
                 output_dir=None,
                 prompt_length=16,
                 embedding_dim=768,
                 prompt_encoder_type="mlp",
                 prompt_tuning: bool = False,
                 prompt_head_size: int = 64,
                 eval_every_step=20):
        # Fixed: the original assigned ``self.model_type`` twice and carried
        # commented-out ``embed_size`` dead code; both removed.
        self.model_type = model_type
        self.model_name_or_path = model_name_or_path
        self.task_name = task_name
        self.max_seq_length = max_seq_length
        self.label_list = label_list
        self.pattern_id = pattern_id
        self.cache_dir = cache_dir
        self.output_dir = output_dir
        self.embedding_dim = embedding_dim
        self.prompt_encoder_type = prompt_encoder_type
        self.prompt_tuning = prompt_tuning
        self.prompt_head_size = prompt_head_size
        self.prompt_length = prompt_length
        self.eval_every_step = eval_every_step
| 37.251908 | 119 | 0.608197 | import json
from abc import ABC
from typing import List
class PetConfig(ABC):
def __repr__(self):
return repr(self.__dict__)
def save(self, path: str):
with open(path, 'w', encoding='utf8') as fh:
json.dump(self.__dict__, fh)
@classmethod
def load(cls, path: str):
cfg = cls.__new__(cls)
with open(path, 'r', encoding='utf8') as fh:
cfg.__dict__ = json.load(fh)
return cfg
class TrainConfig(PetConfig):
def __init__(self,
device: str = None,
per_gpu_train_batch_size: int = 8,
n_gpu: int = 1,
num_train_epochs: int = 3,
max_steps: int = -1,
gradient_accumulation_steps: int = 1,
weight_decay: float = 0.0,
learning_rate: float = 5e-5,
adam_epsilon: float = 1e-8,
warmup_steps: int = 0,
max_grad_norm: float = 1,
alpha: float = 0.9999):
self.device = device
self.per_gpu_train_batch_size = per_gpu_train_batch_size
self.n_gpu = n_gpu
self.num_train_epochs = num_train_epochs
self.max_steps = max_steps
self.gradient_accumulation_steps = gradient_accumulation_steps
self.weight_decay = weight_decay
self.learning_rate = learning_rate
self.adam_epsilon = adam_epsilon
self.warmup_steps = warmup_steps
self.max_grad_norm = max_grad_norm
self.alpha = alpha
class EvalConfig(PetConfig):
def __init__(self,
device: str = None,
n_gpu: int = 1,
per_gpu_eval_batch_size: int = 8,
metrics: List[str] = None):
self.device = device
self.n_gpu = n_gpu
self.per_gpu_eval_batch_size = per_gpu_eval_batch_size
self.metrics = metrics
class WrapperConfig(object):
def __init__(self,
model_type: str,
model_name_or_path: str,
task_name: str,
max_seq_length: int,
label_list: List[str],
pattern_id: int = 0,
cache_dir: str = None,
output_dir=None,
prompt_length =16,
embedding_dim =768,
prompt_encoder_type="mlp",
prompt_tuning: bool = False,
prompt_head_size: int = 64,
eval_every_step=20):
self.model_type = model_type
self.model_type = model_type
self.model_name_or_path = model_name_or_path
self.task_name = task_name
self.max_seq_length = max_seq_length
self.label_list = label_list
self.pattern_id = pattern_id
self.cache_dir = cache_dir
self.output_dir = output_dir
self.embedding_dim = embedding_dim
self.prompt_encoder_type = prompt_encoder_type
self.prompt_tuning = prompt_tuning
self.prompt_head_size = prompt_head_size
self.prompt_length = prompt_length
self.eval_every_step = eval_every_step
| true | true |
f7fb053bc9486bdef2415fecb72c6eab3c45bbdf | 433 | py | Python | pytrovich/enums.py | alexeyev/pytrovich | 81c8d254f8b233abe275eaea1fc05e1e6dc0edd2 | [
"MIT"
] | 19 | 2020-06-03T05:56:04.000Z | 2022-01-28T09:27:07.000Z | pytrovich/enums.py | alexeyev/pytrovich | 81c8d254f8b233abe275eaea1fc05e1e6dc0edd2 | [
"MIT"
] | 2 | 2021-05-12T15:06:01.000Z | 2022-01-22T21:41:55.000Z | pytrovich/enums.py | petrovich/pytrovich | 81c8d254f8b233abe275eaea1fc05e1e6dc0edd2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from enum import Enum
class LowerCaseNameEnum(Enum):
    """Enum base whose members expose their name in lower case via ``.str()``."""
    def str(self):
        # Deliberately named ``str`` (public API); it only shadows the builtin
        # inside this class namespace.
        return self.name.lower()
class Case(LowerCaseNameEnum):
    """Target grammatical cases for declension.

    NOTE(review): the nominative is absent — presumably it is the source
    form; verify against callers.
    """
    GENITIVE = 0
    DATIVE = 1
    ACCUSATIVE = 2
    INSTRUMENTAL = 3
    PREPOSITIONAL = 4
class Gender(LowerCaseNameEnum):
    """Grammatical gender of a name (androgynous = usable for either)."""
    MALE = 0
    FEMALE = 1
    ANDROGYNOUS = 2
class NamePart(LowerCaseNameEnum):
    """Components of a person's full name."""
    LASTNAME = 0
    FIRSTNAME = 1
    MIDDLENAME = 2
| 14.931034 | 34 | 0.635104 |
from enum import Enum
class LowerCaseNameEnum(Enum):
def str(self):
return self.name.lower()
class Case(LowerCaseNameEnum):
GENITIVE = 0
DATIVE = 1
ACCUSATIVE = 2
INSTRUMENTAL = 3
PREPOSITIONAL = 4
class Gender(LowerCaseNameEnum):
MALE = 0
FEMALE = 1
ANDROGYNOUS = 2
class NamePart(LowerCaseNameEnum):
LASTNAME = 0
FIRSTNAME = 1
MIDDLENAME = 2
| true | true |
f7fb0700d44ecf2fac95836495a34e2e9ea5bcc0 | 10,436 | py | Python | bin/generate_BED_file_of_endpoints.py | wjidea/RILseq | 5419a83344cd74098039e8953eacbba930b91e00 | [
"MIT"
] | null | null | null | bin/generate_BED_file_of_endpoints.py | wjidea/RILseq | 5419a83344cd74098039e8953eacbba930b91e00 | [
"MIT"
] | null | null | null | bin/generate_BED_file_of_endpoints.py | wjidea/RILseq | 5419a83344cd74098039e8953eacbba930b91e00 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Given already mapped fusions using the reads file (format:
gene1 gene2 position1 strand1 position2 strand2 read_name)
Use the original BAM to plot the ends of the reads as BED file to be presented
by the genome browser.
Color code as specified in the parametrs
"""
import sys
import argparse
import csv
from collections import defaultdict
import random
from Bio.Seq import Seq
import pysam
from Bio import SeqIO
import RILseq
def process_command_line(argv):
    """Parse command-line arguments and return the resulting namespace.

    `argv` is a list of arguments, or `None` to fall back on ``sys.argv[1:]``.
    (The function returns a single ``argparse.Namespace``, not a tuple.)
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description='Generate BED graph of the reads.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    # Positional arguments.
    add('genome',
        help='genome fasta file.')
    add('list_reads',
        help='File with list of reads and their fused positions.')
    add('track_name',
        help='Name of track')
    add('track_desc',
        help='Description of the track')
    add('bamfiles', action='append', nargs='+',
        help='The original bam file (or several files) with the full reads.')
    # Options.
    add('-r', '--reverse', default=False, action='store_true',
        help='The original bam file is the reverse complement of the RNA.')
    add('-s', '--summary',
        help='Print only reads that are found to be significant in this summary file.')
    add('-e', '--gene_name',
        help='Print reads involve only this gene (EcoCyc ID), '
        'applies only with -s')
    add('--rand_score', default=False, action='store_true',
        help='Set a random score (0-1000) for each read, this will allow to '
        'present some of the reads in UCSC genome browser.')
    # Colors of the two fragment halves on each strand.
    add('--pos_first', default='255,0,0',
        help='Color of first part, positive strand.')
    add('--pos_second', default='51,102,255',
        help='Color of second part, positive strand.')
    add('--rev_first', default='255,0,0',
        help='Color of first part, reverse strand.')
    add('--rev_second', default='51,102,255',
        help='Color of second part, reverse strand.')
    add('--EC_chrlist', default='COLI-K12,chr',
        help='A comma separated dictionary of chromosome names from the EcoCyc'
        ' names to the bam file names. The names in the bam file should be '
        ' the same as the UCSC genome browser (they will be printed).')
    return parser.parse_args(argv)
def get_reads_seqs(bamfile, rnames, rev=False):
    """Return two dicts mapping read name -> mate sequence from the bam file.

    Arguments:
    - `bamfile`: an open pysam file
    - `rnames`: read names of interest
    - `rev`: the library is the reverse complement of the RNA

    Per the original note, the first dict holds the 3' end of the second
    fused RNA and the second dict the 5' end of the first fused RNA; each
    sequence is reverse-complemented as needed to a consistent orientation.
    """
    mate_3p = {}
    mate_5p = {}
    by_name = defaultdict(list)
    for read in bamfile.fetch(until_eof=True):
        by_name[read.qname].append(read)
    for name in set(rnames) & set(by_name):
        for read in by_name[name]:
            seq = Seq(read.seq)
            if read.is_read1 == rev:
                # Mate carrying the fragment's 3' side.
                if not read.is_reverse:
                    seq = seq.reverse_complement()
                mate_3p[read.qname] = str(seq)
            else:
                # Mate carrying the fragment's 5' side.
                if read.is_reverse:
                    seq = seq.reverse_complement()
                mate_5p[read.qname] = str(seq)
    return mate_3p, mate_5p
def extend_alignment(rseq, pos5p, pos3p, is_read1, strand, genome, mismatch=1):
    """Extend the read along the genome from its anchor and return the length.

    The four direction/strand combinations differ only in how a read offset
    maps to a genome base, so that mapping is chosen once and a single
    scanning loop is shared.

    Arguments:
    - `rseq`: read sequence
    - `pos5p`: the 5' position, exact if read 2 or as limit if read 1
    - `pos3p`: the 3' position, exact if read 1 or as limit if read 2
    - `is_read1`: this read is read 1 (scan the read from its 3' end)
    - `strand`: mapping strand ('+' or '-')
    - `genome`: the genome sequence; positions wrap via modulo (circular)
    - `mismatch`: number of allowed mismatches
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    glen = len(genome)
    if is_read1:
        # Walk the read backwards from its last base.
        def read_at(i):
            return rseq[-(i + 1)]
        if strand == '-':
            def genome_at(i):
                return complement[genome[(pos3p + i) % glen]]
        else:
            def genome_at(i):
                return genome[(pos3p - i) % glen]
    else:
        # Walk the read forwards from its first base.
        def read_at(i):
            return rseq[i]
        if strand == '-':
            def genome_at(i):
                return complement[genome[(pos5p - i) % glen]]
        else:
            def genome_at(i):
                return genome[(pos5p + i) % glen]
    offset = 0
    for _ in range(mismatch + 1):
        try:
            while genome_at(offset) == read_at(offset):
                offset += 1
        except IndexError:
            # Read exhausted.  NOTE(review): returns offset - 1 here, matching
            # the original behavior — possibly intended to stay on the last
            # matched base; confirm before changing.
            return offset - 1
        # Mismatch: consume one allowance and step past the offending base.
        offset += 1
    return offset
def find_overlap(s1, s2):
    """Find the longest suffix of ``s1`` that is a prefix of ``s2``.

    Both reads are assumed to be in the same orientation (r1 already
    reverse-complemented).  Returns three sequences: the part unique to
    ``s1``, the overlap, and the part unique to ``s2``.  With no overlap,
    the originals are returned around an empty middle string.

    Arguments:
    - `s1`: first sequence, this is mate 2 actually in our experiments
    - `s2`: last sequence, mate 1
    """
    # Scan from the longest possible overlap down to one base.  The original
    # used range(min)[::-1], which (a) never tested a full-length overlap
    # (i == min was excluded) and (b) wasted an i == 0 probe that compared
    # all of s1 (``s1[-0:]``) against the empty prefix of s2.
    for i in range(min(len(s1), len(s2)), 0, -1):
        if s1[-i:] == s2[:i]:
            return s1[:-i], s1[-i:], s2[i:]
    return s1, '', s2
def main(argv=None):
    """Write a BED track of fused-read endpoints to stdout; return 0 on success.

    NOTE(review): legacy Python 2 (bare ``print`` statement below).
    """
    settings = process_command_line(argv)
    # Read the read names and positions
    read_5ps = {}
    read_3ps = {}
    read_genes = {}
    genome = {}
    gsize = {}
    for sr in SeqIO.parse(settings.genome, 'fasta'):
        genome[sr.id] = sr.seq
        gsize[sr.id] = len(sr.seq)
    # Build the EcoCyc -> bam/UCSC chromosome-name map from the flattened
    # comma-separated pairs in --EC_chrlist.
    if len(settings.EC_chrlist) >= 2:
        chr_dict = dict(zip(
            settings.EC_chrlist.split(',')[0::2],
            settings.EC_chrlist.split(',')[1::2]))
    else:
        chr_dict = {}
    if settings.summary:
        sig_reads = RILseq.read_significant_reads(
            settings.summary, chr_dict, gname=settings.gene_name)
    for line in csv.reader(open(settings.list_reads), delimiter='\t'):
        # skip single
        if len(line) > 7 and line[7]=="single":
            continue
        # With -s, keep only reads whose endpoints (0-based position, strand,
        # chromosome) were called significant.
        if settings.summary:
            if (int(line[4])-1, line[5], line[3]) not in\
               sig_reads[(int(line[1])-1, line[2], line[0])]:
                continue
        read_5ps[line[6]] = [int(line[1])-1, line[2], line[0]]
        read_3ps[line[6]] = [int(line[4])-1, line[5], line[3]]
        # read_genes[line[6]] = [line[0], line[1]]
    # Read the bam files and return the long sequences
    r1_seqs = {}
    r2_seqs = {}
    for bamfile in list(RILseq.flat_list(settings.bamfiles)):
        r1s, r2s = get_reads_seqs(
            pysam.Samfile(bamfile), read_5ps.keys(), rev=settings.reverse)
        r1_seqs.update(r1s)
        r2_seqs.update(r2s)
    # For each read find the overlap, if exists and find the fusion point
    outer = csv.writer(sys.stdout, delimiter='\t')
    print 'track name="%s" description="%s" visibility=4 itemRgb="On" useScore=0'%(
        settings.track_name, settings.track_desc)
    # Because I'm lazy, the code is written so r1 is the 3' end of the fragment
    for rname in set(r2_seqs.keys()):
        if rname in r1_seqs:
            r2seq = r2_seqs[rname]
            r1seq = r1_seqs[rname]
        else: # single-end
            r2seq = r2_seqs[rname]
            r1seq = ''
        s1, overlap, s2 = find_overlap(r2seq, r1seq)
        # Extend both sides of the merged fragment along the genome to obtain
        # the drawn lengths for the two BED features.
        side_5p_len = extend_alignment(
            s1+overlap+s2, read_5ps[rname][0], 0, False, read_5ps[rname][1],
            genome[read_5ps[rname][2]])
        side_3p_len = extend_alignment(
            s1+overlap+s2, 0, read_3ps[rname][0], True, read_3ps[rname][1],
            genome[read_3ps[rname][2]])
        # Write each of the sides to the output file
        score=0
        if settings.rand_score:
            score=random.randint(0, 1000)
        # Coordinates are clamped to [0, chromosome length]; colors come from
        # the --pos_*/--rev_* options.
        if read_5ps[rname][1] == '+':
            gfrom = max(0, read_5ps[rname][0])
            gto = min(gsize[read_5ps[rname][2]], read_5ps[rname][0]+side_5p_len)
            outer.writerow([
                read_5ps[rname][2], gfrom, gto, "%s_5p"%rname, score, '+',
                gfrom, gto, settings.pos_first])
        elif read_5ps[rname][1] == '-':
            gfrom = max(0, read_5ps[rname][0]-side_5p_len+1)
            gto = min(gsize[read_5ps[rname][2]], read_5ps[rname][0]+1)
            outer.writerow([
                read_5ps[rname][2], gfrom, gto, "%s_5p"%rname, score, '-',
                gfrom, gto,settings.rev_first])
        if read_3ps[rname][1] == '+':
            gfrom = max(0, read_3ps[rname][0]-side_3p_len+1)
            gto = min(gsize[read_3ps[rname][2]], read_3ps[rname][0]+1)
            outer.writerow([
                read_3ps[rname][2], gfrom, gto,"%s_3p"%rname, score, '+',
                gfrom, gto, settings.pos_second])
        elif read_3ps[rname][1] == '-':
            gfrom = max(0, read_3ps[rname][0])
            gto = min(gsize[read_3ps[rname][2]], read_3ps[rname][0]+side_3p_len)
            outer.writerow([
                read_3ps[rname][2], gfrom, gto, "%s_3p"%rname, score, '-',
                gfrom, gto, settings.rev_second])
    return 0 # success
if __name__ == '__main__':
    # Script entry point: propagate main()'s return code to the shell.
    status = main()
    sys.exit(status)
| 36.746479 | 87 | 0.559985 |
"""
Given already mapped fusions using the reads file (format:
gene1 gene2 position1 strand1 position2 strand2 read_name)
Use the original BAM to plot the ends of the reads as BED file to be presented
by the genome browser.
Color code as specified in the parametrs
"""
import sys
import argparse
import csv
from collections import defaultdict
import random
from Bio.Seq import Seq
import pysam
from Bio import SeqIO
import RILseq
def process_command_line(argv):
"""
Return a 2-tuple: (settings object, args list).
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description='Generate BED graph of the reads.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'genome',
help='genome fasta file.')
parser.add_argument(
'list_reads',
help='File with list of reads and their fused positions.')
parser.add_argument(
'track_name',
help='Name of track')
parser.add_argument(
'track_desc',
help='Description of the track')
parser.add_argument(
'bamfiles', action='append', nargs='+',
help='The original bam file (or several files) with the full reads.')
parser.add_argument(
'-r', '--reverse', default=False, action='store_true',
help='The original bam file is the reverse complement of the RNA.')
parser.add_argument(
'-s', '--summary',
help='Print only reads that are found to be significant in this summary file.')
parser.add_argument(
'-e', '--gene_name',
help='Print reads involve only this gene (EcoCyc ID), '
'applies only with -s')
parser.add_argument(
'--rand_score', default=False, action='store_true',
help='Set a random score (0-1000) for each read, this will allow to '
'present some of the reads in UCSC genome browser.')
parser.add_argument(
'--pos_first', default='255,0,0',
help='Color of first part, positive strand.')
parser.add_argument(
'--pos_second', default='51,102,255',
help='Color of second part, positive strand.')
parser.add_argument(
'--rev_first', default='255,0,0',
help='Color of first part, reverse strand.')
parser.add_argument(
'--rev_second', default='51,102,255',
help='Color of second part, reverse strand.')
parser.add_argument(
'--EC_chrlist', default='COLI-K12,chr',
help='A comma separated dictionary of chromosome names from the EcoCyc'
' names to the bam file names. The names in the bam file should be '
' the same as the UCSC genome browser (they will be printed).')
settings = parser.parse_args(argv)
return settings
def get_reads_seqs(bamfile, rnames, rev=False):
"""
Return the sequences of all the reads from the bam file
Arguments:
- `bamfile`: The pysam file
- `rnames`: reads names
"""
r1_seqs = {}
r2_seqs = {}
rqns = set()
reads = defaultdict(list)
for read in bamfile.fetch(until_eof=True):
rqns.add(read.qname)
reads[read.qname].append(read)
for rn in set(rnames) & rqns:
for read in reads[rn]:
if read.is_read1==rev:
outseq = Seq(read.seq)
if not read.is_reverse:
outseq = outseq.reverse_complement()
r1_seqs[read.qname] = str(outseq)
else:
outseq = Seq(read.seq)
if read.is_reverse:
outseq = outseq.reverse_complement()
r2_seqs[read.qname] = str(outseq)
return r1_seqs, r2_seqs
def extend_alignment(rseq, pos5p, pos3p, is_read1, strand, genome, mismatch=1):
"""
Align the rseq to the genome in the specified position. Return the last
position of the read mapped to the genome.
Use local alignment
Arguments:
- `rseq`: Read sequence
- `pos5p`: the 5' position, exact if read 2 or as limit if read 1
- `pos3p`: the 3' position, exact if read 1 or as limit if read 2
- `is_read1`: This read is read 1
- `strand`: mapping strand
- `genome`: The genome Seq object
- `mismatch`: allowed mismatches
"""
rcnt = {'A':'T', 'C':'G', 'G':'C', 'T':'A'}
glen = len(genome)
if is_read1:
if strand == '-':
ipos = 0
for _ in range(mismatch+1):
try:
while rcnt[genome[(pos3p+ipos)%glen]] == rseq[-(ipos+1)]:
ipos += 1
except IndexError:
return ipos - 1
ipos += 1
return ipos
else:
ipos = 0
for _ in range(mismatch+1):
try:
while genome[(pos3p-ipos)%glen] == rseq[-(ipos+1)]:
ipos += 1
except IndexError:
return ipos-1
ipos += 1
return ipos
else:
if strand == '-':
ipos = 0
for _ in range(mismatch+1):
try:
while rcnt[genome[(pos5p-ipos)%glen]] == rseq[ipos]:
ipos += 1
except IndexError:
return ipos -1
ipos += 1
return ipos
else:
ipos = 0
for _ in range(mismatch+1):
try:
while genome[(pos5p+ipos)%glen] == rseq[ipos]:
ipos += 1
except IndexError:
return ipos - 1
ipos += 1
return ipos
def find_overlap(s1, s2):
"""
Find overlaps between two reads. Assume they are both in the same
orientation (r1 is revcomp)
Return 3 seuqnces: s1, overlap, s2
Arguments:
- `s1`: first sequence, this is mate 2 actually in our experiments
- `s2`: last sequence, mate 1
"""
for i in range(min(len(s1), len(s2)))[::-1]:
if s1[-i:]==s2[:i]:
return s1[:-i], s1[-i:], s2[i:]
return s1, '', s2
def main(argv=None):
settings = process_command_line(argv)
# Read the read names and positions
read_5ps = {}
read_3ps = {}
read_genes = {}
genome = {}
gsize = {}
for sr in SeqIO.parse(settings.genome, 'fasta'):
genome[sr.id] = sr.seq
gsize[sr.id] = len(sr.seq)
if len(settings.EC_chrlist) >= 2:
chr_dict = dict(zip(
settings.EC_chrlist.split(',')[0::2],
settings.EC_chrlist.split(',')[1::2]))
else:
chr_dict = {}
if settings.summary:
sig_reads = RILseq.read_significant_reads(
settings.summary, chr_dict, gname=settings.gene_name)
for line in csv.reader(open(settings.list_reads), delimiter='\t'):
# skip single
if len(line) > 7 and line[7]=="single":
continue
if settings.summary:
if (int(line[4])-1, line[5], line[3]) not in\
sig_reads[(int(line[1])-1, line[2], line[0])]:
continue
read_5ps[line[6]] = [int(line[1])-1, line[2], line[0]]
read_3ps[line[6]] = [int(line[4])-1, line[5], line[3]]
# read_genes[line[6]] = [line[0], line[1]]
# Read the bam files and return the long sequences
r1_seqs = {}
r2_seqs = {}
for bamfile in list(RILseq.flat_list(settings.bamfiles)):
r1s, r2s = get_reads_seqs(
pysam.Samfile(bamfile), read_5ps.keys(), rev=settings.reverse)
r1_seqs.update(r1s)
r2_seqs.update(r2s)
# For each read find the overlap, if exists and find the fusion point
outer = csv.writer(sys.stdout, delimiter='\t')
print 'track name="%s" description="%s" visibility=4 itemRgb="On" useScore=0'%(
settings.track_name, settings.track_desc)
# Because I'm lazy, the code is written so r1 is the 3' end of the fragment
for rname in set(r2_seqs.keys()):
if rname in r1_seqs:
r2seq = r2_seqs[rname]
r1seq = r1_seqs[rname]
else: # single-end
r2seq = r2_seqs[rname]
r1seq = ''
s1, overlap, s2 = find_overlap(r2seq, r1seq)
side_5p_len = extend_alignment(
s1+overlap+s2, read_5ps[rname][0], 0, False, read_5ps[rname][1],
genome[read_5ps[rname][2]])
side_3p_len = extend_alignment(
s1+overlap+s2, 0, read_3ps[rname][0], True, read_3ps[rname][1],
genome[read_3ps[rname][2]])
# Write each of the sides to the output file
score=0
if settings.rand_score:
score=random.randint(0, 1000)
if read_5ps[rname][1] == '+':
gfrom = max(0, read_5ps[rname][0])
gto = min(gsize[read_5ps[rname][2]], read_5ps[rname][0]+side_5p_len)
outer.writerow([
read_5ps[rname][2], gfrom, gto, "%s_5p"%rname, score, '+',
gfrom, gto, settings.pos_first])
elif read_5ps[rname][1] == '-':
gfrom = max(0, read_5ps[rname][0]-side_5p_len+1)
gto = min(gsize[read_5ps[rname][2]], read_5ps[rname][0]+1)
outer.writerow([
read_5ps[rname][2], gfrom, gto, "%s_5p"%rname, score, '-',
gfrom, gto,settings.rev_first])
if read_3ps[rname][1] == '+':
gfrom = max(0, read_3ps[rname][0]-side_3p_len+1)
gto = min(gsize[read_3ps[rname][2]], read_3ps[rname][0]+1)
outer.writerow([
read_3ps[rname][2], gfrom, gto,"%s_3p"%rname, score, '+',
gfrom, gto, settings.pos_second])
elif read_3ps[rname][1] == '-':
gfrom = max(0, read_3ps[rname][0])
gto = min(gsize[read_3ps[rname][2]], read_3ps[rname][0]+side_3p_len)
outer.writerow([
read_3ps[rname][2], gfrom, gto, "%s_3p"%rname, score, '-',
gfrom, gto, settings.rev_second])
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)
| false | true |
f7fb08a3e6e605eb8c5d6f5590b2ce3af4eb9854 | 1,457 | py | Python | backend/backend/settings/deps/ledger.py | AurelienGasser/substra-backend | c963f9b0521c7ebd878ea42fd9be9acfddf9f61d | [
"Apache-2.0"
] | 37 | 2019-10-25T13:31:20.000Z | 2021-05-29T05:27:50.000Z | backend/backend/settings/deps/ledger.py | AurelienGasser/substra-backend | c963f9b0521c7ebd878ea42fd9be9acfddf9f61d | [
"Apache-2.0"
] | 217 | 2019-10-29T16:01:03.000Z | 2021-05-25T13:06:29.000Z | backend/backend/settings/deps/ledger.py | AurelienGasser/substra-backend | c963f9b0521c7ebd878ea42fd9be9acfddf9f61d | [
"Apache-2.0"
] | 13 | 2019-10-25T13:46:36.000Z | 2021-03-16T16:59:04.000Z | import os
import json
# Settings fragment for the Hyperledger Fabric ledger connection.
# NOTE(review): relies on ``os`` being imported earlier in the file; the env
# vars read with json.loads()/int() below are mandatory — a missing one
# (os.getenv -> None) crashes at import time.
# LEDGER_CHANNELS holds a JSON list of dicts; flatten into
# {channel_name: channel_settings}.
LEDGER_CHANNELS = {
    channel: settings
    for channels in json.loads(os.getenv('LEDGER_CHANNELS'))
    for channel, settings in channels.items()
}
# Identity on the ledger.
LEDGER_MSP_ID = os.getenv('LEDGER_MSP_ID')
LEDGER_USER_NAME = os.getenv('LEDGER_USER_NAME')
# Peer endpoint.
LEDGER_PEER_HOST = os.getenv('LEDGER_PEER_HOST')
LEDGER_PEER_PORT = int(os.getenv('LEDGER_PEER_PORT'))
LEDGER_PEER_NAME = 'peer'
# TLS material paths (fixed mount points).
LEDGER_PEER_TLS_CA_CERTS = '/var/hyperledger/ca/cacert.pem'
LEDGER_PEER_TLS_CLIENT_KEY = '/var/hyperledger/tls/client/pair/tls.key'
LEDGER_PEER_TLS_CLIENT_CERT = '/var/hyperledger/tls/client/pair/tls.crt'
# Client MSP material and hfc state-store location.
LEDGER_CLIENT_STATE_STORE = '/var/substra/hfc-cvs'
LEDGER_CLIENT_KEY_PATH = '/var/hyperledger/msp/keystore/*'
LEDGER_CLIENT_CERT_PATH = '/var/hyperledger/msp/signcerts/cert.pem'
LEDGER_SYNC_ENABLED = True
LEDGER_CALL_RETRY = True
# Invoke/query behaviour.
LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS = int(os.getenv('LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS'))
LEDGER_INVOKE_STRATEGY = os.getenv('LEDGER_INVOKE_STRATEGY')
LEDGER_QUERY_STRATEGY = os.getenv('LEDGER_QUERY_STRATEGY')
# gRPC channel tuning (-1 = no limit per gRPC channel-arg convention).
LEDGER_GRPC_MAX_SEND_MESSAGE_LENGTH = -1
LEDGER_GRPC_MAX_RECEIVE_MESSAGE_LENGTH = -1
LEDGER_GRPC_KEEPALIVE_TIMEOUT_MS = 20000
LEDGER_GRPC_HTTP2_MAX_PINGS_WITHOUT_DATA = 0
LEDGER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = 1
LEDGER_GRPC_KEEPALIVE_TIME_MS = int(os.getenv('LEDGER_GRPC_KEEPALIVE_TIME_MS'))
LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS = int(os.getenv('LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS'))
| 39.378378 | 107 | 0.829101 | import os
import json
LEDGER_CHANNELS = {
channel: settings
for channels in json.loads(os.getenv('LEDGER_CHANNELS'))
for channel, settings in channels.items()
}
LEDGER_MSP_ID = os.getenv('LEDGER_MSP_ID')
LEDGER_USER_NAME = os.getenv('LEDGER_USER_NAME')
LEDGER_PEER_HOST = os.getenv('LEDGER_PEER_HOST')
LEDGER_PEER_PORT = int(os.getenv('LEDGER_PEER_PORT'))
LEDGER_PEER_NAME = 'peer'
LEDGER_PEER_TLS_CA_CERTS = '/var/hyperledger/ca/cacert.pem'
LEDGER_PEER_TLS_CLIENT_KEY = '/var/hyperledger/tls/client/pair/tls.key'
LEDGER_PEER_TLS_CLIENT_CERT = '/var/hyperledger/tls/client/pair/tls.crt'
LEDGER_CLIENT_STATE_STORE = '/var/substra/hfc-cvs'
LEDGER_CLIENT_KEY_PATH = '/var/hyperledger/msp/keystore/*'
LEDGER_CLIENT_CERT_PATH = '/var/hyperledger/msp/signcerts/cert.pem'
LEDGER_SYNC_ENABLED = True
LEDGER_CALL_RETRY = True
LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS = int(os.getenv('LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS'))
LEDGER_INVOKE_STRATEGY = os.getenv('LEDGER_INVOKE_STRATEGY')
LEDGER_QUERY_STRATEGY = os.getenv('LEDGER_QUERY_STRATEGY')
LEDGER_GRPC_MAX_SEND_MESSAGE_LENGTH = -1
LEDGER_GRPC_MAX_RECEIVE_MESSAGE_LENGTH = -1
LEDGER_GRPC_KEEPALIVE_TIMEOUT_MS = 20000
LEDGER_GRPC_HTTP2_MAX_PINGS_WITHOUT_DATA = 0
LEDGER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = 1
LEDGER_GRPC_KEEPALIVE_TIME_MS = int(os.getenv('LEDGER_GRPC_KEEPALIVE_TIME_MS'))
LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS = int(os.getenv('LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS'))
| true | true |
f7fb0b8e4b048cbec573906d9754b1536c47d721 | 4,727 | py | Python | tree/ID3_Clf.py | HermitSun/ML_for_learner | 501634b35fb5bad2576d5a0c3b144603ba61336c | [
"Apache-2.0"
] | 81 | 2019-03-07T03:19:53.000Z | 2022-03-12T14:19:22.000Z | tree/ID3_Clf.py | a910773203/ML_for_learner | 3014c641800c1408668ff243395bde752c45ec43 | [
"Apache-2.0"
] | null | null | null | tree/ID3_Clf.py | a910773203/ML_for_learner | 3014c641800c1408668ff243395bde752c45ec43 | [
"Apache-2.0"
] | 50 | 2019-08-17T06:44:59.000Z | 2021-12-30T05:39:44.000Z | import pandas as pd
import numpy as np
class ID3:
def __init__(self):
self.tree = None
self.dataset = None
def __entropy(self, feature):
uni_val, cnt = np.unique(feature, return_counts=True) # 返回独特值与计数
# 熵的计算
H = np.sum([(-cnt[i] / np.sum(cnt)) * np.log2(cnt[i] / np.sum(cnt))
for i in range(len(uni_val))])
return H
def __InfoGain(self, dataset, f_test_col, Y_col=-1):
entropy_before = self.__entropy(dataset.iloc[:, Y_col]) # 分割前的熵
uni_val, cnt = np.unique(dataset.iloc[:, f_test_col], return_counts=True) # 计算分割特征的独特值与计数
entropy_cond = np.sum([(cnt[i] / np.sum(cnt)) * self.__entropy(dataset.where(dataset.iloc[:, f_test_col]
== uni_val[i]).dropna().iloc[:,
Y_col])
for i in range(len(uni_val))])
return entropy_before - entropy_cond
def __gen_tree(self, dataset, org_dataset, f_cols, Y_col=-1, p_node_cls=None):
'''
dataset: 用于分割的数据
org_dataset: 最原始的数据,全部数据
f_cols: 备选特征
'''
# 如果数据中的Y已经纯净了,则返回Y的取值
if len(np.unique(dataset.iloc[:, Y_col])) <= 1:
return np.unique(dataset.iloc[:, Y_col])[0]
# 如果传入数据为空(对应空叶节点),则返回原始数据中数量较多的label值
elif len(dataset) == 0:
uni_cls, cnt = np.unique(
org_dataset.iloc[:, Y_col], return_counts=True)
return uni_cls[np.argmax(cnt)]
# 如果没有特征可用于划分,则返回父节点中数量较多的label值
# 由于初始传入的是Index类型,所以这里不能用if not
elif len(f_cols) == 0:
return p_node_cls
# 否则进行分裂
else:
# 得到当前节点中数量最多的label,递归时会赋给下层函数的p_node_cls
cur_uni_cls, cnt = np.unique(
dataset.iloc[:, Y_col], return_counts=True)
cur_node_cls = cur_uni_cls[np.argmax(cnt)]
del cur_uni_cls, cnt
# 根据信息增益选出最佳分裂特征
gains = [self.__InfoGain(dataset, f_col, Y_col) for f_col in f_cols]
best_f = f_cols[np.argmax(gains)]
# 更新备选特征
f_cols = [col for col in f_cols if col != best_f]
# 按最佳特征的不同取值,划分数据集并递归
tree = {best_f: {}}
for val in np.unique(dataset.iloc[:, best_f]): # ID3对每一个取值都划分数据集
sub_data = dataset.where(dataset.iloc[:, best_f] == val).dropna()
sub_tree = self.__gen_tree(sub_data, dataset, f_cols, Y_col, cur_node_cls)
tree[best_f][val] = sub_tree # 分裂特征的某一取值,对应一颗子树或叶节点
return tree
def fit(self, X_train, Y_train):
dataset = np.c_[X_train, Y_train]
self.dataset = pd.DataFrame(dataset, columns=list(range(dataset.shape[1])))
self.tree = self.__gen_tree(self.dataset, self.dataset, list(range(self.dataset.shape[1] - 1)))
def __predict_one(self, x_test, tree, default=-1):
'''
query:一个测试样本,字典形式,{f:val,f:val,...}
tree:训练生成树
default:查找失败时返回的默认类别
'''
for feature in list(x_test.keys()):
if feature in list(tree.keys()): # 如果该特征与根节点的划分特征相同
try:
sub_tree = tree[feature][x_test[feature]] # 根据特征的取值来获取左右分支
if isinstance(sub_tree, dict): # 判断是否还有子树
return self.__predict_one(x_test, tree=sub_tree) # 有则继续查找
else:
return sub_tree # 是叶节点则返回结果
except: # 没有查到则说明是未见过的情况,只能返回default
return default
def predict(self, X_test):
X_test = pd.DataFrame(X_test, columns=list(range(X_test.shape[1]))).to_dict(orient='record')
Y_pred = list()
for item in X_test:
Y_pred.append(self.__predict_one(item, tree=self.tree))
return Y_pred
def load_zoo():
    """Load the UCI Zoo data set as an sklearn-like object.

    The returned object exposes ``data`` (feature matrix) and ``target``
    (label vector) attributes.
    """
    from collections import namedtuple
    raw = pd.read_csv('../utils/dataset/UCI_Zoo_Data_Set/zoo.data.csv', header=None)
    # Column 0 is animal_name and carries no predictive value; drop it.
    raw = raw.drop([0], axis=1)
    dataClass = namedtuple('data', ['data', 'target'])
    dataClass.data = raw.iloc[:, :-1].values
    dataClass.target = raw.iloc[:, -1].values
    return dataClass
if __name__ == '__main__':
    from model_selection.train_test_split import train_test_split

    # Smoke test: train on 80% of the zoo data and report accuracy on the rest.
    zoo = load_zoo()
    X_train, X_test, Y_train, Y_test = train_test_split(zoo.data, zoo.target, test_size=0.2)

    id3_tree = ID3()
    id3_tree.fit(X_train, Y_train)
    Y_pred = id3_tree.predict(X_test)

    accuracy = np.sum(np.array(Y_test) == np.array(Y_pred)) / len(Y_test)
    print('acc:{}'.format(accuracy))
| 36.083969 | 116 | 0.567167 | import pandas as pd
import numpy as np
class ID3:
def __init__(self):
self.tree = None
self.dataset = None
def __entropy(self, feature):
uni_val, cnt = np.unique(feature, return_counts=True)
H = np.sum([(-cnt[i] / np.sum(cnt)) * np.log2(cnt[i] / np.sum(cnt))
for i in range(len(uni_val))])
return H
def __InfoGain(self, dataset, f_test_col, Y_col=-1):
entropy_before = self.__entropy(dataset.iloc[:, Y_col])
uni_val, cnt = np.unique(dataset.iloc[:, f_test_col], return_counts=True)
entropy_cond = np.sum([(cnt[i] / np.sum(cnt)) * self.__entropy(dataset.where(dataset.iloc[:, f_test_col]
== uni_val[i]).dropna().iloc[:,
Y_col])
for i in range(len(uni_val))])
return entropy_before - entropy_cond
def __gen_tree(self, dataset, org_dataset, f_cols, Y_col=-1, p_node_cls=None):
if len(np.unique(dataset.iloc[:, Y_col])) <= 1:
return np.unique(dataset.iloc[:, Y_col])[0]
elif len(dataset) == 0:
uni_cls, cnt = np.unique(
org_dataset.iloc[:, Y_col], return_counts=True)
return uni_cls[np.argmax(cnt)]
elif len(f_cols) == 0:
return p_node_cls
else:
cur_uni_cls, cnt = np.unique(
dataset.iloc[:, Y_col], return_counts=True)
cur_node_cls = cur_uni_cls[np.argmax(cnt)]
del cur_uni_cls, cnt
gains = [self.__InfoGain(dataset, f_col, Y_col) for f_col in f_cols]
best_f = f_cols[np.argmax(gains)]
f_cols = [col for col in f_cols if col != best_f]
tree = {best_f: {}}
for val in np.unique(dataset.iloc[:, best_f]):
sub_data = dataset.where(dataset.iloc[:, best_f] == val).dropna()
sub_tree = self.__gen_tree(sub_data, dataset, f_cols, Y_col, cur_node_cls)
tree[best_f][val] = sub_tree
return tree
def fit(self, X_train, Y_train):
dataset = np.c_[X_train, Y_train]
self.dataset = pd.DataFrame(dataset, columns=list(range(dataset.shape[1])))
self.tree = self.__gen_tree(self.dataset, self.dataset, list(range(self.dataset.shape[1] - 1)))
def __predict_one(self, x_test, tree, default=-1):
for feature in list(x_test.keys()):
if feature in list(tree.keys()):
try:
sub_tree = tree[feature][x_test[feature]]
if isinstance(sub_tree, dict):
return self.__predict_one(x_test, tree=sub_tree)
else:
return sub_tree
except:
return default
def predict(self, X_test):
X_test = pd.DataFrame(X_test, columns=list(range(X_test.shape[1]))).to_dict(orient='record')
Y_pred = list()
for item in X_test:
Y_pred.append(self.__predict_one(item, tree=self.tree))
return Y_pred
def load_zoo():
from collections import namedtuple
df = pd.read_csv('../utils/dataset/UCI_Zoo_Data_Set/zoo.data.csv', header=None)
df = df.drop([0], axis=1)
dataClass = namedtuple('data', ['data', 'target'])
dataClass.data = df.iloc[:, :-1].values
dataClass.target = df.iloc[:, -1].values
return dataClass
if __name__ == '__main__':
from model_selection.train_test_split import train_test_split
data = load_zoo()
X = data.data
Y = data.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
id3_tree = ID3()
id3_tree.fit(X_train, Y_train)
Y_pred = id3_tree.predict(X_test)
print('acc:{}'.format(np.sum(np.array(Y_test) == np.array(Y_pred)) / len(Y_test)))
| true | true |
f7fb0c38fd17ac4b61af5045e5d5b10a78df3e2b | 362 | py | Python | 2021/6/aoc2106a.py | JonasThorsell/AoC | f1fc00a2927336ffbef5f62675c76473c67d09d0 | [
"MIT"
] | null | null | null | 2021/6/aoc2106a.py | JonasThorsell/AoC | f1fc00a2927336ffbef5f62675c76473c67d09d0 | [
"MIT"
] | null | null | null | 2021/6/aoc2106a.py | JonasThorsell/AoC | f1fc00a2927336ffbef5f62675c76473c67d09d0 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Jonas Thorsell
import sys
# Advent of Code 2021, day 6 part 1: count lanternfish after 80 days.
# A fish is fully described by its internal timer (0-8), so we only track
# how many fish hold each timer value instead of simulating individuals.
timers = [0] * 9
for t in [int(x) for x in sys.stdin.readline().split(',')]:
    timers[t] += 1

for _day in range(80):
    spawning = timers[0]
    # Every timer ticks down one day; each fish that hit 0 resets to 6 and
    # spawns a new fish with timer 8.
    timers = timers[1:] + [spawning]
    timers[6] += spawning

print(sum(timers))
| 19.052632 | 60 | 0.430939 |
import sys
tl = [0] * 9
for t in [int(x) for x in sys.stdin.readline().split(',')]:
tl[t] += 1
for d in range(80):
ntl = [0] * 9
for i in range(9):
if i == 0:
ntl[8] = tl[0]
ntl[6] = tl[0]
else:
ntl[i-1] += tl[i]
tl = ntl
print(sum(tl))
| true | true |
f7fb0d6493a4ce90f45d00994a700ab2d3e5e451 | 17,648 | py | Python | buildscripts/packager-enterprise.py | ibm-linux-on-z/mongo | 4d6a2ca26b54958b90aa05801891e8f70f1040a3 | [
"Apache-2.0"
] | 3 | 2015-10-20T18:40:10.000Z | 2016-01-12T00:32:45.000Z | buildscripts/packager-enterprise.py | ibm-linux-on-z/mongo | 4d6a2ca26b54958b90aa05801891e8f70f1040a3 | [
"Apache-2.0"
] | null | null | null | buildscripts/packager-enterprise.py | ibm-linux-on-z/mongo | 4d6a2ca26b54958b90aa05801891e8f70f1040a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# This program makes Debian and RPM repositories for MongoDB, by
# downloading our tarballs of statically linked executables and
# insinuating them into Linux packages. It must be run on a
# Debianoid, since Debian provides tools to make RPMs, but RPM-based
# systems don't provide debian packaging crud.
# Notes:
#
# * Almost anything that you want to be able to influence about how a
# package construction must be embedded in some file that the
# packaging tool uses for input (e.g., debian/rules, debian/control,
# debian/changelog; or the RPM specfile), and the precise details are
# arbitrary and silly. So this program generates all the relevant
# inputs to the packaging tools.
#
# * Once a .deb or .rpm package is made, there's a separate layer of
# tools that makes a "repository" for use by the apt/yum layers of
# package tools. The layouts of these repositories are arbitrary and
# silly, too.
#
# * Before you run the program on a new host, these are the
# prerequisites:
#
# apt-get install dpkg-dev rpm debhelper fakeroot ia32-libs createrepo git-core libsnmp15
# echo "Now put the dist gnupg signing keys in ~root/.gnupg"
import argparse
import errno
import getopt
from glob import glob
import packager
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import time
import urlparse
# The MongoDB names for the architectures we support (enterprise builds are
# produced for x86_64 only).
DEFAULT_ARCHES=["x86_64"]

# Made up names for the flavors of distribution we package for; each becomes
# a Distro instance in main().
DISTROS=["suse", "debian","redhat","ubuntu"]
class Spec(object):
    """A MongoDB Enterprise version to package, plus packaging metadata."""

    def __init__(self, ver, gitspec = None, rel = None):
        self.ver = ver          # upstream version, e.g. "2.6.0", "2.6.0-rc1", "2.7.0-"
        self.gitspec = gitspec  # git revision for packaging metadata files, or None
        self.rel = rel          # package release number ("N"); defaults to 1

    def is_nightly(self):
        # Nightly (snapshot) versions end in a bare dash, e.g. "2.7.0-".
        return bool(re.search("-$", self.version()))

    def is_rc(self):
        return bool(re.search(r"-rc\d+$", self.version()))

    def is_pre_release(self):
        return self.is_rc() or self.is_nightly()

    def version(self):
        return self.ver

    def metadata_gitspec(self):
        """Git revision to use for spec+control+init+manpage files.
        The default is the release tag for the version being packaged."""
        if self.gitspec:
            return self.gitspec
        else:
            return 'r' + self.version()

    @staticmethod
    def __version_key(version_string):
        # "2.6.0-rc1" -> ((2, 6, 0), "rc1"); a bare version gets an empty
        # suffix, which sorts before any pre-release suffix.
        core, _, suffix = version_string.partition("-")
        return tuple(int(c) for c in core.split(".")), suffix

    def version_better_than(self, version_string):
        """Return True when this spec's version is newer than version_string.

        Dotted components are compared numerically, so "1.8.10" beats
        "1.8.2" (the old lexicographic string comparison got this wrong),
        and a suffixed version ("1.8.2-rc1") sorts after its bare
        counterpart, preserving the ordering this packager has wanted.
        """
        return self.__version_key(self.ver) > self.__version_key(version_string)

    def suffix(self):
        # Even minor versions (2.4, 2.6, ...) are stable release series.
        return "-enterprise" if int(self.ver.split(".")[1])%2==0 else "-enterprise-unstable"

    def prelease(self):
        """Return the package "release" field.

        "N" is either passed in on the command line, or "1":
        1) Standard release - "N"
        2) Nightly (snapshot) - "0.N.YYYYMMDD"
        3) RC's - "0.N.rcX"
        """
        if self.rel:
            corenum = self.rel
        else:
            corenum = 1
        # RC's
        if self.is_rc():
            return "0.%s.%s" % (corenum, re.sub('.*-','',self.version()))
        # Nightlies
        elif self.is_nightly():
            return "0.%s.%s" % (corenum, time.strftime("%Y%m%d"))
        else:
            return str(corenum)

    def pversion(self, distro):
        # Note: Debian packages have funny rules about dashes in
        # version numbers, and RPM simply forbids dashes.  pversion
        # is the package's version number (the upstream version is
        # still needed elsewhere).
        if re.search("^(debian|ubuntu)", distro.name()):
            return re.sub("-", "~", self.ver)
        elif re.search("(suse|redhat|fedora|centos)", distro.name()):
            return re.sub("-.*", "", self.ver)
        else:
            raise Exception("BUG: unsupported platform?")

    def branch(self):
        """Return the major and minor portions of the specified version.
        For example, if the version is "2.5.5" the branch would be "2.5"
        """
        return ".".join(self.ver.split(".")[0:2])
class Distro(object):
    """Wraps a distribution family name ("suse", "debian", "redhat" or
    "ubuntu") and encodes that family's conventions: architecture labels,
    published repository layout, and which packaging tool to invoke."""
    def __init__(self, string):
        # string is one of the family names listed in DISTROS.
        self.n=string
    def name(self):
        """Return the distro family name, e.g. "ubuntu"."""
        return self.n
    def pkgbase(self):
        """Base package name; spec.suffix() is appended to this."""
        return "mongodb"
    def archname(self, arch):
        """Translate a MongoDB arch name into this distro's label:
        "i386"/"amd64" for deb-based, "i686"/"x86_64" for rpm-based."""
        if re.search("^(debian|ubuntu)", self.n):
            return "i386" if arch.endswith("86") else "amd64"
        elif re.search("^(suse|centos|redhat|fedora)", self.n):
            return "i686" if arch.endswith("86") else "x86_64"
        else:
            raise Exception("BUG: unsupported platform?")
    def repodir(self, arch, build_os, spec):
        """Return the directory where we'll place the package files for
        (distro, distro_version) in that distro's preferred repository
        layout (as distinct from where that distro's packaging building
        tools place the package files).

        Packages will go into repos corresponding to the major release
        series (2.5, 2.6, 2.7, 2.8, etc.) except for RC's and nightlies
        which will go into special separate "testing" directories.

        Examples:

        repo/apt/ubuntu/dists/precise/mongodb-enterprise/testing/multiverse/binary-amd64
        repo/apt/ubuntu/dists/precise/mongodb-enterprise/testing/multiverse/binary-i386

        repo/apt/ubuntu/dists/precise/mongodb-enterprise/2.5/multiverse/binary-amd64
        repo/apt/ubuntu/dists/precise/mongodb-enterprise/2.5/multiverse/binary-i386

        repo/apt/ubuntu/dists/trusty/mongodb-enterprise/2.5/multiverse/binary-amd64
        repo/apt/ubuntu/dists/trusty/mongodb-enterprise/2.5/multiverse/binary-i386

        repo/apt/debian/dists/wheezy/mongodb-enterprise/2.5/main/binary-amd64
        repo/apt/debian/dists/wheezy/mongodb-enterprise/2.5/main/binary-i386

        repo/yum/redhat/6/mongodb-enterprise/2.5/x86_64
        repo/yum/redhat/6/mongodb-enterprise/2.5/i386

        repo/zypper/suse/11/mongodb-enterprise/2.5/x86_64
        repo/zypper/suse/11/mongodb-enterprise/2.5/i386

        repo/zypper/suse/11/mongodb-enterprise/testing/x86_64
        repo/zypper/suse/11/mongodb-enterprise/testing/i386
        """
        repo_directory = ""
        # Pre-releases (RCs and nightlies) land in "testing" instead of a
        # branch directory like "2.6".
        if spec.is_pre_release():
            repo_directory = "testing"
        else:
            repo_directory = spec.branch()
        if re.search("^(debian|ubuntu)", self.n):
            return "repo/apt/%s/dists/%s/mongodb-enterprise/%s/%s/binary-%s/" % (self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(), self.archname(arch))
        elif re.search("(redhat|fedora|centos)", self.n):
            return "repo/yum/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
        elif re.search("(suse)", self.n):
            return "repo/zypper/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
        else:
            raise Exception("BUG: unsupported platform?")
    def repo_component(self):
        """Return the name of the section/component/pool we are publishing into -
        e.g. "multiverse" for Ubuntu, "main" for debian.  Only meaningful for
        deb-based distros; others raise."""
        if self.n == 'ubuntu':
            return "multiverse"
        elif self.n == 'debian':
            return "main"
        else:
            raise Exception("unsupported distro: %s" % self.n)
    def repo_os_version(self, build_os):
        """Return an OS version suitable for package repo directory
        naming - e.g. 5, 6 or 7 for redhat/centos, "precise," "wheezy," etc.
        for Ubuntu/Debian, 11 for suse"""
        if self.n == 'suse':
            return re.sub(r'^suse(\d+)$', r'\1', build_os)
        if self.n == 'redhat':
            return re.sub(r'^rhel(\d).*$', r'\1', build_os)
        elif self.n == 'ubuntu':
            if build_os == 'ubuntu1204':
                return "precise"
            elif build_os == 'ubuntu1404':
                return "trusty"
            else:
                raise Exception("unsupported build_os: %s" % build_os)
        elif self.n == 'debian':
            if build_os == 'debian71':
                return 'wheezy'
            else:
                raise Exception("unsupported build_os: %s" % build_os)
        else:
            raise Exception("unsupported distro: %s" % self.n)
    def make_pkg(self, build_os, arch, spec, srcdir):
        """Build one package, dispatching to the deb or rpm builder in the
        shared packager module.  Returns that builder's result (the repo
        directory in the existing callers)."""
        if re.search("^(debian|ubuntu)", self.n):
            return packager.make_deb(self, build_os, arch, spec, srcdir)
        elif re.search("^(suse|centos|redhat|fedora)", self.n):
            return packager.make_rpm(self, build_os, arch, spec, srcdir)
        else:
            raise Exception("BUG: unsupported platform?")
    def build_os(self):
        """Return the build os label in the binary package to download ("rhel57", "rhel62" and "rhel70"
        for redhat, "ubuntu1204" and "ubuntu1404" for Ubuntu, "debian71" for Debian), and "suse11"
        for SUSE)"""
        if re.search("(suse)", self.n):
            return [ "suse11", "suse12" ]
        if re.search("(redhat|fedora|centos)", self.n):
            return [ "rhel70", "rhel62", "rhel57" ]
        elif self.n == 'ubuntu':
            return [ "ubuntu1204", "ubuntu1404" ]
        elif self.n == 'debian':
            return [ "debian71" ]
        else:
            raise Exception("BUG: unsupported platform?")
    def release_dist(self, build_os):
        """Return the release distribution to use in the rpm - "el5" for rhel 5.x,
        "el6" for rhel 6.x, return anything else unchanged"""
        return re.sub(r'^rh(el\d).*$', r'\1', build_os)
def main(argv):
    """Download release tarballs and build packages plus repo layouts for
    every requested (distro, arch, build_os) combination."""
    distros=[Distro(distro) for distro in DISTROS]
    # packager.get_args validates the command line against the distro list.
    args = packager.get_args(distros)
    spec = Spec(args.server_version, args.metadata_gitspec, args.release_number)
    oldcwd=os.getcwd()
    # The packaging metadata (debian/, rpm/) lives one level above this script.
    srcdir=oldcwd+"/../"
    # Where to do all of our work. Use a randomly-created directory if one
    # is not passed in.
    prefix = args.prefix
    if prefix is None:
        prefix=tempfile.mkdtemp()
    print "Working in directory %s" % prefix
    os.chdir(prefix)
    try:
        # Download the binaries.
        urlfmt="http://downloads.mongodb.com/linux/mongodb-linux-%s-enterprise-%s-%s.tgz"
        # Build a package for each distro/spec/arch tuple, and
        # accumulate the repository-layout directories.
        for (distro, arch) in packager.crossproduct(distros, args.arches):
            for build_os in distro.build_os():
                if build_os in args.distros or not args.distros:
                    # A locally supplied tarball (--tarball) short-circuits
                    # the download.
                    if args.tarball:
                        filename = tarfile(build_os, arch, spec)
                        packager.ensure_dir(filename)
                        shutil.copyfile(args.tarball,filename)
                    else:
                        packager.httpget(urlfmt % (arch, build_os, spec.version()), packager.ensure_dir(tarfile(build_os, arch, spec)))
                    repo = make_package(distro, build_os, arch, spec, srcdir)
                    make_repo(repo, distro, build_os, spec)
    finally:
        # Always return to the original directory, even on failure.
        os.chdir(oldcwd)
def tarfile(build_os, arch, spec):
    """Return the path under dl/ where the downloaded release tarball for
    this (build_os, arch, spec) combination is stored."""
    fields = (spec.version(), build_os, arch)
    return "dl/mongodb-linux-%s-enterprise-%s-%s.tar.gz" % fields
def setupdir(distro, build_os, arch, spec):
    """Return the staging directory handed to the distro's packaging tools.

    All inputs to the build (package metadata files, init scripts, and the
    unpacked binaries) are collected here, e.g.
    dst/x86_64/redhat/rhel57/mongodb-enterprise-2.6.0/ or
    dst/x86_64/debian/wheezy/mongodb-enterprise-unstable-2.5.5/.
    """
    fields = (arch, distro.name(), build_os,
              distro.pkgbase(), spec.suffix(), spec.pversion(distro))
    return "dst/%s/%s/%s/%s%s-%s/" % fields
def unpack_binaries_into(build_os, arch, spec, where):
    """Unpack the tarfile for (build_os, arch, spec) into directory where."""
    rootdir=os.getcwd()
    packager.ensure_dir(where)
    # Note: POSIX tar doesn't require support for gtar's "-C" option,
    # and Python's tarfile module prior to Python 2.7 doesn't have the
    # features to make this detail easy.  So we'll just do the dumb
    # thing and chdir into where and run tar there.
    os.chdir(where)
    try:
        packager.sysassert(["tar", "xvzf", rootdir+"/"+tarfile(build_os, arch, spec)])
        release_dir = glob('mongodb-linux-*')[0]
        # Hoist the payload out of the versioned top-level directory.
        for releasefile in "bin", "snmp", "LICENSE.txt", "README", "THIRD-PARTY-NOTICES", "MPL-2":
            os.rename("%s/%s" % (release_dir, releasefile), releasefile)
        os.rmdir(release_dir)
    finally:
        # try/finally replaces the old catch-and-reraise via the Python 2-only
        # sys.exc_value, which also destroyed the traceback; the working
        # directory is now restored on every exit path.
        os.chdir(rootdir)
def make_package(distro, build_os, arch, spec, srcdir):
    """Construct the package for (arch, distro, spec), getting
    packaging files from srcdir and any user-specified suffix from
    suffixes.  Returns the repo directory reported by the distro's
    package builder."""
    sdir=setupdir(distro, build_os, arch, spec)
    packager.ensure_dir(sdir)
    # Note that the RPM packages get their man pages from the debian
    # directory, so the debian directory is needed in all cases (and
    # innocuous in the debianoids' sdirs).
    for pkgdir in ["debian", "rpm"]:
        print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
        # FIXME: sh-dash-cee is bad. See if tarfile can do this.
        # git archive at the metadata gitspec keeps the packaging files in
        # sync with the release being packaged.
        packager.sysassert(["sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir)])
    # Splat the binaries and snmp files under sdir.  The "build" stages of the
    # packaging infrastructure will move the files to wherever they
    # need to go.
    unpack_binaries_into(build_os, arch, spec, sdir)
    # Remove the mongosniff binary due to libpcap dynamic
    # linkage.  FIXME: this removal should go away
    # eventually.
    if os.path.exists(sdir + "bin/mongosniff"):
        os.unlink(sdir + "bin/mongosniff")
    return distro.make_pkg(build_os, arch, spec, srcdir)
def make_repo(repodir, distro, build_os, spec):
    """Generate repository metadata for repodir, picking the apt or yum/zypper
    flavor from the repository path itself."""
    deb_like = re.search("(debian|ubuntu)", repodir)
    rpm_like = re.search("(suse|centos|redhat|fedora)", repodir)
    if deb_like:
        make_deb_repo(repodir, distro, build_os, spec)
    elif rpm_like:
        packager.make_rpm_repo(repodir)
    else:
        raise Exception("BUG: unsupported platform?")
def make_deb_repo(repo, distro, build_os, spec):
    """Generate the apt repository metadata (Packages, Packages.gz, Release)
    above the binary-* directory named by repo."""
    # Note: the Debian repository Packages files must be generated
    # very carefully in order to be usable.
    oldpwd=os.getcwd()
    # Climb from .../dists/<os>/mongodb-enterprise/<series>/<component>/binary-<arch>/
    # to the repository root so dpkg-scanpackages emits relative paths.
    os.chdir(repo+"../../../../../../")
    try:
        # One Packages/Packages.gz pair per directory that holds .deb files.
        dirs=set([os.path.dirname(deb)[2:] for deb in packager.backtick(["find", ".", "-name", "*.deb"]).split()])
        for d in dirs:
            s=packager.backtick(["dpkg-scanpackages", d, "/dev/null"])
            with open(d+"/Packages", "w") as f:
                f.write(s)
            b=packager.backtick(["gzip", "-9c", d+"/Packages"])
            with open(d+"/Packages.gz", "wb") as f:
                f.write(b)
    finally:
        os.chdir(oldpwd)
    # Notes: the Release{,.gpg} files must live in a special place,
    # and must be created after all the Packages.gz files have been
    # done.
    s="""Origin: mongodb
Label: mongodb
Suite: %s
Codename: %s/mongodb-enterprise
Architectures: amd64
Components: %s
Description: MongoDB packages
""" % (distro.repo_os_version(build_os), distro.repo_os_version(build_os), distro.repo_component())
    # Remove any stale Release/Release.gpg before regenerating; a leftover
    # signature over new contents would be worse than none.
    if os.path.exists(repo+"../../Release"):
        os.unlink(repo+"../../Release")
    if os.path.exists(repo+"../../Release.gpg"):
        os.unlink(repo+"../../Release.gpg")
    oldpwd=os.getcwd()
    os.chdir(repo+"../../")
    # apt-ftparchive appends the checksum stanzas for the Packages files.
    s2=packager.backtick(["apt-ftparchive", "release", "."])
    try:
        with open("Release", 'w') as f:
            f.write(s)
            f.write(s2)
    finally:
        os.chdir(oldpwd)
def move_repos_into_place(src, dst):
    """Publish the repository tree in src as dst.

    Everything under src/* is copied into a freshly-created dated directory
    beside dst; dst is then repointed at the new directory via a symlink
    rename (atomic on POSIX), and dst+".old" is left naming the previous
    tree.  This feels like a lot of hooey for something so trivial, but it
    lets consumers of dst never observe a half-copied repository.
    """
    # First, make a crispy fresh new directory to put the stuff in.
    i=0
    while True:
        date_suffix=time.strftime("%Y-%m-%d")
        dname=dst+".%s.%d" % (date_suffix, i)
        try:
            os.mkdir(dname)
            break
        except OSError as exc:
            # Only "already exists" is expected; bump the counter and retry.
            # ("as exc" replaces the Python 2-only sys.exc_value dance.)
            if exc.errno != errno.EEXIST:
                raise
        i=i+1
    # Put the stuff in our new directory.
    for r in os.listdir(src):
        packager.sysassert(["cp", "-rv", src + "/" + r, dname])
    # Make a symlink to the new directory; the symlink will be renamed
    # to dst shortly.
    i=0
    while True:
        tmpnam=dst+".TMP.%d" % i
        try:
            os.symlink(dname, tmpnam)
            break
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        i=i+1
    # Make a symlink to the old directory; this symlink will be
    # renamed shortly, too.
    oldnam=None
    if os.path.exists(dst):
        i=0
        while True:
            oldnam=dst+".old.%d" % i
            try:
                os.symlink(os.readlink(dst), oldnam)
                break
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
            # BUG FIX: this increment was missing, so an existing
            # dst+".old.0" made the loop spin forever on EEXIST.
            i=i+1
    # rename over an existing symlink atomically repoints dst at the new tree.
    os.rename(tmpnam, dst)
    if oldnam:
        os.rename(oldnam, dst+".old")
# Entry point: build enterprise packages and repository layouts for every
# requested distro/arch combination.
if __name__ == "__main__":
    main(sys.argv)
| 37.232068 | 180 | 0.619107 |
# Notes:
#
# * Almost anything that you want to be able to influence about how a
# package construction must be embedded in some file that the
# packaging tool uses for input (e.g., debian/rules, debian/control,
# debian/changelog; or the RPM specfile), and the precise details are
# arbitrary and silly. So this program generates all the relevant
# inputs to the packaging tools.
#
# * Once a .deb or .rpm package is made, there's a separate layer of
import argparse
import errno
import getopt
from glob import glob
import packager
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import time
import urlparse
DEFAULT_ARCHES=["x86_64"]
DISTROS=["suse", "debian","redhat","ubuntu"]
class Spec(object):
def __init__(self, ver, gitspec = None, rel = None):
self.ver = ver
self.gitspec = gitspec
self.rel = rel
def is_nightly(self):
return bool(re.search("-$", self.version()))
def is_rc(self):
return bool(re.search("-rc\d+$", self.version()))
def is_pre_release(self):
return self.is_rc() or self.is_nightly()
def version(self):
return self.ver
def metadata_gitspec(self):
"""Git revision to use for spec+control+init+manpage files.
The default is the release tag for the version being packaged."""
if(self.gitspec):
return self.gitspec
else:
return 'r' + self.version()
def version_better_than(self, version_string):
# e.g., "1.8.2" < "1.8.10", "1.8.2" < "1.8.2-rc1"
return self.ver > version_string
def suffix(self):
return "-enterprise" if int(self.ver.split(".")[1])%2==0 else "-enterprise-unstable"
def prelease(self):
# "N" is either passed in on the command line, or "1"
#
# 1) Standard release - "N"
# 2) Nightly (snapshot) - "0.N.YYYYMMDDlatest"
# 3) RC's - "0.N.rcX"
if self.rel:
corenum = self.rel
else:
corenum = 1
if self.is_rc():
return "0.%s.%s" % (corenum, re.sub('.*-','',self.version()))
# Nightlies
elif self.is_nightly():
return "0.%s.%s" % (corenum, time.strftime("%Y%m%d"))
else:
return str(corenum)
def pversion(self, distro):
# Note: Debian packages have funny rules about dashes in
# version numbers, and RPM simply forbids dashes. pversion
# will be the package's version number (but we need to know
if re.search("^(debian|ubuntu)", distro.name()):
return re.sub("-", "~", self.ver)
elif re.search("(suse|redhat|fedora|centos)", distro.name()):
return re.sub("-.*", "", self.ver)
else:
raise Exception("BUG: unsupported platform?")
def branch(self):
"""Return the major and minor portions of the specified version.
For example, if the version is "2.5.5" the branch would be "2.5"
"""
return ".".join(self.ver.split(".")[0:2])
class Distro(object):
def __init__(self, string):
self.n=string
def name(self):
return self.n
def pkgbase(self):
return "mongodb"
def archname(self, arch):
if re.search("^(debian|ubuntu)", self.n):
return "i386" if arch.endswith("86") else "amd64"
elif re.search("^(suse|centos|redhat|fedora)", self.n):
return "i686" if arch.endswith("86") else "x86_64"
else:
raise Exception("BUG: unsupported platform?")
def repodir(self, arch, build_os, spec):
"""Return the directory where we'll place the package files for
(distro, distro_version) in that distro's preferred repository
layout (as distinct from where that distro's packaging building
tools place the package files).
Packages will go into repos corresponding to the major release
series (2.5, 2.6, 2.7, 2.8, etc.) except for RC's and nightlies
which will go into special separate "testing" directories
Examples:
repo/apt/ubuntu/dists/precise/mongodb-enterprise/testing/multiverse/binary-amd64
repo/apt/ubuntu/dists/precise/mongodb-enterprise/testing/multiverse/binary-i386
repo/apt/ubuntu/dists/precise/mongodb-enterprise/2.5/multiverse/binary-amd64
repo/apt/ubuntu/dists/precise/mongodb-enterprise/2.5/multiverse/binary-i386
repo/apt/ubuntu/dists/trusty/mongodb-enterprise/2.5/multiverse/binary-amd64
repo/apt/ubuntu/dists/trusty/mongodb-enterprise/2.5/multiverse/binary-i386
repo/apt/debian/dists/wheezy/mongodb-enterprise/2.5/main/binary-amd64
repo/apt/debian/dists/wheezy/mongodb-enterprise/2.5/main/binary-i386
repo/yum/redhat/6/mongodb-enterprise/2.5/x86_64
repo/yum/redhat/6/mongodb-enterprise/2.5/i386
repo/zypper/suse/11/mongodb-enterprise/2.5/x86_64
repo/zypper/suse/11/mongodb-enterprise/2.5/i386
repo/zypper/suse/11/mongodb-enterprise/testing/x86_64
repo/zypper/suse/11/mongodb-enterprise/testing/i386
"""
repo_directory = ""
if spec.is_pre_release():
repo_directory = "testing"
else:
repo_directory = spec.branch()
if re.search("^(debian|ubuntu)", self.n):
return "repo/apt/%s/dists/%s/mongodb-enterprise/%s/%s/binary-%s/" % (self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(), self.archname(arch))
elif re.search("(redhat|fedora|centos)", self.n):
return "repo/yum/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
elif re.search("(suse)", self.n):
return "repo/zypper/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
else:
raise Exception("BUG: unsupported platform?")
def repo_component(self):
"""Return the name of the section/component/pool we are publishing into -
e.g. "multiverse" for Ubuntu, "main" for debian."""
if self.n == 'ubuntu':
return "multiverse"
elif self.n == 'debian':
return "main"
else:
raise Exception("unsupported distro: %s" % self.n)
def repo_os_version(self, build_os):
"""Return an OS version suitable for package repo directory
naming - e.g. 5, 6 or 7 for redhat/centos, "precise," "wheezy," etc.
for Ubuntu/Debian, 11 for suse"""
if self.n == 'suse':
return re.sub(r'^suse(\d+)$', r'\1', build_os)
if self.n == 'redhat':
return re.sub(r'^rhel(\d).*$', r'\1', build_os)
elif self.n == 'ubuntu':
if build_os == 'ubuntu1204':
return "precise"
elif build_os == 'ubuntu1404':
return "trusty"
else:
raise Exception("unsupported build_os: %s" % build_os)
elif self.n == 'debian':
if build_os == 'debian71':
return 'wheezy'
else:
raise Exception("unsupported build_os: %s" % build_os)
else:
raise Exception("unsupported distro: %s" % self.n)
def make_pkg(self, build_os, arch, spec, srcdir):
if re.search("^(debian|ubuntu)", self.n):
return packager.make_deb(self, build_os, arch, spec, srcdir)
elif re.search("^(suse|centos|redhat|fedora)", self.n):
return packager.make_rpm(self, build_os, arch, spec, srcdir)
else:
raise Exception("BUG: unsupported platform?")
def build_os(self):
"""Return the build os label in the binary package to download ("rhel57", "rhel62" and "rhel70"
for redhat, "ubuntu1204" and "ubuntu1404" for Ubuntu, "debian71" for Debian), and "suse11"
for SUSE)"""
if re.search("(suse)", self.n):
return [ "suse11", "suse12" ]
if re.search("(redhat|fedora|centos)", self.n):
return [ "rhel70", "rhel62", "rhel57" ]
elif self.n == 'ubuntu':
return [ "ubuntu1204", "ubuntu1404" ]
elif self.n == 'debian':
return [ "debian71" ]
else:
raise Exception("BUG: unsupported platform?")
def release_dist(self, build_os):
"""Return the release distribution to use in the rpm - "el5" for rhel 5.x,
"el6" for rhel 6.x, return anything else unchanged"""
return re.sub(r'^rh(el\d).*$', r'\1', build_os)
def main(argv):
distros=[Distro(distro) for distro in DISTROS]
args = packager.get_args(distros)
spec = Spec(args.server_version, args.metadata_gitspec, args.release_number)
oldcwd=os.getcwd()
srcdir=oldcwd+"/../"
prefix = args.prefix
if prefix is None:
prefix=tempfile.mkdtemp()
print "Working in directory %s" % prefix
os.chdir(prefix)
try:
urlfmt="http://downloads.mongodb.com/linux/mongodb-linux-%s-enterprise-%s-%s.tgz"
for (distro, arch) in packager.crossproduct(distros, args.arches):
for build_os in distro.build_os():
if build_os in args.distros or not args.distros:
if args.tarball:
filename = tarfile(build_os, arch, spec)
packager.ensure_dir(filename)
shutil.copyfile(args.tarball,filename)
else:
packager.httpget(urlfmt % (arch, build_os, spec.version()), packager.ensure_dir(tarfile(build_os, arch, spec)))
repo = make_package(distro, build_os, arch, spec, srcdir)
make_repo(repo, distro, build_os, spec)
finally:
os.chdir(oldcwd)
def tarfile(build_os, arch, spec):
"""Return the location where we store the downloaded tarball for
this package"""
return "dl/mongodb-linux-%s-enterprise-%s-%s.tar.gz" % (spec.version(), build_os, arch)
def setupdir(distro, build_os, arch, spec):
# scripts, etc), along with the already-built binaries). In case
# the following format string is unclear, an example setupdir
# would be dst/x86_64/debian-sysvinit/wheezy/mongodb-org-unstable/
# or dst/x86_64/redhat/rhel57/mongodb-org-unstable/
return "dst/%s/%s/%s/%s%s-%s/" % (arch, distro.name(), build_os, distro.pkgbase(), spec.suffix(), spec.pversion(distro))
def unpack_binaries_into(build_os, arch, spec, where):
    """Unpack the tarfile for (build_os, arch, spec) into directory where,
    flattening the release's top-level directory away.
    """
    rootdir=os.getcwd()
    packager.ensure_dir(where)
    # Note: POSIX tar doesn't require support for gtar's "-C" option,
    # and Python's tarfile module prior to Python 2.7 doesn't have the
    # features to make this detail easy. So we'll just do the dumb
    # thing and chdir into the target directory before extracting.
    os.chdir(where)
    try:
        packager.sysassert(["tar", "xvzf", rootdir+"/"+tarfile(build_os, arch, spec)])
        release_dir = glob('mongodb-linux-*')[0]
        # Hoist the release payload up one level, then drop the now-empty
        # per-release directory.
        for releasefile in "bin", "snmp", "LICENSE.txt", "README", "THIRD-PARTY-NOTICES", "MPL-2":
            os.rename("%s/%s" % (release_dir, releasefile), releasefile)
        os.rmdir(release_dir)
    finally:
        # Always restore the working directory.  Using try/finally (instead
        # of the old "exc = sys.exc_value; raise exc" dance, which is
        # Python-2-only and discards the traceback) preserves the original
        # exception and traceback for the caller.
        os.chdir(rootdir)
def make_package(distro, build_os, arch, spec, srcdir):
    """Construct the package for (arch, distro, spec), getting
    packaging files from srcdir and any user-specified suffix from
    suffixes"""
    sdir=setupdir(distro, build_os, arch, spec)
    packager.ensure_dir(sdir)
    # Export the distro packaging trees (debian/ and rpm/) from the source
    # git checkout, pinned to the requested metadata gitspec, into sdir.
    for pkgdir in ["debian", "rpm"]:
        print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
        # FIXME: sh-dash-cee is bad. See if tarfile can do this.
        packager.sysassert(["sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir)])
    # Splat the binaries and snmp files under sdir. The "build" stages of the
    # packaging infrastructure will move the files to wherever they
    # need to go.
    unpack_binaries_into(build_os, arch, spec, sdir)
    # Remove the mongosniff binary due to libpcap dynamic
    # linkage. FIXME: this removal should go away
    # eventually.
    if os.path.exists(sdir + "bin/mongosniff"):
        os.unlink(sdir + "bin/mongosniff")
    # Delegate the actual .deb/.rpm build to the distro object; returns the
    # repo directory that make_repo() should operate on.
    return distro.make_pkg(build_os, arch, spec, srcdir)
def make_repo(repodir, distro, build_os, spec):
    """Build the package repository under repodir, dispatching on whether
    the path belongs to a deb-based or an rpm-based platform.
    """
    if re.search("(debian|ubuntu)", repodir):
        make_deb_repo(repodir, distro, build_os, spec)
        return
    if re.search("(suse|centos|redhat|fedora)", repodir):
        packager.make_rpm_repo(repodir)
        return
    raise Exception("BUG: unsupported platform?")
def make_deb_repo(repo, distro, build_os, spec):
    """Turn the directory tree around repo into a usable apt repository.

    First writes a Packages/Packages.gz index for every directory that
    contains .deb files, then writes the top-level Release file.  The
    ordering matters: Release summarises the Packages.gz files.
    """
    # Note: the Debian repository Packages files must be generated
    # very carefully in order to be usable.
    oldpwd=os.getcwd()
    os.chdir(repo+"../../../../../../")
    try:
        # Every directory that holds at least one .deb gets its own index.
        # dpkg-scanpackages/find emit "./dir/..." paths; [2:] strips "./".
        dirs=set([os.path.dirname(deb)[2:] for deb in packager.backtick(["find", ".", "-name", "*.deb"]).split()])
        for d in dirs:
            s=packager.backtick(["dpkg-scanpackages", d, "/dev/null"])
            with open(d+"/Packages", "w") as f:
                f.write(s)
            # gzip -9c output is bytes, hence the "wb" mode below.
            b=packager.backtick(["gzip", "-9c", d+"/Packages"])
            with open(d+"/Packages.gz", "wb") as f:
                f.write(b)
    finally:
        os.chdir(oldpwd)
    # Notes: the Release{,.gpg} files must live in a special place,
    # and must be created after all the Packages.gz files have been
    # done.
    s="""Origin: mongodb
Label: mongodb
Suite: %s
Codename: %s/mongodb-enterprise
Architectures: amd64
Components: %s
Description: MongoDB packages
""" % (distro.repo_os_version(build_os), distro.repo_os_version(build_os), distro.repo_component())
    # Remove any stale Release/Release.gpg so the repo never advertises a
    # signature that no longer matches its contents.
    if os.path.exists(repo+"../../Release"):
        os.unlink(repo+"../../Release")
    if os.path.exists(repo+"../../Release.gpg"):
        os.unlink(repo+"../../Release.gpg")
    oldpwd=os.getcwd()
    os.chdir(repo+"../../")
    # apt-ftparchive appends the checksum sections for the Packages files.
    s2=packager.backtick(["apt-ftparchive", "release", "."])
    try:
        with open("Release", 'w') as f:
            f.write(s)
            f.write(s2)
    finally:
        os.chdir(oldpwd)
def move_repos_into_place(src, dst):
    """Publish the repository tree under src as dst.

    Find all the stuff in src/*, move it to a freshly-created dated
    directory beside dst, then play some games with symlinks so that dst
    names the new stuff and dst+".old" names the previous one.
    """
    # First, make a crispy fresh new directory to put the stuff in.
    i = 0
    while True:
        date_suffix = time.strftime("%Y-%m-%d")
        dname = dst + ".%s.%d" % (date_suffix, i)
        try:
            os.mkdir(dname)
            break
        except OSError as exc:
            # Name already taken: bump the serial number and retry.
            # (The old "exc = sys.exc_value" form was Python-2-only, and
            # "raise exc" discarded the traceback; bare raise keeps it.)
            if exc.errno != errno.EEXIST:
                raise
        i = i + 1

    # Put the stuff in our new directory.
    for r in os.listdir(src):
        packager.sysassert(["cp", "-rv", src + "/" + r, dname])

    # Make a symlink to the new directory; the symlink will be renamed
    # to dst shortly.
    i = 0
    while True:
        tmpnam = dst + ".TMP.%d" % i
        try:
            os.symlink(dname, tmpnam)
            break
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        i = i + 1

    # Make a symlink to the old directory; this symlink will be
    # renamed shortly, too.
    oldnam = None
    if os.path.exists(dst):
        i = 0
        while True:
            oldnam = dst + ".old.%d" % i
            try:
                os.symlink(os.readlink(dst), oldnam)
                break
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
            # BUGFIX: this loop previously never incremented i, so an
            # EEXIST collision retried the same name forever.
            i = i + 1

    # rename() replaces an existing dst symlink in one step on POSIX.
    os.rename(tmpnam, dst)
    if oldnam:
        os.rename(oldnam, dst + ".old")
if __name__ == "__main__":
    # NOTE(review): main() accepts argv but never uses it; packager.get_args
    # presumably reads sys.argv directly -- confirm before refactoring.
    main(sys.argv)
| false | true |
f7fb0e9d28b5bfb3aac3f38811c22a8a7ea1ebac | 2,114 | py | Python | train.py | bearcatt/single-shot-detector | 649d55aa84f1c988afd920ed8abc601512405825 | [
"MIT"
] | 1 | 2020-01-31T09:28:54.000Z | 2020-01-31T09:28:54.000Z | train.py | bearcatt/single-shot-detector | 649d55aa84f1c988afd920ed8abc601512405825 | [
"MIT"
] | null | null | null | train.py | bearcatt/single-shot-detector | 649d55aa84f1c988afd920ed8abc601512405825 | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
import json
from model import model_fn, RestoreMovingAverageHook
from detector.input_pipeline import Pipeline
tf.logging.set_verbosity('INFO')
"""
This script does the training.
Also it runs the evaluation now and then during the training.
"""
GPU_TO_USE = '0'
CONFIG = 'config.json'  # 'config_mobilenet.json' or 'config_shufflenet.json'

# Load the run configuration once at import time.  A context manager closes
# the file deterministically (the previous json.load(open(CONFIG)) leaked
# the handle until garbage collection).
with open(CONFIG) as config_file:
    params = json.load(config_file)
def get_input_fn(is_training=True):
    """Build an Estimator input_fn over the train or val tfrecord shards."""
    dataset_key = 'train_dataset' if is_training else 'val_dataset'
    dataset_path = params[dataset_key]
    shard_names = sorted(
        n for n in os.listdir(dataset_path) if n.endswith('.tfrecords')
    )
    filenames = [os.path.join(dataset_path, n) for n in shard_names]

    def input_fn():
        # Keep the input pipeline on the CPU.
        with tf.device('/cpu:0'), tf.name_scope('input_pipeline'):
            return Pipeline(filenames, is_training, params).dataset

    return input_fn
# Session: let TF fall back to a supported device, and pin the visible GPU.
session_config = tf.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
    model_dir=params['model_dir'], session_config=session_config,
    save_summary_steps=600, save_checkpoints_secs=1800,
    log_step_count_steps=1000
)

# Warm-start only the backbone variables from the pretrained checkpoint.
if params['backbone'] == 'mobilenet':
    scope_to_restore = 'MobilenetV1/*'
elif params['backbone'] == 'shufflenet':
    scope_to_restore = 'ShuffleNetV2/*'
else:
    # Previously an unknown backbone fell through silently and crashed
    # later with a NameError on scope_to_restore; fail fast instead.
    raise ValueError('unsupported backbone: {!r}'.format(params['backbone']))
warm_start = tf.estimator.WarmStartSettings(
    params['pretrained_checkpoint'], [scope_to_restore]
)

train_input_fn = get_input_fn(is_training=True)
val_input_fn = get_input_fn(is_training=False)
estimator = tf.estimator.Estimator(
    model_fn, params=params, config=run_config,
    warm_start_from=warm_start
)

train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=params['num_steps'])
eval_spec = tf.estimator.EvalSpec(
    val_input_fn, steps=None, start_delay_secs=3600 * 3, throttle_secs=3600 * 3,
    # TODO: remove this when not using ema
    hooks=[RestoreMovingAverageHook(params['model_dir'])]
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| 31.088235 | 84 | 0.758278 | import os
import tensorflow as tf
import json
from model import model_fn, RestoreMovingAverageHook
from detector.input_pipeline import Pipeline
tf.logging.set_verbosity('INFO')
GPU_TO_USE = '0'
CONFIG = 'config.json'
params = json.load(open(CONFIG))
def get_input_fn(is_training=True):
dataset_path = params['train_dataset'] if is_training else params['val_dataset']
filenames = os.listdir(dataset_path)
filenames = [n for n in filenames if n.endswith('.tfrecords')]
filenames = [os.path.join(dataset_path, n) for n in sorted(filenames)]
def input_fn():
with tf.device('/cpu:0'), tf.name_scope('input_pipeline'):
pipeline = Pipeline(filenames, is_training, params)
return pipeline.dataset
return input_fn
session_config = tf.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
model_dir=params['model_dir'], session_config=session_config,
save_summary_steps=600, save_checkpoints_secs=1800,
log_step_count_steps=1000
)
if params['backbone'] == 'mobilenet':
scope_to_restore = 'MobilenetV1/*'
elif params['backbone'] == 'shufflenet':
scope_to_restore = 'ShuffleNetV2/*'
warm_start = tf.estimator.WarmStartSettings(
params['pretrained_checkpoint'], [scope_to_restore]
)
train_input_fn = get_input_fn(is_training=True)
val_input_fn = get_input_fn(is_training=False)
estimator = tf.estimator.Estimator(
model_fn, params=params, config=run_config,
warm_start_from=warm_start
)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=params['num_steps'])
eval_spec = tf.estimator.EvalSpec(
val_input_fn, steps=None, start_delay_secs=3600 * 3, throttle_secs=3600 * 3,
hooks=[RestoreMovingAverageHook(params['model_dir'])]
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| true | true |
f7fb0ecff3e83cd2094615f1ba6ec6c306e6fdb3 | 1,949 | py | Python | epytope/Data/pssms/hammer/mat/DRB5_0105_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/hammer/mat/DRB5_0105_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/hammer/mat/DRB5_0105_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | DRB5_0105_9 = {0: {'A': -999.0, 'C': -999.9, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': 0.0, 'I': -1.0, 'H': -999.0, 'K': -999.0, 'M': -1.0, 'L': -1.0, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': 0.0, 'V': -1.0, 'Y': 0.0}, 1: {'A': 0.0, 'C': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'C': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'C': 0.0, 'E': -1.3, 'D': -1.9, 'G': -1.6, 'F': -0.6, 'I': 1.3, 'H': -1.4, 'K': -1.7, 'M': 1.7, 'L': 0.6, 'N': -1.7, 'Q': -0.7, 'P': -1.5, 'S': -0.5, 'R': -1.7, 'T': 0.3, 'W': -1.4, 'V': 1.1, 'Y': -0.6}, 4: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'C': 0.0, 'E': -2.0, 'D': -2.0, 'G': -0.3, 'F': -1.7, 'I': -1.4, 'H': -1.2, 'K': -1.5, 'M': -1.5, 'L': -1.0, 'N': -1.3, 'Q': -1.4, 'P': 0.2, 'S': -0.5, 'R': -1.3, 'T': -0.8, 'W': -1.7, 'V': -1.3, 'Y': -1.0}, 6: {'A': 0.0, 'C': 0.0, 'E': -0.9, 'D': -1.5, 'G': 0.6, 'F': 1.5, 'I': 1.2, 'H': 1.2, 'K': 0.9, 'M': 0.4, 'L': 0.6, 'N': 0.5, 'Q': 0.7, 'P': -0.6, 'S': -0.2, 'R': 1.3, 'T': 0.3, 'W': 0.4, 'V': -0.3, 'Y': 1.2}, 7: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'C': 0.0, 'E': -0.6, 'D': -1.5, 'G': 0.4, 'F': 1.2, 'I': 1.2, 'H': 1.0, 'K': 2.7, 'M': 0.5, 'L': 1.3, 'N': 0.0, 'Q': 0.7, 'P': -0.8, 'S': 0.7, 'R': 2.5, 'T': -0.2, 'W': -0.7, 'V': 
-0.2, 'Y': 1.3}} | 1,949 | 1,949 | 0.300667 | DRB5_0105_9 = {0: {'A': -999.0, 'C': -999.9, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': 0.0, 'I': -1.0, 'H': -999.0, 'K': -999.0, 'M': -1.0, 'L': -1.0, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': 0.0, 'V': -1.0, 'Y': 0.0}, 1: {'A': 0.0, 'C': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'C': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'C': 0.0, 'E': -1.3, 'D': -1.9, 'G': -1.6, 'F': -0.6, 'I': 1.3, 'H': -1.4, 'K': -1.7, 'M': 1.7, 'L': 0.6, 'N': -1.7, 'Q': -0.7, 'P': -1.5, 'S': -0.5, 'R': -1.7, 'T': 0.3, 'W': -1.4, 'V': 1.1, 'Y': -0.6}, 4: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'C': 0.0, 'E': -2.0, 'D': -2.0, 'G': -0.3, 'F': -1.7, 'I': -1.4, 'H': -1.2, 'K': -1.5, 'M': -1.5, 'L': -1.0, 'N': -1.3, 'Q': -1.4, 'P': 0.2, 'S': -0.5, 'R': -1.3, 'T': -0.8, 'W': -1.7, 'V': -1.3, 'Y': -1.0}, 6: {'A': 0.0, 'C': 0.0, 'E': -0.9, 'D': -1.5, 'G': 0.6, 'F': 1.5, 'I': 1.2, 'H': 1.2, 'K': 0.9, 'M': 0.4, 'L': 0.6, 'N': 0.5, 'Q': 0.7, 'P': -0.6, 'S': -0.2, 'R': 1.3, 'T': 0.3, 'W': 0.4, 'V': -0.3, 'Y': 1.2}, 7: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'C': 0.0, 'E': -0.6, 'D': -1.5, 'G': 0.4, 'F': 1.2, 'I': 1.2, 'H': 1.0, 'K': 2.7, 'M': 0.5, 'L': 1.3, 'N': 0.0, 'Q': 0.7, 'P': -0.8, 'S': 0.7, 'R': 2.5, 'T': -0.2, 'W': -0.7, 'V': -0.2, 'Y': 1.3}} | 
true | true |
f7fb1109bf89db5bf87c82699fc7b9493c2500d3 | 1,035 | py | Python | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | tests/continuous_integration.py | kfaRabi/online-judge-tools | 79de8d37e1aa78a7c4c82c6a666f1f1602caf545 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
import unittest
# TODO: these command should be written at once, at only .travis.yml or at only here
paths = ['oj', 'onlinejudge', 'setup.py', 'tests']
class ContinuousIntegrationTest(unittest.TestCase):
    """A dummy test to run the commands same to CI on local environments"""

    # On CI these commands run from the pipeline config, so skip them there.
    _skip_on_ci = unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml')

    @_skip_on_ci
    def test_isort(self):
        # isort in check mode fails if any file would be re-sorted.
        subprocess.check_call(['isort', '--check-only', '--diff', '--recursive'] + paths, stdout=sys.stdout, stderr=sys.stderr)

    @_skip_on_ci
    def test_yapf(self):
        # yapf --diff prints nothing when everything is already formatted.
        diff = subprocess.check_output(['yapf', '--diff', '--recursive'] + paths, stderr=sys.stderr)
        self.assertEqual(diff, b'')

    @_skip_on_ci
    def test_mypy(self):
        subprocess.check_call(['mypy', '--show-traceback'] + paths, stdout=sys.stdout, stderr=sys.stderr)
| 39.807692 | 127 | 0.68599 | import os
import subprocess
import sys
import unittest
paths = ['oj', 'onlinejudge', 'setup.py', 'tests']
class ContinuousIntegrationTest(unittest.TestCase):
@unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml')
def test_isort(self):
subprocess.check_call(['isort', '--check-only', '--diff', '--recursive'] + paths, stdout=sys.stdout, stderr=sys.stderr)
@unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml')
def test_yapf(self):
output = subprocess.check_output(['yapf', '--diff', '--recursive'] + paths, stderr=sys.stderr)
self.assertEqual(output, b'')
@unittest.skipIf('CI' in os.environ, 'the same command is call from .travis.yml')
def test_mypy(self):
subprocess.check_call(['mypy', '--show-traceback'] + paths, stdout=sys.stdout, stderr=sys.stderr)
| true | true |
f7fb114741c88ed71dd1c4f731e86104ad238dd7 | 1,516 | py | Python | core_marketing/migrations/0015_corevendortestmpttnode.py | amenabe22/ashewa_backend | 0c1aba1de3fcb86538482da1b628c68d4e5960a7 | [
"MIT"
] | null | null | null | core_marketing/migrations/0015_corevendortestmpttnode.py | amenabe22/ashewa_backend | 0c1aba1de3fcb86538482da1b628c68d4e5960a7 | [
"MIT"
] | null | null | null | core_marketing/migrations/0015_corevendortestmpttnode.py | amenabe22/ashewa_backend | 0c1aba1de3fcb86538482da1b628c68d4e5960a7 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2021-02-17 16:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Auto-generated migration: creates the CoreVendorTestMpttNode model."""

    dependencies = [
        ('vendors', '0008_auto_20210214_2100'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core_marketing', '0014_auto_20210211_0951'),
    ]

    operations = [
        migrations.CreateModel(
            name='CoreVendorTestMpttNode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # lft/rght/tree_id/level are nested-set bookkeeping columns
                # maintained by django-mptt (hence editable=False).
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                ('marketing_plan', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='vendors.vendorlevelplans')),
                # Self-referential tree link; 'children' is the reverse accessor.
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core_marketing.corevendortestmpttnode')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 43.314286 | 192 | 0.650396 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('vendors', '0008_auto_20210214_2100'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core_marketing', '0014_auto_20210211_0951'),
]
operations = [
migrations.CreateModel(
name='CoreVendorTestMpttNode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('marketing_plan', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='vendors.vendorlevelplans')),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core_marketing.corevendortestmpttnode')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| true | true |
f7fb11bd2f9bc2b26c942f9db475fceba0171778 | 3,113 | py | Python | django_afip/clients.py | odony/django-afip | 5907dc0eb144744f423126249ea498595cee366f | [
"ISC"
] | null | null | null | django_afip/clients.py | odony/django-afip | 5907dc0eb144744f423126249ea498595cee366f | [
"ISC"
] | null | null | null | django_afip/clients.py | odony/django-afip | 5907dc0eb144744f423126249ea498595cee366f | [
"ISC"
] | null | null | null | __all__ = ("get_client",)
from urllib.parse import urlparse
import pytz
from django.utils.functional import LazyObject
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.util.ssl_ import create_urllib3_context, DEFAULT_CIPHERS
from zeep import Client
from zeep.cache import SqliteCache
from zeep.transports import Transport
# Argentina's timezone (first entry of pytz's country list for "ar").
TZ_AR = pytz.timezone(pytz.country_timezones["ar"][0])
# urllib3's default cipher list with DH key exchange disabled ("!DH");
# presumably required by AFIP's TLS endpoints -- see AFIPAdapter below.
CIPHERS = DEFAULT_CIPHERS + ":!DH"
# WSDL locations keyed by (service_name, sandbox_flag); the True entries
# point at AFIP's homologation (testing) hosts.
WSDLS = {
    ("wsaa", False): "https://wsaa.afip.gov.ar/ws/services/LoginCms?wsdl",
    ("wsfe", False): "https://servicios1.afip.gov.ar/wsfev1/service.asmx?WSDL",
    ("wsaa", True): "https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl",
    ("wsfe", True): "https://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL",
}
class AFIPAdapter(HTTPAdapter):
    """An adapter with reduced security so it'll work with AFIP."""

    @staticmethod
    def _relaxed_kwargs(kwargs):
        # Every pool gets an explicit SSL context built with the weakened
        # cipher list that AFIP's endpoints require.
        kwargs["ssl_context"] = create_urllib3_context(ciphers=CIPHERS)
        return kwargs

    def init_poolmanager(self, *args, **kwargs):
        return super().init_poolmanager(*args, **self._relaxed_kwargs(kwargs))

    def proxy_manager_for(self, *args, **kwargs):
        return super().proxy_manager_for(*args, **self._relaxed_kwargs(kwargs))
class LazyTransport(LazyObject):
    """A lazy-initialized Zeep transport.

    This transport does two non-default things:

    - Reduces TLS security. Sadly, AFIP only has insecure endpoints, so we're
      forced to reduce security to talk to them.
    - Caches fetched WSDL files for a whole day.
    """

    def _setup(self):
        """Initialise this lazy object with a configured zeep Transport."""
        session = Session()
        # Mount the reduced-security adapter for every AFIP host we may talk
        # to, derived from the known WSDL locations.
        for wsdl_url in WSDLS.values():
            parts = urlparse(wsdl_url)
            session.mount(f"{parts.scheme}://{parts.netloc}", AFIPAdapter())
        self._wrapped = Transport(cache=SqliteCache(timeout=86400), session=session)
transport = LazyTransport()

# Memoised zeep clients, keyed by (service_name, sandbox).
cached_clients = {}


def get_client(service_name, sandbox=False):
    """
    Return a client for a given service.

    The `sandbox` argument should only be necessary if the client will be
    used to make a request. If it will only be used to serialize objects, it is
    irrelevant. A caller can avoid the overhead of determining the sandbox mode in the
    calling context if only serialization operations will take place.

    :param string service_name: The name of the web services.
    :param bool sandbox: Whether the sandbox (or production) environment should
        be used by the returned client.

    :returns: A zeep client to communicate with an AFIP web service.
    :rtype: zeep.Client
    """
    key = (
        service_name.lower(),
        sandbox,
    )
    # Resolve the WSDL first, so only a genuinely unknown service maps to
    # ValueError.  Previously the KeyError handler also wrapped Client
    # construction and cache access, so an unrelated KeyError raised there
    # would have been mislabeled "Unknown service name".
    try:
        wsdl = WSDLS[key]
    except KeyError:
        raise ValueError("Unknown service name, {}".format(service_name))

    # zeep clients are expensive to build (the WSDL must be parsed), so
    # construct each one at most once.
    if key not in cached_clients:
        cached_clients[key] = Client(wsdl, transport=transport)
    return cached_clients[key]
| 34.208791 | 87 | 0.692258 | __all__ = ("get_client",)
from urllib.parse import urlparse
import pytz
from django.utils.functional import LazyObject
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.util.ssl_ import create_urllib3_context, DEFAULT_CIPHERS
from zeep import Client
from zeep.cache import SqliteCache
from zeep.transports import Transport
TZ_AR = pytz.timezone(pytz.country_timezones["ar"][0])
CIPHERS = DEFAULT_CIPHERS + ":!DH"
WSDLS = {
("wsaa", False): "https://wsaa.afip.gov.ar/ws/services/LoginCms?wsdl",
("wsfe", False): "https://servicios1.afip.gov.ar/wsfev1/service.asmx?WSDL",
("wsaa", True): "https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl",
("wsfe", True): "https://wswhomo.afip.gov.ar/wsfev1/service.asmx?WSDL",
}
class AFIPAdapter(HTTPAdapter):
def init_poolmanager(self, *args, **kwargs):
context = create_urllib3_context(ciphers=CIPHERS)
kwargs["ssl_context"] = context
return super().init_poolmanager(*args, **kwargs)
def proxy_manager_for(self, *args, **kwargs):
context = create_urllib3_context(ciphers=CIPHERS)
kwargs["ssl_context"] = context
return super().proxy_manager_for(*args, **kwargs)
class LazyTransport(LazyObject):
def _setup(self):
session = Session()
for url in WSDLS.values():
parsed = urlparse(url)
base_url = f"{parsed.scheme}://{parsed.netloc}"
session.mount(base_url, AFIPAdapter())
self._wrapped = Transport(cache=SqliteCache(timeout=86400), session=session)
transport = LazyTransport()
cached_clients = {}
def get_client(service_name, sandbox=False):
key = (
service_name.lower(),
sandbox,
)
try:
if key not in cached_clients:
cached_clients[key] = Client(WSDLS[key], transport=transport)
return cached_clients[key]
except KeyError:
raise ValueError("Unknown service name, {}".format(service_name))
| true | true |
f7fb13916043f328c397b1864ebf8d54a6bcf399 | 4,592 | py | Python | masonite/response.py | STejas6/core | 0576d50093b4520915636a216ebb26aa72aea9d5 | [
"MIT"
] | null | null | null | masonite/response.py | STejas6/core | 0576d50093b4520915636a216ebb26aa72aea9d5 | [
"MIT"
] | null | null | null | masonite/response.py | STejas6/core | 0576d50093b4520915636a216ebb26aa72aea9d5 | [
"MIT"
] | null | null | null | """The Masonite Response Object."""
import json
from masonite.exceptions import ResponseError
from masonite.helpers.Extendable import Extendable
from masonite.view import View
from orator.support.collection import Collection
from orator import Model
from masonite.app import App
class Response(Extendable):
    """A Response object used to abstract the logic of getting a response ready to be returned."""

    def __init__(self, app: App):
        """Construct the response around the container's current Request.

        Arguments:
            app {masonite.app.App} -- The Masonite container.
        """
        self.app = app
        self.request = self.app.make('Request')

    def json(self, payload, status=200):
        """Gets the response ready for a JSON response.

        Arguments:
            payload {dict|list} -- Either a dictionary or a list.

        Keyword Arguments:
            status {int} -- The Response status code. (default: {200})

        Returns:
            string -- Returns a string representation of the data
        """
        self.app.bind('Response', json.dumps(payload))
        self.make_headers(content_type="application/json; charset=utf-8")
        self.request.status(status)
        return self.data()

    def make_headers(self, content_type="text/html; charset=utf-8"):
        """Make the appropriate headers based on changes made in controllers or middleware.

        Keyword Arguments:
            content_type {str} -- The content type to set. (default: {"text/html; charset=utf-8"})
        """
        self.request.header('Content-Length', str(len(self.to_bytes())))

        # If the user did not change it directly
        if not self.request.has_raw_header('Content-Type'):
            self.request.header('Content-Type', content_type)

    def data(self):
        """Get the data that will be returned to the WSGI server.

        Returns:
            string -- Returns a string representation of the response
        """
        if self.app.has('Response'):
            return self.app.make('Response')

        return ''

    def converted_data(self):
        """Converts the data appropriately so the WSGI server can handle it.

        Returns:
            string -- Returns a string representation of the data
        """
        # Fetch once instead of re-resolving from the container per check.
        data = self.data()
        if isinstance(data, (dict, list)):
            return json.dumps(data)
        return data

    def view(self, view, status=200):
        """Set a string or view to be returned.

        Arguments:
            view {string|dict|list|masonite.view.View} -- Some data type that is an appropriate response.

        Keyword Arguments:
            status {int} -- The Response status code. (default: {200})

        Raises:
            ResponseError -- If a data type that is not an acceptable response type is returned.

        Returns:
            string|dict|list -- Returns the data to be returned.
        """
        if not self.request.get_status():
            self.request.status(status)

        if isinstance(view, (dict, list)):
            return self.json(view, status=self.request.get_status())
        elif isinstance(view, (Collection, Model)):
            return self.json(view.serialize(), status=self.request.get_status())
        elif isinstance(view, int):
            view = str(view)
        elif isinstance(view, View):
            view = view.rendered_template
        elif isinstance(view, self.request.__class__):
            # The controller returned the Request itself; use whatever
            # response is already bound in the container.
            view = self.data()
        elif view is None:
            raise ResponseError('Responses cannot be of type: None.')

        if not isinstance(view, str):
            raise ResponseError('Invalid response type of {}'.format(type(view)))

        self.app.bind('Response', view)
        self.make_headers()
        return self.data()

    def redirect(self, location=None, status=302):
        """Set the redirection on the server.

        Keyword Arguments:
            location {string} -- The URL to redirect to (default: {None})
            status {int} -- The Response status code. (default: {302})

        Returns:
            string -- Returns the data to be returned.
        """
        self.request.status(status)
        if not location:
            location = self.request.redirect_url

        self.request.reset_headers()
        self.request.header('Location', location)
        self.app.bind('Response', 'redirecting ...')

        return self.data()

    def to_bytes(self):
        """Converts the data to bytes so the WSGI server can handle it.

        Returns:
            bytes -- The converted response to bytes.
        """
        return bytes(self.converted_data(), 'utf-8')
| 32.567376 | 105 | 0.614547 |
import json
from masonite.exceptions import ResponseError
from masonite.helpers.Extendable import Extendable
from masonite.view import View
from orator.support.collection import Collection
from orator import Model
from masonite.app import App
class Response(Extendable):
    """Build and register HTTP responses on the Masonite IOC container.

    The rendered body is stored under the container key ``'Response'`` and
    status/headers are set on the current ``Request``.
    NOTE(review): methods below call ``json.dumps`` -- assumes ``json`` is
    imported earlier in this module (not visible in this excerpt); confirm.
    """
    def __init__(self, app: App):
        """Capture the container and resolve the active Request from it."""
        self.app = app
        self.request = self.app.make('Request')
    def json(self, payload, status=200):
        """Serialize *payload* to JSON, set JSON content headers and the
        given *status*, and return the bound response body."""
        self.app.bind('Response', json.dumps(payload))
        self.make_headers(content_type="application/json; charset=utf-8")
        self.request.status(status)
        return self.data()
    def make_headers(self, content_type="text/html; charset=utf-8"):
        """Set Content-Length from the current body, and Content-Type only
        when the request has not already fixed one explicitly."""
        self.request.header('Content-Length', str(len(self.to_bytes())))
        # Do not clobber a Content-Type the route handler set itself.
        if not self.request.has_raw_header('Content-Type'):
            self.request.header('Content-Type', content_type)
    def data(self):
        """Return the currently bound response body, or '' when none is bound."""
        if self.app.has('Response'):
            return self.app.make('Response')
        return ''
    def converted_data(self):
        """Return the body as a string, JSON-encoding dict/list bodies."""
        if isinstance(self.data(), dict) or isinstance(self.data(), list):
            return json.dumps(self.data())
        else:
            return self.data()
    def view(self, view, status=200):
        """Coerce *view* (str, int, dict/list, Orator model/collection, View,
        or the Request itself) into a response body and bind it.

        Raises ResponseError for ``None`` or any unconvertible type.
        """
        if not self.request.get_status():
            self.request.status(status)
        if isinstance(view, dict) or isinstance(view, list):
            return self.json(view, status=self.request.get_status())
        elif isinstance(view, Collection) or isinstance(view, Model):
            # Orator objects are serialized to plain data before encoding.
            return self.json(view.serialize(), status=self.request.get_status())
        elif isinstance(view, int):
            view = str(view)
        elif isinstance(view, View):
            view = view.rendered_template
        elif isinstance(view, self.request.__class__):
            # Route returned the Request; reuse whatever body is already bound.
            view = self.data()
        elif view is None:
            raise ResponseError('Responses cannot be of type: None.')
        if not isinstance(view, str):
            raise ResponseError('Invalid response type of {}'.format(type(view)))
        self.app.bind('Response', view)
        self.make_headers()
        return self.data()
    def redirect(self, location=None, status=302):
        """Issue a redirect to *location* (or the request's redirect_url)."""
        self.request.status(status)
        if not location:
            location = self.request.redirect_url
        # Drop previously queued headers so only Location is emitted.
        self.request.reset_headers()
        self.request.header('Location', location)
        self.app.bind('Response', 'redirecting ...')
        return self.data()
    def to_bytes(self):
        """Return the response body encoded as UTF-8 bytes."""
        return bytes(self.converted_data(), 'utf-8')
| true | true |
f7fb140774782e4b4a5d6bfb8bcc6af35d80e8a1 | 280 | py | Python | drivable/configs/drivable/ccnet_r101-d8_512x1024_80k_drivable_bdd100k.py | danielzhangau/bdd100k-models | 10311ad98a111b7a34fbfc84f58776175b251ef0 | [
"Apache-2.0"
] | null | null | null | drivable/configs/drivable/ccnet_r101-d8_512x1024_80k_drivable_bdd100k.py | danielzhangau/bdd100k-models | 10311ad98a111b7a34fbfc84f58776175b251ef0 | [
"Apache-2.0"
] | null | null | null | drivable/configs/drivable/ccnet_r101-d8_512x1024_80k_drivable_bdd100k.py | danielzhangau/bdd100k-models | 10311ad98a111b7a34fbfc84f58776175b251ef0 | [
"Apache-2.0"
] | null | null | null | """CCNet with ResNet-101-d8."""
# Inherit schedule/dataset/model defaults from the ResNet-50 CCNet config.
_base_ = "./ccnet_r50-d8_512x1024_80k_drivable_bdd100k.py"
# Override only the backbone: ResNet-101 (v1c stem) with its ImageNet weights.
model = dict(pretrained="open-mmlab://resnet101_v1c", backbone=dict(depth=101))
# Released BDD100K drivable-area checkpoint matching this exact config.
load_from = "https://dl.cv.ethz.ch/bdd100k/drivable/models/ccnet_r101-d8_512x1024_80k_drivable_bdd100k.pth"
| 46.666667 | 107 | 0.785714 |
_base_ = "./ccnet_r50-d8_512x1024_80k_drivable_bdd100k.py"
model = dict(pretrained="open-mmlab://resnet101_v1c", backbone=dict(depth=101))
load_from = "https://dl.cv.ethz.ch/bdd100k/drivable/models/ccnet_r101-d8_512x1024_80k_drivable_bdd100k.pth"
| true | true |
f7fb153ab017555bcae740748306748e4bcf1cc1 | 825 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_portal_sale/__manifest__.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_portal_sale/__manifest__.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/website_portal_sale/__manifest__.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    # Odoo module manifest: metadata only, no executable logic.
    'name': 'Website Portal for Sales',
    'category': 'Website',
    'summary': 'Add your sales document in the frontend portal (sales order, quotations, invoices)',
    'version': '1.0',
    'description': """
Add your sales document in the frontend portal. Your customers will be able to connect to their portal to see the list (and the state) of their invoices (pdf report), sales orders and quotations (web pages).
""",
    # Modules that must be installed before this one.
    'depends': [
        'portal_sale',
        'website_portal',
        'website_payment',
    ],
    # Views and access rules loaded on install/update.
    'data': [
        'views/website_portal_sale_templates.xml',
        'security/ir.model.access.csv',
    ],
    # Sample records, loaded only when demo data is enabled.
    'demo': [
        'data/sale_demo.xml'
    ],
    'installable': True,
}
| 31.730769 | 207 | 0.621818 |
{
'name': 'Website Portal for Sales',
'category': 'Website',
'summary': 'Add your sales document in the frontend portal (sales order, quotations, invoices)',
'version': '1.0',
'description': """
Add your sales document in the frontend portal. Your customers will be able to connect to their portal to see the list (and the state) of their invoices (pdf report), sales orders and quotations (web pages).
""",
'depends': [
'portal_sale',
'website_portal',
'website_payment',
],
'data': [
'views/website_portal_sale_templates.xml',
'security/ir.model.access.csv',
],
'demo': [
'data/sale_demo.xml'
],
'installable': True,
}
| true | true |
f7fb154e2cad7dc11c88f8c9e6d093d24c4375b0 | 151 | py | Python | day06/test01.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | day06/test01.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | day06/test01.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | data = [1,2,3,4,5]
# Total the module-level ``data`` list three different ways; each prints 15.
su = 0
for value in data:
    su = su + value
print(su)
# Same total, accumulated through explicit indexing this time.
su2 = 0
for position in range(len(data)):
    su2 += data[position]
print(su2)
# Built-in sum() as the reference answer.
print(sum(data))
| 10.066667 | 28 | 0.549669 | data = [1,2,3,4,5]
su = 0
for d in data:
su += d
print(su)
su2 = 0
for i in range(0,len(data)):
su2 += data[i]
print(su2)
print(sum(data))
| true | true |
f7fb15a165389e445d41522b64eaad8073ddaa3f | 736 | py | Python | exercises/Linked Lists/findKthLastElement.py | RakaPKS/StructuresAlgorithmsAndConcepts | 570339f06562ca6dbe4ff7a2f12ec8413b1dcbba | [
"MIT"
] | null | null | null | exercises/Linked Lists/findKthLastElement.py | RakaPKS/StructuresAlgorithmsAndConcepts | 570339f06562ca6dbe4ff7a2f12ec8413b1dcbba | [
"MIT"
] | null | null | null | exercises/Linked Lists/findKthLastElement.py | RakaPKS/StructuresAlgorithmsAndConcepts | 570339f06562ca6dbe4ff7a2f12ec8413b1dcbba | [
"MIT"
] | null | null | null | # Find k-th last element in a single linked list
import sys
sys.path.append("../../LinkedList")
from LinkedList import Node
from LinkedList import LinkedList
def findKthElement(node, k, n=0):
    """Return ``(node, length)`` for a singly linked list.

    ``node`` is the k-th element counted from the END of the list
    (1-based: k=1 is the last node), or ``None`` when ``k`` is out of
    range.  ``length`` is ``n`` plus the number of nodes traversed,
    matching the accumulator tuple the original recursive version
    returned.

    Rewritten iteratively: the previous implementation recursed once per
    node and would hit Python's default recursion limit (~1000) on long
    lists; this version handles lists of any length.
    """
    # First pass: count the nodes.
    length = 0
    cursor = node
    while cursor is not None:
        length += 1
        cursor = cursor.next
    total = n + length
    # The k-th node from the end sits `length - k` hops from the head.
    steps = length - k
    if 0 <= steps < length:
        cursor = node
        for _ in range(steps):
            cursor = cursor.next
        return (cursor, total)
    # k out of range (k <= 0 or k > length), or an empty list.
    return (None, total)
# Build a four-node linked list by hand and exercise findKthElement.
ll = LinkedList()
first = Node("first")
second = Node("second")
third = Node("third")
# NOTE(review): the tail node is created with the payload "second" -- this
# looks like a copy/paste slip for "fourth"; confirm the intended value.
last = Node("second")
ll.head = first
first.next = second
second.next = third
third.next = last
# k=2 -> second element from the end, i.e. the "third" node.
node, _ = findKthElement(ll.head, 2)
print(node.data)
| 20.444444 | 51 | 0.597826 |
import sys
sys.path.append("../../LinkedList")
from LinkedList import Node
from LinkedList import LinkedList
def findKthElement(node, k, n=0):
if node:
item, m = findKthElement(node.next, k, n+1)
if m-n == k:
return (node, m)
else:
return(item, m)
else:
return (None, n)
ll = LinkedList()
first = Node("first")
second = Node("second")
third = Node("third")
last = Node("second")
ll.head = first
first.next = second
second.next = third
third.next = last
node, _ = findKthElement(ll.head, 2)
print(node.data)
| true | true |
f7fb17993e7d481ee3819b26d1d6bef086debfda | 2,458 | py | Python | src/genie/libs/parser/ios/cat6k/tests/ShowInventory/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/ios/cat6k/tests/ShowInventory/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/ios/cat6k/tests/ShowInventory/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
    # Parsed `show inventory` output (Catalyst 6500): one record per hardware
    # item, keyed by 1-based position; 'vid' is omitted for items reporting none.
    'index': {
        1: {
            'descr': 'Cisco Systems Catalyst 6500 3-slot Chassis System',
            'name': 'WS-C6503-E',
            'pid': 'WS-C6503-E',
            'sn': 'FXS1821Q2H9',
            'vid': 'V03',
        },
        2: {
            'descr': 'OSR-7600 Clock FRU 1',
            'name': 'CLK-7600 1',
            'pid': 'CLK-7600',
            'sn': 'FXS181101V4',
        },
        3: {
            'descr': 'OSR-7600 Clock FRU 2',
            'name': 'CLK-7600 2',
            'pid': 'CLK-7600',
            'sn': 'FXS181101V4',
        },
        4: {
            'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 5.6',
            'name': '1',
            'pid': 'WS-SUP720-3BXL',
            'sn': 'SAL11434P2C',
            'vid': 'V05',
        },
        5: {
            'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 3.1',
            'name': 'msfc sub-module of 1',
            'pid': 'WS-SUP720',
            'sn': 'SAL11434N9G',
        },
        6: {
            'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.8',
            'name': 'switching engine sub-module of 1',
            'pid': 'WS-F6K-PFC3BXL',
            'sn': 'SAL11434LYG',
            'vid': 'V01',
        },
        7: {
            'descr': 'WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 2.6',
            'name': '2',
            'pid': 'WS-X6748-GE-TX',
            'sn': 'SAL1128UPQ9',
            'vid': 'V02',
        },
        8: {
            'descr': 'WS-F6700-DFC3CXL Distributed Forwarding Card 3 Rev. 1.1',
            'name': 'switching engine sub-module of 2',
            'pid': 'WS-F6700-DFC3CXL',
            'sn': 'SAL1214LAG5',
            'vid': 'V01',
        },
        9: {
            'descr': 'Enhanced 3-slot Fan Tray 1',
            'name': 'WS-C6503-E-FAN 1',
            'pid': 'WS-C6503-E-FAN',
            'sn': 'DCH183500KW',
            'vid': 'V02',
        },
        10: {
            'descr': 'AC power supply, 1400 watt 1',
            'name': 'PS 1 PWR-1400-AC',
            'pid': 'PWR-1400-AC',
            'sn': 'ABC0830J127',
            'vid': 'V01',
        },
    },
}
| 34.138889 | 89 | 0.349471 | expected_output = {
'index': {
1: {
'descr': 'Cisco Systems Catalyst 6500 3-slot Chassis System',
'name': 'WS-C6503-E',
'pid': 'WS-C6503-E',
'sn': 'FXS1821Q2H9',
'vid': 'V03',
},
2: {
'descr': 'OSR-7600 Clock FRU 1',
'name': 'CLK-7600 1',
'pid': 'CLK-7600',
'sn': 'FXS181101V4',
},
3: {
'descr': 'OSR-7600 Clock FRU 2',
'name': 'CLK-7600 2',
'pid': 'CLK-7600',
'sn': 'FXS181101V4',
},
4: {
'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 5.6',
'name': '1',
'pid': 'WS-SUP720-3BXL',
'sn': 'SAL11434P2C',
'vid': 'V05',
},
5: {
'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 3.1',
'name': 'msfc sub-module of 1',
'pid': 'WS-SUP720',
'sn': 'SAL11434N9G',
},
6: {
'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.8',
'name': 'switching engine sub-module of 1',
'pid': 'WS-F6K-PFC3BXL',
'sn': 'SAL11434LYG',
'vid': 'V01',
},
7: {
'descr': 'WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 2.6',
'name': '2',
'pid': 'WS-X6748-GE-TX',
'sn': 'SAL1128UPQ9',
'vid': 'V02',
},
8: {
'descr': 'WS-F6700-DFC3CXL Distributed Forwarding Card 3 Rev. 1.1',
'name': 'switching engine sub-module of 2',
'pid': 'WS-F6700-DFC3CXL',
'sn': 'SAL1214LAG5',
'vid': 'V01',
},
9: {
'descr': 'Enhanced 3-slot Fan Tray 1',
'name': 'WS-C6503-E-FAN 1',
'pid': 'WS-C6503-E-FAN',
'sn': 'DCH183500KW',
'vid': 'V02',
},
10: {
'descr': 'AC power supply, 1400 watt 1',
'name': 'PS 1 PWR-1400-AC',
'pid': 'PWR-1400-AC',
'sn': 'ABC0830J127',
'vid': 'V01',
},
},
}
| true | true |
f7fb17c3bb55bbdbb399edfb845e6480f26351f1 | 1,446 | py | Python | core/registration/static/forms.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 22 | 2015-01-16T01:36:32.000Z | 2020-06-08T00:46:18.000Z | core/registration/static/forms.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 8 | 2015-12-28T18:56:19.000Z | 2019-04-01T17:33:48.000Z | core/registration/static/forms.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T20:56:22.000Z | 2022-02-23T06:01:17.000Z | from django import forms
from django.forms import widgets
from core.registration.static.models import StaticReg
from core.utils import resolve_ip_type
from mozdns.forms import BaseForm
from mozdns.utils import ensure_label_domain
class StaticRegForm(BaseForm):
    """ModelForm for creating/editing a StaticReg from a label + domain pair."""
    class Meta:
        model = StaticReg
        fields = (
            'label', 'domain', 'ip_str', 'ip_type', 'ttl', 'views', 'system',
            'name', 'description', 'decommissioned'
        )
        # Render the DNS views m2m as checkboxes instead of a multi-select.
        widgets = {'views': forms.CheckboxSelectMultiple}
class StaticRegFQDNForm(BaseForm):
    """ModelForm variant that takes a full FQDN instead of label + domain."""
    class Meta:
        model = StaticReg
        fields = (
            'fqdn', 'ip_str', 'ip_type', 'ttl', 'views', 'name', 'description',
            'decommissioned'
        )
        # Render the DNS views m2m as checkboxes instead of a multi-select.
        widgets = {'views': forms.CheckboxSelectMultiple}
class StaticRegAutoForm(BaseForm):
    """FQDN-based form that derives ip_type and label/domain automatically."""
    def clean(self, *args, **kwargs):
        """Populate instance fields inferred from the submitted data.

        ip_type comes from resolve_ip_type (first element of its return
        tuple); label/domain are split out of the FQDN by ensure_label_domain.
        NOTE(review): assumes 'ip_str' and 'fqdn' survived field validation
        and are present in cleaned_data -- confirm the fields are required.
        """
        self.instance.ip_type = resolve_ip_type(self.cleaned_data['ip_str'])[0]
        self.instance.label, self.instance.domain = ensure_label_domain(
            self.cleaned_data['fqdn']
        )
        return super(StaticRegAutoForm, self).clean(*args, **kwargs)
    class Meta:
        model = StaticReg
        fields = (
            'fqdn', 'ip_str', 'views', 'description', 'system', 'ttl', 'name',
            'decommissioned'
        )
        widgets = {
            'views': forms.CheckboxSelectMultiple,
            # The owning system is set programmatically, so hide it in the form.
            'system': widgets.HiddenInput
        }
| 30.125 | 79 | 0.613416 | from django import forms
from django.forms import widgets
from core.registration.static.models import StaticReg
from core.utils import resolve_ip_type
from mozdns.forms import BaseForm
from mozdns.utils import ensure_label_domain
class StaticRegForm(BaseForm):
class Meta:
model = StaticReg
fields = (
'label', 'domain', 'ip_str', 'ip_type', 'ttl', 'views', 'system',
'name', 'description', 'decommissioned'
)
widgets = {'views': forms.CheckboxSelectMultiple}
class StaticRegFQDNForm(BaseForm):
class Meta:
model = StaticReg
fields = (
'fqdn', 'ip_str', 'ip_type', 'ttl', 'views', 'name', 'description',
'decommissioned'
)
widgets = {'views': forms.CheckboxSelectMultiple}
class StaticRegAutoForm(BaseForm):
def clean(self, *args, **kwargs):
self.instance.ip_type = resolve_ip_type(self.cleaned_data['ip_str'])[0]
self.instance.label, self.instance.domain = ensure_label_domain(
self.cleaned_data['fqdn']
)
return super(StaticRegAutoForm, self).clean(*args, **kwargs)
class Meta:
model = StaticReg
fields = (
'fqdn', 'ip_str', 'views', 'description', 'system', 'ttl', 'name',
'decommissioned'
)
widgets = {
'views': forms.CheckboxSelectMultiple,
'system': widgets.HiddenInput
}
| true | true |
f7fb17e7692aa6aae2d41d6d66cf67f080614521 | 8,676 | py | Python | plugins/time.py | Jeglet/pcbot | 89178d4982151adb2fadfacdc3080e46cda9e891 | [
"MIT"
] | null | null | null | plugins/time.py | Jeglet/pcbot | 89178d4982151adb2fadfacdc3080e46cda9e891 | [
"MIT"
] | null | null | null | plugins/time.py | Jeglet/pcbot | 89178d4982151adb2fadfacdc3080e46cda9e891 | [
"MIT"
] | null | null | null | """ Module for time commands and reminders and such.
Commands:
when
countdown
"""
import asyncio
from operator import itemgetter
import discord
import pendulum
from pytz import all_timezones
import bot
import plugins
from pcbot import Config, Annotate
client = plugins.client  # type: bot.Client
# Persistent plugin state: saved countdowns plus a (currently unused) timezone map.
time_cfg = Config("time", data=dict(countdown={}, timezone={}))
# strftime layout used when echoing datetimes back to the user.
dt_format = "%A, %d %B %Y %H:%M:%S"
@plugins.argument()
def tz_arg(timezone: str):
    """Resolve *timezone* to the first pytz zone whose name ends with it
    (case-insensitive), or None when nothing matches."""
    wanted = timezone.lower()
    return next((tz for tz in all_timezones if tz.lower().endswith(wanted)), None)
def reverse_gmt(timezone: str):
    """Swap the offset sign in a GMT zone name.

    POSIX Etc/GMT zones are sign-inverted relative to the common reading,
    so '+' becomes '-' and vice versa; names without a sign pass through.
    """
    if "+" in timezone:
        return timezone.replace("+", "-")
    if "-" in timezone:
        return timezone.replace("-", "+")
    return timezone
async def init_dt(message: discord.Message, time: str, timezone: str):
    """Parse *time* within *timezone* and return ``(dt, tz)``.

    On parse failure the user is notified in-channel and ``(None, None)``
    is returned so callers can bail out.
    """
    # POSIX Etc/GMT offsets are sign-inverted, so flip before parsing.
    timezone = reverse_gmt(timezone)
    try:
        dt = pendulum.parse(time, tz=timezone, strict=False)
    except ValueError:
        await client.say(message, "Time format not recognized.")
        return None, None
    return dt, timezone
def format_when(dt: pendulum.datetime, timezone: str = "UTC"):
    """Return a human-readable string describing when *dt* happens.

    Shows the absolute time with the *timezone* label, plus a coarse
    distance (e.g. "~2 weeks") and a detailed one (e.g. "13 days 4 hours");
    the coarse part is omitted when it already appears in the detailed text.
    """
    now = pendulum.now("UTC")
    diff = dt.diff(now)
    # Coarse human distance vs. full breakdown (sign stripped from the latter).
    major_diff = dt.diff_for_humans(absolute=True)
    detailed_diff = diff.in_words().replace("-", "")
    return "`{time} {tz}` {pronoun} **{major}{diff}{pronoun2}**.".format(
        time=dt.strftime(dt_format),
        tz=timezone,
        pronoun="is in" if dt > now else "was",
        major="~" + major_diff + "** / **" if major_diff not in detailed_diff else "",
        diff=detailed_diff,
        pronoun2=" ago" if dt < now else ""
    )
@plugins.command(aliases="timezone")
async def when(message: discord.Message, *time, timezone: tz_arg = "UTC"):
    """ Convert time from specified timezone or UTC to formatted string of e.g.
    `2 hours from now`. """
    # Keep the user-facing zone label; `timezone` itself gets sign-flipped below.
    timezone_name = timezone
    if time:
        # With a time argument: parse it and answer with the relative distance.
        dt, timezone = await init_dt(message, " ".join(time), timezone)
        if dt is None or timezone is None:
            return
        await client.say(message, format_when(dt, timezone_name))
    else:
        # Without arguments: report the zone's current time and UTC offset.
        timezone = reverse_gmt(timezone)
        dt = pendulum.now(timezone)
        await client.say(message, "`{} {}` is **UTC{}{}**.".format(
            dt.strftime(dt_format), timezone_name,
            "-" if dt.offset_hours < 0 else ("+" if dt.offset_hours > 0 else ""),
            abs(dt.offset_hours) if dt.offset_hours else "",
        ))
@plugins.argument()
def tag_arg(tag: str):
    """Normalize a countdown tag: lowercased with all spaces removed."""
    normalized = tag.lower()
    return normalized.replace(" ", "")
@plugins.command(aliases="cd downcount")
async def countdown(message: discord.Message, tag: Annotate.Content):
    """ Display a countdown with the specified tag. """
    tag = tag_arg(tag)
    # NOTE(review): assert messages appear to be the user-facing error text --
    # presumably surfaced by the command framework; confirm.
    assert tag in time_cfg.data["countdown"], "Countdown with tag `{}` does not exist.".format(tag)
    cd = time_cfg.data["countdown"][tag]
    # Rebuild the aware datetime from the persisted string + zone.
    dt = pendulum.parse(cd["time"], tz=cd["tz"])
    timezone_name = cd["tz_name"]
    await client.say(message, format_when(dt, timezone_name))
@plugins.command()
async def created(message: discord.Message, member: discord.Member = Annotate.Self):
    """ When your or the selected member's discord account was created. """
    member_created = pendulum.instance(member.created_at)
    channel = message.channel
    # Embed timestamp carries the absolute date; the description the relative one.
    embed = discord.Embed(description=f'**Created {member_created.diff_for_humans()}**', timestamp=member.created_at,
                          color=member.color)
    embed.set_author(name=member.display_name, icon_url=member.display_avatar.url)
    await client.send_message(channel, embed=embed)
@plugins.command()
async def joined(message: discord.Message, member: discord.Member = Annotate.Self):
    """ When your or the selected member joined the guild. """
    member_joined = pendulum.instance(member.joined_at)
    channel = message.channel
    # Embed timestamp carries the absolute date; the description the relative one.
    embed = discord.Embed(description=f'**Joined {member_joined.diff_for_humans()}**', timestamp=member.joined_at,
                          color=member.color)
    embed.set_author(name=member.display_name, icon_url=member.display_avatar.url)
    await client.send_message(channel, embed=embed)
@countdown.command(aliases="add", pos_check=True)
async def create(message: discord.Message, tag: tag_arg, *time, timezone: tz_arg = "UTC"):
    """ Create a countdown with the specified tag, using the same format as `{pre}when`. """
    assert tag not in time_cfg.data["countdown"], "Countdown with tag `{}` already exists.".format(tag)
    timezone_name = timezone
    dt, timezone = await init_dt(message, " ".join(time), timezone)
    if dt is None or timezone is None:
        return
    seconds = (dt.diff(pendulum.now(timezone)).in_seconds())
    assert dt > pendulum.now(timezone), "A countdown has to be set in the future."
    # Persist everything needed to rebuild/announce the reminder after restart.
    cd = dict(time=dt.to_datetime_string(), tz=timezone, tz_name=timezone_name, tag=tag,
              author=str(message.author.id), channel=str(message.channel.id))
    time_cfg.data["countdown"][tag] = cd
    await time_cfg.asyncsave()
    await client.say(message, "Added countdown with tag `{}`.".format(tag))
    # Schedule the in-process reminder for the freshly created countdown.
    client.loop.create_task(wait_for_reminder(cd, seconds))
@countdown.command(aliases="remove")
async def delete(message: discord.Message, tag: Annotate.Content):
    """ Remove a countdown with the specified tag. You need to be the author of a tag
    in order to remove it. """
    tag = tag_arg(tag)
    assert tag in time_cfg.data["countdown"], "Countdown with tag `{}` does not exist.".format(tag)
    author_id = time_cfg.data["countdown"][tag]["author"]
    # Only the creator may delete; resolve their name for the error message.
    assert str(message.author.id) == author_id, "You are not the author of this tag ({}).".format(
        getattr(discord.utils.get(client.get_all_members(), id=author_id), "name", None) or "~~Unknown~~")
    # NOTE(review): the sleeping wait_for_reminder task for this tag is not
    # cancelled here, so the announcement may still fire -- confirm intended.
    del time_cfg.data["countdown"][tag]
    await time_cfg.asyncsave()
    await client.say(message, "Countdown with tag `{}` removed.".format(tag))
@countdown.command(name="list")
async def countdown_list(message: discord.Message, author: discord.Member = None):
    """ List all countdowns or all countdowns by the specified author. """
    assert time_cfg.data["countdown"], "There are no countdowns created."
    if author:
        # Keep only tags whose stored author id matches the requested member.
        tags = (tag for tag, value in time_cfg.data["countdown"].items() if value["author"] == str(author.id))
    else:
        tags = (tag for tag in time_cfg.data["countdown"].keys())
    await client.say(message, "**{}countdown tags**:```\n{}```".format(
        "{}'s ".format(author.name) if author else "", ", ".join(tags)))
async def wait_for_reminder(cd, seconds):
    """Sleep *seconds*, then announce countdown *cd* and drop it from config.

    Runs as a standalone task so command handlers are not blocked while a
    countdown sleeps; a cancelled task exits silently without announcing.
    """
    try:
        await asyncio.sleep(seconds)
    except asyncio.CancelledError:
        return
    channel = client.get_channel(int(cd["channel"]))
    author = channel.guild.get_member(int(cd["author"]))
    msg = "Hey {0}, your countdown **{cd[tag]}** at `{cd[time]} {cd[tz_name]}` is over!".format(author.mention, cd=cd)
    await client.send_message(channel, msg)
    # BUG FIX: the tag may have been removed (e.g. via the delete command)
    # while we slept; `del` would then raise KeyError and kill this task
    # before the save.  pop() with a default tolerates the missing key.
    time_cfg.data["countdown"].pop(cd["tag"], None)
    await time_cfg.asyncsave()
async def handle_countdown_reminders():
    """ Handle countdowns after starting.
    Countdowns created afterwards are handled by the cd create command.
    """
    # Rebuild reminder dicts (plus a parsed datetime) from the saved config.
    reminders = []
    for tag, cd in dict(time_cfg.data["countdown"]).items():
        dt = pendulum.parse(cd["time"], tz=cd["tz"])
        cd = dict(cd)
        cd["tag"] = tag
        cd["dt"] = dt
        reminders.append(cd)
    if not reminders:
        return
    # Go through the reminders starting at the newest one
    for cd in sorted(reminders, key=itemgetter("dt")):
        # Find in how many seconds the countdown will finish
        seconds = (cd["dt"].diff(pendulum.now(cd["tz"])).in_seconds())
        # If the next reminder is in longer than a month, don't bother waiting;
        # the list is sorted, so all later countdowns are even further away.
        if seconds > 60 * 60 * 24 * 30:
            return
        # In case of multiple countdowns at once, set a threshold at -10 seconds
        # If below, remove the stale countdown and continue
        if seconds < -10:
            del time_cfg.data["countdown"][cd["tag"]]
            await time_cfg.asyncsave()
            continue
        seconds = max(seconds, 0)
        # Awaited (not spawned), so startup reminders fire strictly in order.
        await wait_for_reminder(cd, seconds)
async def on_ready():
    """ Start a task for startup countdowns. """
    # Fire-and-forget: countdowns saved before a restart are replayed here.
    client.loop.create_task(handle_countdown_reminders())
| 35.125506 | 118 | 0.653988 |
import asyncio
from operator import itemgetter
import discord
import pendulum
from pytz import all_timezones
import bot
import plugins
from pcbot import Config, Annotate
client = plugins.client
time_cfg = Config("time", data=dict(countdown={}, timezone={}))
dt_format = "%A, %d %B %Y %H:%M:%S"
@plugins.argument()
def tz_arg(timezone: str):
for tz in all_timezones:
if tz.lower().endswith(timezone.lower()):
return tz
return None
def reverse_gmt(timezone: str):
if "+" in timezone:
timezone = timezone.replace("+", "-")
elif "-" in timezone:
timezone = timezone.replace("-", "+")
return timezone
async def init_dt(message: discord.Message, time: str, timezone: str):
timezone = reverse_gmt(timezone)
try:
dt = pendulum.parse(time, tz=timezone, strict=False)
except ValueError:
await client.say(message, "Time format not recognized.")
return None, None
return dt, timezone
def format_when(dt: pendulum.datetime, timezone: str = "UTC"):
now = pendulum.now("UTC")
diff = dt.diff(now)
major_diff = dt.diff_for_humans(absolute=True)
detailed_diff = diff.in_words().replace("-", "")
return "`{time} {tz}` {pronoun} **{major}{diff}{pronoun2}**.".format(
time=dt.strftime(dt_format),
tz=timezone,
pronoun="is in" if dt > now else "was",
major="~" + major_diff + "** / **" if major_diff not in detailed_diff else "",
diff=detailed_diff,
pronoun2=" ago" if dt < now else ""
)
@plugins.command(aliases="timezone")
async def when(message: discord.Message, *time, timezone: tz_arg = "UTC"):
timezone_name = timezone
if time:
dt, timezone = await init_dt(message, " ".join(time), timezone)
if dt is None or timezone is None:
return
await client.say(message, format_when(dt, timezone_name))
else:
timezone = reverse_gmt(timezone)
dt = pendulum.now(timezone)
await client.say(message, "`{} {}` is **UTC{}{}**.".format(
dt.strftime(dt_format), timezone_name,
"-" if dt.offset_hours < 0 else ("+" if dt.offset_hours > 0 else ""),
abs(dt.offset_hours) if dt.offset_hours else "",
))
@plugins.argument()
def tag_arg(tag: str):
return tag.lower().replace(" ", "")
@plugins.command(aliases="cd downcount")
async def countdown(message: discord.Message, tag: Annotate.Content):
tag = tag_arg(tag)
assert tag in time_cfg.data["countdown"], "Countdown with tag `{}` does not exist.".format(tag)
cd = time_cfg.data["countdown"][tag]
dt = pendulum.parse(cd["time"], tz=cd["tz"])
timezone_name = cd["tz_name"]
await client.say(message, format_when(dt, timezone_name))
@plugins.command()
async def created(message: discord.Message, member: discord.Member = Annotate.Self):
member_created = pendulum.instance(member.created_at)
channel = message.channel
embed = discord.Embed(description=f'**Created {member_created.diff_for_humans()}**', timestamp=member.created_at,
color=member.color)
embed.set_author(name=member.display_name, icon_url=member.display_avatar.url)
await client.send_message(channel, embed=embed)
@plugins.command()
async def joined(message: discord.Message, member: discord.Member = Annotate.Self):
member_joined = pendulum.instance(member.joined_at)
channel = message.channel
embed = discord.Embed(description=f'**Joined {member_joined.diff_for_humans()}**', timestamp=member.joined_at,
color=member.color)
embed.set_author(name=member.display_name, icon_url=member.display_avatar.url)
await client.send_message(channel, embed=embed)
@countdown.command(aliases="add", pos_check=True)
async def create(message: discord.Message, tag: tag_arg, *time, timezone: tz_arg = "UTC"):
assert tag not in time_cfg.data["countdown"], "Countdown with tag `{}` already exists.".format(tag)
timezone_name = timezone
dt, timezone = await init_dt(message, " ".join(time), timezone)
if dt is None or timezone is None:
return
seconds = (dt.diff(pendulum.now(timezone)).in_seconds())
assert dt > pendulum.now(timezone), "A countdown has to be set in the future."
cd = dict(time=dt.to_datetime_string(), tz=timezone, tz_name=timezone_name, tag=tag,
author=str(message.author.id), channel=str(message.channel.id))
time_cfg.data["countdown"][tag] = cd
await time_cfg.asyncsave()
await client.say(message, "Added countdown with tag `{}`.".format(tag))
client.loop.create_task(wait_for_reminder(cd, seconds))
@countdown.command(aliases="remove")
async def delete(message: discord.Message, tag: Annotate.Content):
tag = tag_arg(tag)
assert tag in time_cfg.data["countdown"], "Countdown with tag `{}` does not exist.".format(tag)
author_id = time_cfg.data["countdown"][tag]["author"]
assert str(message.author.id) == author_id, "You are not the author of this tag ({}).".format(
getattr(discord.utils.get(client.get_all_members(), id=author_id), "name", None) or "~~Unknown~~")
del time_cfg.data["countdown"][tag]
await time_cfg.asyncsave()
await client.say(message, "Countdown with tag `{}` removed.".format(tag))
@countdown.command(name="list")
async def countdown_list(message: discord.Message, author: discord.Member = None):
assert time_cfg.data["countdown"], "There are no countdowns created."
if author:
tags = (tag for tag, value in time_cfg.data["countdown"].items() if value["author"] == str(author.id))
else:
tags = (tag for tag in time_cfg.data["countdown"].keys())
await client.say(message, "**{}countdown tags**:```\n{}```".format(
"{}'s ".format(author.name) if author else "", ", ".join(tags)))
async def wait_for_reminder(cd, seconds):
try:
await asyncio.sleep(seconds)
except asyncio.CancelledError:
return
channel = client.get_channel(int(cd["channel"]))
author = channel.guild.get_member(int(cd["author"]))
msg = "Hey {0}, your countdown **{cd[tag]}** at `{cd[time]} {cd[tz_name]}` is over!".format(author.mention, cd=cd)
await client.send_message(channel, msg)
del time_cfg.data["countdown"][cd["tag"]]
await time_cfg.asyncsave()
async def handle_countdown_reminders():
reminders = []
for tag, cd in dict(time_cfg.data["countdown"]).items():
dt = pendulum.parse(cd["time"], tz=cd["tz"])
cd = dict(cd)
cd["tag"] = tag
cd["dt"] = dt
reminders.append(cd)
if not reminders:
return
# Go through the reminders starting at the newest one
for cd in sorted(reminders, key=itemgetter("dt")):
# Find in how many seconds the countdown will finish
seconds = (cd["dt"].diff(pendulum.now(cd["tz"])).in_seconds())
# If the next reminder is in longer than a month, don't bother waiting,
if seconds > 60 * 60 * 24 * 30:
return
if seconds < -10:
del time_cfg.data["countdown"][cd["tag"]]
await time_cfg.asyncsave()
continue
seconds = max(seconds, 0)
await wait_for_reminder(cd, seconds)
async def on_ready():
client.loop.create_task(handle_countdown_reminders())
| true | true |
f7fb180212f335243444030a032a42fb4df9df72 | 3,887 | py | Python | rfm69_test.py | Pythonaire/RFMGate | ccf24dceeec5c6946c6b552863459c375149bbb0 | [
"Unlicense"
] | null | null | null | rfm69_test.py | Pythonaire/RFMGate | ccf24dceeec5c6946c6b552863459c375149bbb0 | [
"Unlicense"
] | null | null | null | rfm69_test.py | Pythonaire/RFMGate | ccf24dceeec5c6946c6b552863459c375149bbb0 | [
"Unlicense"
] | null | null | null | # Simple example to send a message and then wait indefinitely for messages
# to be received. This uses the default RadioHead compatible GFSK_Rb250_Fd250
# modulation and packet format for the radio.
# Author: Tony DiCola
import board
import busio
import digitalio
import adafruit_rfm69
import json
# Define radio parameters.
RADIO_FREQ_MHZ = 433.0 # Frequency of the radio in MHz; must match the
# transmitting module! Can be a value like 915.0, 433.0, etc.
# Define pins connected to the chip, use these if wiring up the breakout according to the guide:
#CS = digitalio.DigitalInOut(board.D5)
#RESET = digitalio.DigitalInOut(board.D6)
# Or uncomment and instead use these if using a Feather M0 RFM69 board
# and the appropriate CircuitPython build:
#CS = digitalio.DigitalInOut(board.RFM69_CS)
CS = digitalio.DigitalInOut(board.CE1)
#RESET = digitalio.DigitalInOut(board.RFM69_RST)
RESET = digitalio.DigitalInOut(board.D25)
# Initialize SPI bus.
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
# Initialize the RFM69 radio itself.
rfm69 = adafruit_rfm69.RFM69(spi, CS, RESET, RADIO_FREQ_MHZ)
# NOTE(review): this reads the *preamble* length, not the RadioHead header;
# the variable name is misleading and the value is unused below -- confirm.
header = rfm69.preamble_length #set to default length of RadioHead RFM69 library
# Set this node's RadioHead address.
rfm69.node = 1
# Optionally set an encryption key (16 byte AES key). MUST match both
# on the transmitter and receiver (or be set to None to disable/the default).
rfm69.encryption_key = b'\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08'
# Print out some chip state:
print('Temperature: {0}C'.format(rfm69.temperature))
print('Frequency: {0}mhz'.format(rfm69.frequency_mhz))
print('Bit rate: {0}kbit/s'.format(rfm69.bitrate/1000))
print('Frequency deviation: {0}hz'.format(rfm69.frequency_deviation))
# Send a packet. Note you can only send a packet up to 60 bytes in length.
# This is a limitation of the radio packet size, so if you need to send larger
# amounts of data you will need to break it into smaller send calls. Each send
# call will wait for the previous one to finish before continuing.
#rfm69.send(bytes('Hello world!\r\n',"utf-8"))
# print('Sent hello world message!')
# Wait to receive packets. Note that this library can't receive data at a fast
# rate, in fact it can only receive and process one 60 byte packet at a time.
# This means you should only use this for low bandwidth scenarios, like sending
# and receiving a single message at a time.
print('Waiting for packets...')
while True:
    # Block for up to 0.5 s; keep_listening re-arms RX after each packet and
    # with_header retains the 4-byte RadioHead header (dest, sender, id, flags).
    packet = rfm69.receive(timeout=0.5, keep_listening=True, with_header=True)
    if packet is None:
        # BUG FIX: this status message was dead code -- it sat *after* an
        # unconditional `continue` and could never execute.  Print it before
        # looping again, as the surrounding comments always described.
        print('Received nothing! Listening again...')
        continue
    # Received a packet!
    print('RSSI: {0}'.format(rfm69.rssi))
    client_id = packet[1]  # header byte 1 = sender node address
    print('data coming from: {0}'.format(client_id))
    del packet[0:4]  # strip the RadioHead header, keeping only the payload
    # Payload is single-quoted JSON-ish text; normalize quotes so a later
    # json.loads() would accept it.
    my_json = packet.decode('utf8').replace("'", '"')
    print(my_json)
| 44.678161 | 103 | 0.710831 |
"""Receive RFM69 radio packets and print their JSON payloads.

Configures an RFM69 radio over SPI, then loops forever receiving
packets, stripping the 4-byte RadioHead header, and printing the
decoded payload text.
"""
import board
import busio
import digitalio
import adafruit_rfm69
import json

RADIO_FREQ_MHZ = 433.0  # must match the transmitter's frequency

# Chip-select and reset pins for the radio breakout.
CS = digitalio.DigitalInOut(board.CE1)
RESET = digitalio.DigitalInOut(board.D25)
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
rfm69 = adafruit_rfm69.RFM69(spi, CS, RESET, RADIO_FREQ_MHZ)
header = rfm69.preamble_length
rfm69.node = 1  # this node's RadioHead address
# 16-byte AES key; must match the transmitter's key exactly.
rfm69.encryption_key = b'\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08'
print('Temperature: {0}C'.format(rfm69.temperature))
print('Frequency: {0}mhz'.format(rfm69.frequency_mhz))
print('Bit rate: {0}kbit/s'.format(rfm69.bitrate/1000))
print('Frequency deviation: {0}hz'.format(rfm69.frequency_deviation))
# The radio can only buffer one ~60 byte packet at a time, so this is
# only suitable for low-bandwidth, one-message-at-a-time scenarios.
print('Waiting for packets...')
while True:
    # with_header=True keeps the 4-byte RadioHead header so the sender's
    # address can be read; keep_listening re-arms RX after each packet.
    packet = rfm69.receive(timeout=0.5, keep_listening=True, with_header=True)
    # If no packet was received during the timeout then None is returned.
    if packet is None:
        # Bug fix: this print used to sit *after* `continue` and was
        # unreachable; the comment showed it was meant to run.
        print('Received nothing! Listening again...')
        continue
    # Received a packet!
    print('RSSI: {0}'.format(rfm69.rssi))
    client_id = packet[1]  # header byte 1 is the sender's node address
    print('data coming from: {0}'.format(client_id))
    del packet[0:4]  # delete the 4-byte RadioHead header; payload remains
    # Payload is expected to be JSON-like text; normalize single quotes
    # to double quotes so json.loads() could parse it if needed.
    my_json = packet.decode('utf8').replace("'", '"')
    print(my_json)
| true | true |
f7fb1882915956c3078b72bc3dfed7584de6fe07 | 1,983 | py | Python | src/uff/aperture.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 7 | 2021-11-16T17:27:54.000Z | 2021-12-25T18:09:35.000Z | src/uff/aperture.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 6 | 2021-11-16T17:27:33.000Z | 2022-02-04T08:51:06.000Z | src/uff/aperture.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 1 | 2021-11-16T19:26:36.000Z | 2021-11-16T19:26:36.000Z | from dataclasses import dataclass
from uff.position import Position
from uff.uff_io import Serializable
@dataclass
class Aperture(Serializable):
    """
    UFF class to analytically define the aperture used by an ultrasound wave.

    Notes:
        The aperture defines the transmit apodization profile; ``origin``
        is the center of the aperture. The aperture size is either fixed
        (``fixed_size``) or dynamic: d / ``f_number``, where d is the
        distance between uff.wave.origin and uff.wave.aperture.origin.
        ``window`` is a string describing the apodization window.

    Attributes:
        origin (Position): Location of the aperture center in space.
        window (str): String defining the apodization window type and
            parameter (e.g., 'Hamming', 'Gauss(8)', 'Tukey(0.5)')
        f_number (list[float]): Desired F-number of the aperture [Az, El]
        fixed_size (list[float]): If non-zero, this overwrites the size of
            the aperture in [m] [Az, El]
        minimum_size (float): (Optional) If non-zero, this sets a limit for
            the minimum dynamic aperture in m [Az, El]
        maximum_size (float): (Optional) If non-zero, this sets a limit for
            the maximum dynamic aperture in m [Az, El]
    """
    @staticmethod
    def str_name():
        """Key under which this object appears when (de)serialized."""
        return 'aperture'
    # @classmethod
    # def deserialize(cls: object, data: dict):
    #     data['position'] = data.pop('origin')
    #     return super().deserialize(data)
    # TODO: standard has this named aperture but defined as position
    origin: Position
    # TODO: what should fixed size type be? list? float? how do you reproduce the same functionality
    fixed_size: float
    f_number: float = 1.0
    window: str = 'rectwin'
    # NOTE(review): annotated `float` but defaulting to None — effectively
    # Optional[float]; confirm against the UFF spec before tightening.
    minimum_size: float = None
    maximum_size: float = None
| 39.66 | 100 | 0.648512 | from dataclasses import dataclass
from uff.position import Position
from uff.uff_io import Serializable
@dataclass
class Aperture(Serializable):
@staticmethod
def str_name():
return 'aperture'
origin: Position
fixed_size: float
f_number: float = 1.0
window: str = 'rectwin'
minimum_size: float = None
maximum_size: float = None
| true | true |
f7fb194da78bbfc9c441023ad316d3d523a1d956 | 7,880 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_load_balancer_probes_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_load_balancer_probes_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_load_balancer_probes_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerProbesOperations(object):
    """LoadBalancerProbesOperations operations.
    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2018-10-01".
    """
    # NOTE: auto-generated by AutoRest; manual edits are lost on regeneration.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-10-01"
        self.config = config
    def list(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancer probes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Probe
        :rtype:
         ~azure.mgmt.network.v2018_10_01.models.ProbePaged[~azure.mgmt.network.v2018_10_01.models.Probe]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Builds the first page's request, or a follow-up request when a
        # nextLink URL is supplied by the previous page.
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Fetches a single page; any status other than 200 is raised as CloudError.
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        # ProbePaged calls internal_paging lazily as the iterator is consumed.
        deserialized = models.ProbePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'}
    def get(
            self, resource_group_name, load_balancer_name, probe_name, custom_headers=None, raw=False, **operation_config):
        """Gets load balancer probe.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param probe_name: The name of the probe.
        :type probe_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Probe or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2018_10_01.models.Probe or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'probeName': self._serialize.url("probe_name", probe_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Probe', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'}
| 44.519774 | 176 | 0.654315 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerProbesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-10-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProbePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'}
def get(
self, resource_group_name, load_balancer_name, probe_name, custom_headers=None, raw=False, **operation_config):
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Probe', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'}
| true | true |
f7fb19e97cd22ca52bdc9cefc711a0ffce8ddc83 | 16,538 | py | Python | arelle/examples/plugin/streamingExtensions.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | [
"Apache-2.0"
] | 1 | 2018-01-04T01:39:04.000Z | 2018-01-04T01:39:04.000Z | arelle/examples/plugin/streamingExtensions.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | [
"Apache-2.0"
] | null | null | null | arelle/examples/plugin/streamingExtensions.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | [
"Apache-2.0"
] | null | null | null | '''
StreamingExtensions is a plug-in to both GUI menu and command line/web service
that provides an alternative approach to big instance documents without building a DOM, to save
memory footprint. lxml iterparse is used to parse the big instance. ModelObjects are specialized by features
for efficiency and to avoid dependency on an underlying DOM.
(Note that this module is based on iterparse, the module under the installation/plugs is much faster.)
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import io, sys, os, time
from decimal import Decimal, InvalidOperation
from lxml import etree
from collections import defaultdict
from arelle import XbrlConst, XmlUtil, XmlValidate, ValidateXbrlDimensions
from arelle.ModelDocument import ModelDocument, Type
from arelle.ModelObject import ModelObject
from arelle.ModelObjectFactory import parser
from arelle.ModelValue import QName
from arelle.ModelInstanceObject import ModelContext, ModelFact, ModelUnit
from arelle.Validate import Validate
_streamingExtensionsValidate = False
_streamingExtensionsCheck = False
def precedingProcessingInstruction(elt, target):
    """Return the nearest preceding-sibling processing instruction of
    *elt* whose target is *target*, or None if there is none."""
    sibling = elt.getprevious()
    while sibling is not None:
        if isinstance(sibling, etree._ProcessingInstruction):
            if sibling.target == target:
                return sibling
        sibling = sibling.getprevious()
    return None
def streamingExtensionsLoader(modelXbrl, mappedUri, filepath):
    """Alternate pull-loader that streams a large XBRL instance.

    Pass 1 walks the file with lxml iterparse just far enough to confirm
    it is an instance carrying an <?xbrl-streamable-instance?> processing
    instruction (and counts root-level facts for progress reporting).
    Pass 2 re-parses the file, building ModelObjects directly and, when
    incremental validation is active, dropping contexts, units, footnote
    links and facts once the buffer limits from the PI are exceeded, so
    the whole document tree is never resident in memory at once.

    Returns the instance's ModelDocument, or None when the file is not a
    streamable instance (the caller then falls back to the normal loader).
    """
    # check if big instance and has header with an initial incomplete tree walk (just 2 elements)
    def logSyntaxErrors(parsercontext):
        # report any lxml syntax errors accumulated by the parse context
        for error in parsercontext.error_log:
            modelXbrl.error("xmlSchema:syntax",
                    _("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
                    modelObject=modelDocument, fileName=os.path.basename(filepath),
                    error=error.message, line=error.line, column=error.column, sourceAction="streaming")
    #### note: written for iterparse of lxml prior to version 3.3, otherwise rewrite to use XmlPullParser ###
    #### note: iterparse wants a binary file, but file is text mode
    _file, = modelXbrl.fileSource.file(filepath, binary=True)
    startedAt = time.time()
    modelXbrl.profileActivity()
    parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    foundInstance = False
    foundErrors = False
    streamingAspects = None  # attributes of the xbrl-streamable-instance PI
    numRootFacts1 = 0  # pass-1 count of root-level facts (for progress %)
    numElts = 0
    elt = None
    # ---- pass 1: detect a streamable instance and read its header PI ----
    for event, elt in parsercontext:
        if event == "start":
            if elt.getparent() is not None:
                if elt.getparent().tag == "{http://www.xbrl.org/2003/instance}xbrl":
                    if not foundInstance:
                        foundInstance = True
                        pi = precedingProcessingInstruction(elt, "xbrl-streamable-instance")
                        if pi is None:
                            break  # no streaming header: not streamable
                        else:
                            streamingAspects = dict(pi.attrib.copy())
                    if not elt.tag.startswith("{http://www.xbrl.org/"):
                        numRootFacts1 += 1
                        if numRootFacts1 % 1000 == 0:
                            modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
                elif not foundInstance:
                    break  # root element is not xbrli:xbrl
            elif elt.tag == "{http://www.xbrl.org/2003/instance}xbrl" and precedingProcessingInstruction(elt, "xbrl-streamable-instance") is not None:
                modelXbrl.error("streamingExtensions:headerMisplaced",
                        _("Header is misplaced: %(error)s, must follow xbrli:xbrl element"),
                        modelObject=elt)
        elif event == "end":
            elt.clear()
            numElts += 1
            # periodically prune already-processed siblings to bound memory
            if numElts % 1000 == 0 and elt.getparent() is not None:
                while elt.getprevious() is not None and elt.getparent() is not None:
                    del elt.getparent()[0]
    if elt is not None:
        elt.clear()
    _file.seek(0,io.SEEK_SET) # allow reparsing
    if not foundInstance or streamingAspects is None:
        del elt, parsercontext
        _file.close()
        return None  # not a streamable instance; use the normal loader
    modelXbrl.profileStat(_("streaming tree check"), time.time() - startedAt)
    startedAt = time.time()
    # ---- validate the streaming header's version and buffer limits ----
    try:
        version = Decimal(streamingAspects.get("version"))
        if int(version) != 1:
            modelXbrl.error("streamingExtensions:unsupportedVersion",
                    _("Streaming version %(version)s, major version number must be 1"),
                    modelObject=elt, version=version)
            foundErrors = True
    except (InvalidOperation, OverflowError):
        modelXbrl.error("streamingExtensions:versionError",
                _("Version %(version)s, number must be 1.n"),
                modelObject=elt, version=streamingAspects.get("version", "(none)"))
        foundErrors = True
    # each buffer limit must be a positive integer or INF (unbounded)
    for bufAspect in ("contextBuffer", "unitBuffer", "footnoteBuffer"):
        try:
            bufLimit = Decimal(streamingAspects.get(bufAspect, "INF"))
            if bufLimit < 1 or (bufLimit.is_finite() and bufLimit % 1 != 0):
                raise InvalidOperation
            elif bufAspect == "contextBuffer":
                contextBufferLimit = bufLimit
            elif bufAspect == "unitBuffer":
                unitBufferLimit = bufLimit
            elif bufAspect == "footnoteBuffer":
                footnoteBufferLimit = bufLimit
        except InvalidOperation:
            modelXbrl.error("streamingExtensions:valueError",
                    _("Streaming %(attrib)s %(value)s, number must be a positive integer or INF"),
                    modelObject=elt, attrib=bufAspect, value=streamingAspects.get(bufAspect))
            foundErrors = True
    if parsercontext.error_log:
        foundErrors = True
    logSyntaxErrors(parsercontext)
    if foundErrors:
        _file.close()
        return None
    # ---- pass 2: stream the instance into ModelObjects ----
    parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    _parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,filepath)
    eltMdlObjs = {}  # maps iterparse elements to their ModelObjects
    beforeInstanceStream = True
    validator = None
    contextBuffer = []
    unitBuffer = []
    footnoteBuffer = []
    factBuffer = []  # NOTE(review): never used below; numFacts likewise
    numFacts = numRootFacts2 = 1
    for event, elt in parsercontext:
        if event == "start":
            mdlObj = _parser.makeelement(elt.tag, attrib=elt.attrib, nsmap=elt.nsmap)
            mdlObj.sourceline = elt.sourceline
            eltMdlObjs[elt] = mdlObj
            if elt.getparent() is None:
                # root element: create the ModelDocument for the instance
                modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, etree.ElementTree(mdlObj))
                modelDocument.xmlRootElement = mdlObj
                modelXbrl.modelDocument = modelDocument # needed for incremental validation
                mdlObj.init(modelDocument)
                modelXbrl.info("streamingExtensions:streaming",
                               _("Stream processing this instance."),
                               modelObject = modelDocument)
            else:
                eltMdlObjs[elt.getparent()].append(mdlObj)
                mdlObj._init()
                ns = mdlObj.namespaceURI
                ln = mdlObj.localName
                # first non-header child marks the start of instance data
                if (beforeInstanceStream and (
                    (ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
                    (ns == XbrlConst.xbrli and ln in ("context", "unit")) or
                    (ns not in (XbrlConst.link, XbrlConst.xbrli)))):
                    beforeInstanceStream = False
                    if _streamingExtensionsValidate:
                        validator = Validate(modelXbrl)
                        validator.instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
                    else: # need default dimensions
                        ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
            mdlObj = None # deref
        elif event == "end":
            mdlObj = eltMdlObjs.pop(elt)
            if elt.text: # text available after child nodes processed
                mdlObj.text = elt.text
            ns = mdlObj.namespaceURI
            ln = mdlObj.localName
            parentMdlObj = mdlObj.getparent()
            if ns == XbrlConst.xbrli:
                if ln == "context":
                    if mdlObj.get("sticky"):
                        # sticky contexts are retained for the whole stream
                        del mdlObj.attrib["sticky"]
                        modelDocument.contextDiscover(mdlObj)
                    else:
                        if _streamingExtensionsValidate and len(contextBuffer) >= contextBufferLimit:
                            # drop before adding as dropped may have same id as added
                            cntx = contextBuffer.pop(0)
                            dropContext(modelXbrl, cntx)
                            del parentMdlObj[parentMdlObj.index(cntx)]
                            cntx = None
                        modelDocument.contextDiscover(mdlObj)
                        if contextBufferLimit.is_finite():
                            contextBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        contextsToCheck = (mdlObj,)
                        validator.instValidator.checkContexts(contextsToCheck)
                        if modelXbrl.hasXDT:
                            validator.instValidator.checkContextsDimensions(contextsToCheck)
                        del contextsToCheck # dereference
                elif ln == "unit":
                    if _streamingExtensionsValidate and len(unitBuffer) >= unitBufferLimit:
                        # drop before adding as dropped may have same id as added
                        unit = unitBuffer.pop(0)
                        dropUnit(modelXbrl, unit)
                        del parentMdlObj[parentMdlObj.index(unit)]
                        unit = None
                    modelDocument.unitDiscover(mdlObj)
                    if unitBufferLimit.is_finite():
                        unitBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        validator.instValidator.checkUnits( (mdlObj,) )
                elif ln == "xbrl": # end of document
                    # check remaining footnote refs
                    for footnoteLink in footnoteBuffer:
                        checkFootnoteHrefs(modelXbrl, footnoteLink)
                    elt.clear()
            elif ns == XbrlConst.link:
                if ln in ("schemaRef", "linkbaseRef"):
                    modelDocument.discoverHref(mdlObj)
                elif ln in ("roleRef", "arcroleRef"):
                    modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
                elif ln == "footnoteLink":
                    footnoteLinks = (mdlObj,)
                    modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
                    if footnoteBufferLimit.is_finite():
                        footnoteBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        validator.instValidator.checkLinks(footnoteLinks)
                        if len(footnoteBuffer) > footnoteBufferLimit:
                            # check that hrefObjects for locators were all satisfied
                            # drop before addition as dropped may have same id as added
                            footnoteLink = footnoteBuffer.pop(0)
                            checkFootnoteHrefs(modelXbrl, footnoteLink)
                            dropFootnoteLink(modelXbrl, footnoteLink)
                            del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            footnoteLink = None
                    footnoteLinks = None
                elt.clear()
            elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl:
                # a root-level fact (or tuple) has completed
                numRootFacts2 += 1
                modelDocument.factDiscover(mdlObj, modelXbrl.facts)
                XmlValidate.validate(modelXbrl, mdlObj)
                if _streamingExtensionsValidate:
                    factsToCheck = (mdlObj,)
                    validator.instValidator.checkFacts(factsToCheck)
                    if modelXbrl.hasXDT:
                        validator.instValidator.checkFactsDimensions(factsToCheck)
                    del factsToCheck
                    # NOTE(review): facts are only dropped when validating;
                    # without validation every fact stays resident — confirm
                    # whether that is intended.
                    dropFact(modelXbrl, mdlObj, modelXbrl.facts)
                    del parentMdlObj[parentMdlObj.index(mdlObj)]
                if numRootFacts2 % 1000 == 0:
                    modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(numRootFacts2, numRootFacts1, 100.0 * numRootFacts2 / numRootFacts1),
                                              minTimeToShow=20.0)
                # get rid of root element from iterparse's tree
                elt.clear()
                while elt.getprevious() is not None: # cleans up any prior siblings
                    del elt.getparent()[0]
            mdlObj = None # deref
    logSyntaxErrors(parsercontext)
    del parsercontext
    if validator is not None:
        validator.close()
    _file.close()
    modelXbrl.profileStat(_("streaming complete"), time.time() - startedAt)
    return modelDocument
def checkFootnoteHrefs(modelXbrl, footnoteLink):
    """Report any footnote locator in *footnoteLink* whose target id was
    never resolved to an object in the buffered region."""
    modelDoc = footnoteLink.modelDocument
    for locElt in footnoteLink.iterchildren(tag="{http://www.xbrl.org/2003/linkbase}loc"):
        for hrefElt, doc, id in modelDoc.hrefObjects:
            if locElt != hrefElt:
                continue
            if id in modelDoc.idObjects:
                continue
            modelXbrl.error("streamingExtensions:footnoteId",
                            _("Footnote id %(id)s not matched to fact in buffered region"),
                            modelObject=footnoteLink, id=id)
def dropContext(modelXbrl, cntx):
    """Forget a buffered context: deregister it from the model's context
    table, then recursively release its element objects."""
    modelXbrl.contexts.pop(cntx.id)
    dropObject(modelXbrl, cntx)
def dropUnit(modelXbrl, unit):
    """Forget a buffered unit: deregister it from the model's unit table,
    then recursively release its element objects."""
    modelXbrl.units.pop(unit.id)
    dropObject(modelXbrl, unit)
def dropFootnoteLink(modelXbrl, footnoteLink):
    """Forget a buffered footnote link: detach it from every base set it
    appears in, then recursively release its element objects."""
    for linkSet in modelXbrl.baseSets.values():
        if footnoteLink not in linkSet:
            continue
        linkSet.remove(footnoteLink)
    dropObject(modelXbrl, footnoteLink)
def dropFact(modelXbrl, fact, facts):
    """Release a streamed fact from the model, recursively dropping any
    nested tuple facts first, then removing it from *facts* and from the
    document's object registries."""
    # Children remove themselves from fact.modelTupleFacts as they are
    # dropped, so keep draining the first entry until the list is empty.
    nested = fact.modelTupleFacts
    while nested:
        dropFact(modelXbrl, nested[0], nested)
    modelXbrl.factsInInstance.discard(fact)
    facts.remove(fact)
    # objects are located by index, so null the slot rather than remove it
    modelXbrl.modelObjects[fact.objectIndex] = None
    fact.modelDocument.modelObjects.remove(fact)
    fact.clear()
def dropObject(modelXbrl, mdlObj):
    """Recursively release a model object and its children from the
    model's registries (modelObjects index slot, document object list,
    id table, and — for link:loc elements — the document's hrefObjects).
    """
    for childObj in mdlObj.iterchildren():
        dropObject(modelXbrl, childObj)
    if mdlObj.qname == XbrlConst.qnLinkLoc:
        hrefs = mdlObj.modelDocument.hrefObjects
        removedHrefs = [i for i, hrefObj in enumerate(hrefs) if mdlObj == hrefObj[0]]
        # Bug fix: delete highest index first; ascending deletion shifted
        # the positions of entries still to be removed, so a second match
        # would have deleted the wrong hrefObject.
        for i in reversed(removedHrefs):
            del hrefs[i]
    modelXbrl.modelObjects[mdlObj.objectIndex] = None # objects found by index, can't remove position from list
    mdlObj.modelDocument.modelObjects.remove(mdlObj)
    mdlObj.modelDocument.idObjects.pop(mdlObj.id, None)
    mdlObj.clear()
def streamingOptionsExtender(parser):
    """Add this plug-in's command-line options to Arelle's option parser.

    Registers --check-streaming, a boolean flag requesting a streamability
    check of the instance document.
    """
    parser.add_option("--check-streaming",
                      action="store_true",
                      dest="check_streaming",
                      # fixed: help text contained a stray trailing double-quote
                      help=_('Check streamability of instance document.'))
def streamingExtensionsSetup(self, options, **kwargs):
    """Capture the command-line options this plug-in cares about.

    Stores --check-streaming and --validate into module-level flags, and
    clears options.validate so the command-line driver does not run its
    own validation pass (validation happens incrementally while streaming).
    """
    global _streamingExtensionsCheck, _streamingExtensionsValidate
    _streamingExtensionsCheck = getattr(options, 'check_streaming', False)
    _streamingExtensionsValidate = options.validate
    if _streamingExtensionsValidate:
        # take over validation: suppress the command line's own pass
        options.validate = False
'''
Do not use _( ) in pluginInfo itself (it is applied later, after loading
'''
# Plug-in registration record read by Arelle's plugin manager.
__pluginInfo__ = {
    'name': 'Streaming Extensions Loader',
    'version': '0.9',
    'description': "This plug-in loads big XBRL instances without building a DOM in memory. "
                   "lxml iterparse parses XBRL directly into an object model without a DOM. ",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'CntlrCmdLine.Options': streamingOptionsExtender,      # adds --check-streaming
    'CntlrCmdLine.Utility.Run': streamingExtensionsSetup,  # records options before run
    'ModelDocument.PullLoader': streamingExtensionsLoader, # alternate instance loader
}
| 49.367164 | 164 | 0.604245 |
import io, sys, os, time
from decimal import Decimal, InvalidOperation
from lxml import etree
from collections import defaultdict
from arelle import XbrlConst, XmlUtil, XmlValidate, ValidateXbrlDimensions
from arelle.ModelDocument import ModelDocument, Type
from arelle.ModelObject import ModelObject
from arelle.ModelObjectFactory import parser
from arelle.ModelValue import QName
from arelle.ModelInstanceObject import ModelContext, ModelFact, ModelUnit
from arelle.Validate import Validate
_streamingExtensionsValidate = False
_streamingExtensionsCheck = False
def precedingProcessingInstruction(elt, target):
pi = elt.getprevious()
while pi is not None:
if isinstance(pi, etree._ProcessingInstruction) and pi.target == target:
return pi
pi = pi.getprevious()
return None
def streamingExtensionsLoader(modelXbrl, mappedUri, filepath):
def logSyntaxErrors(parsercontext):
for error in parsercontext.error_log:
modelXbrl.error("xmlSchema:syntax",
_("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
modelObject=modelDocument, fileName=os.path.basename(filepath),
error=error.message, line=error.line, column=error.column, sourceAction="streaming")
tp://www.xbrl.org/2003/instance}xbrl":
if not foundInstance:
foundInstance = True
pi = precedingProcessingInstruction(elt, "xbrl-streamable-instance")
if pi is None:
break
else:
streamingAspects = dict(pi.attrib.copy())
if not elt.tag.startswith("{http://www.xbrl.org/"):
numRootFacts1 += 1
if numRootFacts1 % 1000 == 0:
modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
elif not foundInstance:
break
elif elt.tag == "{http://www.xbrl.org/2003/instance}xbrl" and precedingProcessingInstruction(elt, "xbrl-streamable-instance") is not None:
modelXbrl.error("streamingExtensions:headerMisplaced",
_("Header is misplaced: %(error)s, must follow xbrli:xbrl element"),
modelObject=elt)
elif event == "end":
elt.clear()
numElts += 1
if numElts % 1000 == 0 and elt.getparent() is not None:
while elt.getprevious() is not None and elt.getparent() is not None:
del elt.getparent()[0]
if elt is not None:
elt.clear()
_file.seek(0,io.SEEK_SET)
if not foundInstance or streamingAspects is None:
del elt, parsercontext
_file.close()
return None
modelXbrl.profileStat(_("streaming tree check"), time.time() - startedAt)
startedAt = time.time()
try:
version = Decimal(streamingAspects.get("version"))
if int(version) != 1:
modelXbrl.error("streamingExtensions:unsupportedVersion",
_("Streaming version %(version)s, major version number must be 1"),
modelObject=elt, version=version)
foundErrors = True
except (InvalidOperation, OverflowError):
modelXbrl.error("streamingExtensions:versionError",
_("Version %(version)s, number must be 1.n"),
modelObject=elt, version=streamingAspects.get("version", "(none)"))
foundErrors = True
for bufAspect in ("contextBuffer", "unitBuffer", "footnoteBuffer"):
try:
bufLimit = Decimal(streamingAspects.get(bufAspect, "INF"))
if bufLimit < 1 or (bufLimit.is_finite() and bufLimit % 1 != 0):
raise InvalidOperation
elif bufAspect == "contextBuffer":
contextBufferLimit = bufLimit
elif bufAspect == "unitBuffer":
unitBufferLimit = bufLimit
elif bufAspect == "footnoteBuffer":
footnoteBufferLimit = bufLimit
except InvalidOperation:
modelXbrl.error("streamingExtensions:valueError",
_("Streaming %(attrib)s %(value)s, number must be a positive integer or INF"),
modelObject=elt, attrib=bufAspect, value=streamingAspects.get(bufAspect))
foundErrors = True
if parsercontext.error_log:
foundErrors = True
logSyntaxErrors(parsercontext)
if foundErrors:
_file.close()
return None
parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
_parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,filepath)
eltMdlObjs = {}
beforeInstanceStream = True
validator = None
contextBuffer = []
unitBuffer = []
footnoteBuffer = []
factBuffer = []
numFacts = numRootFacts2 = 1
for event, elt in parsercontext:
if event == "start":
mdlObj = _parser.makeelement(elt.tag, attrib=elt.attrib, nsmap=elt.nsmap)
mdlObj.sourceline = elt.sourceline
eltMdlObjs[elt] = mdlObj
if elt.getparent() is None:
modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, etree.ElementTree(mdlObj))
modelDocument.xmlRootElement = mdlObj
modelXbrl.modelDocument = modelDocument
mdlObj.init(modelDocument)
modelXbrl.info("streamingExtensions:streaming",
_("Stream processing this instance."),
modelObject = modelDocument)
else:
eltMdlObjs[elt.getparent()].append(mdlObj)
mdlObj._init()
ns = mdlObj.namespaceURI
ln = mdlObj.localName
if (beforeInstanceStream and (
(ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
(ns == XbrlConst.xbrli and ln in ("context", "unit")) or
(ns not in (XbrlConst.link, XbrlConst.xbrli)))):
beforeInstanceStream = False
if _streamingExtensionsValidate:
validator = Validate(modelXbrl)
validator.instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
else:
ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
mdlObj = None
elif event == "end":
mdlObj = eltMdlObjs.pop(elt)
if elt.text:
mdlObj.text = elt.text
ns = mdlObj.namespaceURI
ln = mdlObj.localName
parentMdlObj = mdlObj.getparent()
if ns == XbrlConst.xbrli:
if ln == "context":
if mdlObj.get("sticky"):
del mdlObj.attrib["sticky"]
modelDocument.contextDiscover(mdlObj)
else:
if _streamingExtensionsValidate and len(contextBuffer) >= contextBufferLimit:
cntx = contextBuffer.pop(0)
dropContext(modelXbrl, cntx)
del parentMdlObj[parentMdlObj.index(cntx)]
cntx = None
modelDocument.contextDiscover(mdlObj)
if contextBufferLimit.is_finite():
contextBuffer.append(mdlObj)
if _streamingExtensionsValidate:
contextsToCheck = (mdlObj,)
validator.instValidator.checkContexts(contextsToCheck)
if modelXbrl.hasXDT:
validator.instValidator.checkContextsDimensions(contextsToCheck)
del contextsToCheck
elif ln == "unit":
if _streamingExtensionsValidate and len(unitBuffer) >= unitBufferLimit:
unit = unitBuffer.pop(0)
dropUnit(modelXbrl, unit)
del parentMdlObj[parentMdlObj.index(unit)]
unit = None
modelDocument.unitDiscover(mdlObj)
if unitBufferLimit.is_finite():
unitBuffer.append(mdlObj)
if _streamingExtensionsValidate:
validator.instValidator.checkUnits( (mdlObj,) )
elif ln == "xbrl":
for footnoteLink in footnoteBuffer:
checkFootnoteHrefs(modelXbrl, footnoteLink)
elt.clear()
elif ns == XbrlConst.link:
if ln in ("schemaRef", "linkbaseRef"):
modelDocument.discoverHref(mdlObj)
elif ln in ("roleRef", "arcroleRef"):
modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
elif ln == "footnoteLink":
footnoteLinks = (mdlObj,)
modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
if footnoteBufferLimit.is_finite():
footnoteBuffer.append(mdlObj)
if _streamingExtensionsValidate:
validator.instValidator.checkLinks(footnoteLinks)
if len(footnoteBuffer) > footnoteBufferLimit:
footnoteLink = footnoteBuffer.pop(0)
checkFootnoteHrefs(modelXbrl, footnoteLink)
dropFootnoteLink(modelXbrl, footnoteLink)
del parentMdlObj[parentMdlObj.index(footnoteLink)]
footnoteLink = None
footnoteLinks = None
elt.clear()
elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl:
numRootFacts2 += 1
modelDocument.factDiscover(mdlObj, modelXbrl.facts)
XmlValidate.validate(modelXbrl, mdlObj)
if _streamingExtensionsValidate:
factsToCheck = (mdlObj,)
validator.instValidator.checkFacts(factsToCheck)
if modelXbrl.hasXDT:
validator.instValidator.checkFactsDimensions(factsToCheck)
del factsToCheck
dropFact(modelXbrl, mdlObj, modelXbrl.facts)
del parentMdlObj[parentMdlObj.index(mdlObj)]
if numRootFacts2 % 1000 == 0:
modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(numRootFacts2, numRootFacts1, 100.0 * numRootFacts2 / numRootFacts1),
minTimeToShow=20.0)
elt.clear()
while elt.getprevious() is not None: # cleans up any prior siblings
del elt.getparent()[0]
mdlObj = None # deref
logSyntaxErrors(parsercontext)
del parsercontext
if validator is not None:
validator.close()
_file.close()
modelXbrl.profileStat(_("streaming complete"), time.time() - startedAt)
return modelDocument
def checkFootnoteHrefs(modelXbrl, footnoteLink):
for locElt in footnoteLink.iterchildren(tag="{http://www.xbrl.org/2003/linkbase}loc"):
for hrefElt, doc, id in footnoteLink.modelDocument.hrefObjects:
if locElt == hrefElt and id not in footnoteLink.modelDocument.idObjects:
modelXbrl.error("streamingExtensions:footnoteId",
_("Footnote id %(id)s not matched to fact in buffered region"),
modelObject=footnoteLink, id=id)
def dropContext(modelXbrl, cntx):
del modelXbrl.contexts[cntx.id]
dropObject(modelXbrl, cntx)
def dropUnit(modelXbrl, unit):
del modelXbrl.units[unit.id]
dropObject(modelXbrl, unit)
def dropFootnoteLink(modelXbrl, footnoteLink):
for baseSet in modelXbrl.baseSets.values():
if footnoteLink in baseSet:
baseSet.remove(footnoteLink)
dropObject(modelXbrl, footnoteLink)
def dropFact(modelXbrl, fact, facts):
while fact.modelTupleFacts:
dropFact(modelXbrl, fact.modelTupleFacts[0], fact.modelTupleFacts)
modelXbrl.factsInInstance.discard(fact)
facts.remove(fact)
modelXbrl.modelObjects[fact.objectIndex] = None # objects found by index, can't remove position from list
fact.modelDocument.modelObjects.remove(fact)
fact.clear()
def dropObject(modelXbrl, mdlObj):
for childObj in mdlObj.iterchildren():
dropObject(modelXbrl, childObj)
if mdlObj.qname == XbrlConst.qnLinkLoc:
hrefs = mdlObj.modelDocument.hrefObjects
removedHrefs = [i for i, hrefObj in enumerate(hrefs) if mdlObj == hrefObj[0]]
for i in removedHrefs:
del hrefs[i]
modelXbrl.modelObjects[mdlObj.objectIndex] = None
mdlObj.modelDocument.modelObjects.remove(mdlObj)
mdlObj.modelDocument.idObjects.pop(mdlObj.id, None)
mdlObj.clear()
def streamingOptionsExtender(parser):
parser.add_option("--check-streaming",
action="store_true",
dest="check_streaming",
help=_('Check streamability of instance document."'))
def streamingExtensionsSetup(self, options, **kwargs):
global _streamingExtensionsCheck, _streamingExtensionsValidate
_streamingExtensionsCheck = getattr(options, 'check_streaming', False)
_streamingExtensionsValidate = options.validate
if options.validate:
options.validate = False # prevent cmdLine calling validation
__pluginInfo__ = {
'name': 'Streaming Extensions Loader',
'version': '0.9',
'description': "This plug-in loads big XBRL instances without building a DOM in memory. "
"lxml iterparse parses XBRL directly into an object model without a DOM. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrCmdLine.Options': streamingOptionsExtender,
'CntlrCmdLine.Utility.Run': streamingExtensionsSetup,
'ModelDocument.PullLoader': streamingExtensionsLoader,
}
| true | true |
f7fb1a059a90ee005d1e25c9193e4fb5d5b8dd5d | 34 | py | Python | kloppy/opta.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/opta.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/opta.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | from ._providers.opta import load
| 17 | 33 | 0.823529 | from ._providers.opta import load
| true | true |
f7fb1a4a23e6f2290e1aacd696427c417d49f4e9 | 3,647 | py | Python | google/ads/googleads/v4/services/services/location_view_service/transports/base.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/services/location_view_service/transports/base.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/services/location_view_service/transports/base.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v4.resources.types import location_view
from google.ads.googleads.v4.services.types import location_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads-googleads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LocationViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for LocationViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_location_view: gapic_v1.method.wrap_method(
self.get_location_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_location_view(
self,
) -> typing.Callable[
[location_view_service.GetLocationViewRequest],
location_view.LocationView,
]:
raise NotImplementedError
__all__ = ("LocationViewServiceTransport",)
| 34.733333 | 78 | 0.674253 |
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials
from google.ads.googleads.v4.resources.types import location_view
from google.ads.googleads.v4.services.types import location_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads-googleads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LocationViewServiceTransport(metaclass=abc.ABCMeta):
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
if ":" not in host:
host += ":443"
self._host = host
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
self._credentials = credentials
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
self._wrapped_methods = {
self.get_location_view: gapic_v1.method.wrap_method(
self.get_location_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_location_view(
self,
) -> typing.Callable[
[location_view_service.GetLocationViewRequest],
location_view.LocationView,
]:
raise NotImplementedError
__all__ = ("LocationViewServiceTransport",)
| true | true |
f7fb1a50b25af0dadbac67140aa797a64ec03336 | 4,567 | py | Python | jms_oidc_rp/middleware.py | BaiJiangJie/jumpserver-django-oidc-rp | b2d6a63dd82214263b8971412a9043268c31ba7f | [
"MIT"
] | 1 | 2021-05-03T16:20:41.000Z | 2021-05-03T16:20:41.000Z | jms_oidc_rp/middleware.py | BaiJiangJie/jumpserver-django-oidc-rp | b2d6a63dd82214263b8971412a9043268c31ba7f | [
"MIT"
] | null | null | null | jms_oidc_rp/middleware.py | BaiJiangJie/jumpserver-django-oidc-rp | b2d6a63dd82214263b8971412a9043268c31ba7f | [
"MIT"
] | 2 | 2020-08-14T05:39:56.000Z | 2020-09-14T22:26:21.000Z | """
OpenID Connect relying party (RP) middlewares
=============================================
This modules defines middlewares allowing to better handle users authenticated using an OpenID
Connect provider (OP). One of the main middlewares is responsible for periodically refreshing
ID tokens and access tokens.
"""
import time
import requests
import requests.exceptions
from django.contrib import auth
from .conf.settings import dynamic_setting as oidc_rp_settings
from .utils import validate_and_return_id_token, get_logger
from .decorator import ssl_verification
logger = get_logger(__file__)
class OIDCRefreshIDTokenMiddleware:
""" Allows to periodically refresh the ID token associated with the authenticated user. """
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Refreshes tokens only in the applicable cases.
if request.method == 'GET' and not request.is_ajax() and request.user.is_authenticated and oidc_rp_settings.AUTH_OPENID:
self.refresh_token(request)
response = self.get_response(request)
return response
@ssl_verification
def refresh_token(self, request):
""" Refreshes the token of the current user. """
log_prompt = "Process refresh Token: {}"
# logger.debug(log_prompt.format('Start'))
# NOTE: SHARE_SESSION is False means that the user does not share sessions
# with other applications
if not oidc_rp_settings.SHARE_SESSION:
logger.debug(log_prompt.format('Not share session'))
return
# NOTE: no refresh token in the session means that the user wasn't authentified using the
# OpenID Connect provider (OP).
refresh_token = request.session.get('oidc_auth_refresh_token')
if refresh_token is None:
logger.debug(log_prompt.format('Refresh token is missing'))
return
id_token_exp_timestamp = request.session.get('oidc_auth_id_token_exp_timestamp', None)
now_timestamp = time.time()
# Returns immediately if the token is still valid.
if id_token_exp_timestamp is not None and id_token_exp_timestamp > now_timestamp:
# logger.debug(log_prompt.format('Returns immediately because token is still valid'))
return
# Prepares the token payload that will be used to request a new token from the token
# endpoint.
refresh_token = request.session.pop('oidc_auth_refresh_token')
token_payload = {
'client_id': oidc_rp_settings.CLIENT_ID,
'client_secret': oidc_rp_settings.CLIENT_SECRET,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'scope': oidc_rp_settings.SCOPES,
}
# Calls the token endpoint.
logger.debug(log_prompt.format('Calls the token endpoint'))
token_response = requests.post(oidc_rp_settings.PROVIDER_TOKEN_ENDPOINT, data=token_payload)
try:
token_response.raise_for_status()
except requests.exceptions.HTTPError as e:
logger.debug(log_prompt.format('Request exception http error: {}'.format(str(e))))
logger.debug(log_prompt.format('Logout'))
auth.logout(request)
return
token_response_data = token_response.json()
# Validates the token.
logger.debug(log_prompt.format('Validate ID Token'))
raw_id_token = token_response_data.get('id_token')
id_token = validate_and_return_id_token(raw_id_token, validate_nonce=False)
# If the token cannot be validated we have to log out the current user.
if id_token is None:
logger.debug(log_prompt.format('ID Token is None'))
auth.logout(request)
logger.debug(log_prompt.format('Logout'))
return
# Retrieves the access token and refresh token.
access_token = token_response_data.get('access_token')
refresh_token = token_response_data.get('refresh_token')
# Stores the ID token, the related access token and the refresh token in the session.
request.session['oidc_auth_id_token'] = raw_id_token
request.session['oidc_auth_access_token'] = access_token
request.session['oidc_auth_refresh_token'] = refresh_token
# Saves the new expiration timestamp.
request.session['oidc_auth_id_token_exp_timestamp'] = \
time.time() + oidc_rp_settings.ID_TOKEN_MAX_AGE
| 40.776786 | 128 | 0.681848 |
import time
import requests
import requests.exceptions
from django.contrib import auth
from .conf.settings import dynamic_setting as oidc_rp_settings
from .utils import validate_and_return_id_token, get_logger
from .decorator import ssl_verification
logger = get_logger(__file__)
class OIDCRefreshIDTokenMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.method == 'GET' and not request.is_ajax() and request.user.is_authenticated and oidc_rp_settings.AUTH_OPENID:
self.refresh_token(request)
response = self.get_response(request)
return response
@ssl_verification
def refresh_token(self, request):
log_prompt = "Process refresh Token: {}"
if not oidc_rp_settings.SHARE_SESSION:
logger.debug(log_prompt.format('Not share session'))
return
# OpenID Connect provider (OP).
refresh_token = request.session.get('oidc_auth_refresh_token')
if refresh_token is None:
logger.debug(log_prompt.format('Refresh token is missing'))
return
id_token_exp_timestamp = request.session.get('oidc_auth_id_token_exp_timestamp', None)
now_timestamp = time.time()
# Returns immediately if the token is still valid.
if id_token_exp_timestamp is not None and id_token_exp_timestamp > now_timestamp:
# logger.debug(log_prompt.format('Returns immediately because token is still valid'))
return
# Prepares the token payload that will be used to request a new token from the token
# endpoint.
refresh_token = request.session.pop('oidc_auth_refresh_token')
token_payload = {
'client_id': oidc_rp_settings.CLIENT_ID,
'client_secret': oidc_rp_settings.CLIENT_SECRET,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'scope': oidc_rp_settings.SCOPES,
}
# Calls the token endpoint.
logger.debug(log_prompt.format('Calls the token endpoint'))
token_response = requests.post(oidc_rp_settings.PROVIDER_TOKEN_ENDPOINT, data=token_payload)
try:
token_response.raise_for_status()
except requests.exceptions.HTTPError as e:
logger.debug(log_prompt.format('Request exception http error: {}'.format(str(e))))
logger.debug(log_prompt.format('Logout'))
auth.logout(request)
return
token_response_data = token_response.json()
# Validates the token.
logger.debug(log_prompt.format('Validate ID Token'))
raw_id_token = token_response_data.get('id_token')
id_token = validate_and_return_id_token(raw_id_token, validate_nonce=False)
# If the token cannot be validated we have to log out the current user.
if id_token is None:
logger.debug(log_prompt.format('ID Token is None'))
auth.logout(request)
logger.debug(log_prompt.format('Logout'))
return
# Retrieves the access token and refresh token.
access_token = token_response_data.get('access_token')
refresh_token = token_response_data.get('refresh_token')
# Stores the ID token, the related access token and the refresh token in the session.
request.session['oidc_auth_id_token'] = raw_id_token
request.session['oidc_auth_access_token'] = access_token
request.session['oidc_auth_refresh_token'] = refresh_token
# Saves the new expiration timestamp.
request.session['oidc_auth_id_token_exp_timestamp'] = \
time.time() + oidc_rp_settings.ID_TOKEN_MAX_AGE
| true | true |
f7fb1ba6528e00d558dedf9d04faac54f413e044 | 4,612 | py | Python | manubot/cite/zotero.py | benstear/manubot | df184a5c7e5eb98894a3edb43d9772d1ac3e01ab | [
"BSD-3-Clause"
] | null | null | null | manubot/cite/zotero.py | benstear/manubot | df184a5c7e5eb98894a3edb43d9772d1ac3e01ab | [
"BSD-3-Clause"
] | null | null | null | manubot/cite/zotero.py | benstear/manubot | df184a5c7e5eb98894a3edb43d9772d1ac3e01ab | [
"BSD-3-Clause"
] | null | null | null | """
Methods to interact with a Zotero translation-server.
https://github.com/zotero/translation-server
The Manubot team currently hosts a public translation server at
https://translate.manubot.org. More information on this instance at
https://github.com/manubot/manubot/issues/82.
"""
import json
import logging
import requests
from manubot.util import get_manubot_user_agent, is_http_url
base_url = "https://translate.manubot.org"
def web_query(url):
"""
Return Zotero citation metadata for a URL as a list containing a single element that
is a dictionary with the URL's metadata.
"""
headers = {"User-Agent": get_manubot_user_agent(), "Content-Type": "text/plain"}
params = {"single": 1}
api_url = f"{base_url}/web"
response = requests.post(api_url, params=params, headers=headers, data=str(url))
try:
zotero_data = response.json()
except Exception as error:
logging.warning(
f"Error parsing web_query output as JSON for {url}:\n{response.text}"
)
raise error
if response.status_code == 300:
# When single=1 is specified, multiple results should never be returned
logging.warning(
f"web_query returned multiple results for {url}:\n"
+ json.dumps(zotero_data, indent=2)
)
raise ValueError(f"multiple results for {url}")
zotero_data = _passthrough_zotero_data(zotero_data)
return zotero_data
def search_query(identifier):
"""
Retrive Zotero metadata for a DOI, ISBN, PMID, or arXiv ID.
Example usage:
```shell
curl --silent
--data '10.2307/4486062' \
--header 'Content-Type: text/plain' \
http://127.0.0.1:1969/search
```
"""
api_url = f"{base_url}/search"
headers = {"User-Agent": get_manubot_user_agent(), "Content-Type": "text/plain"}
response = requests.post(api_url, headers=headers, data=str(identifier))
try:
zotero_data = response.json()
except Exception as error:
logging.warning(
f"Error parsing search_query output as JSON for {identifier}:\n{response.text}"
)
raise error
zotero_data = _passthrough_zotero_data(zotero_data)
return zotero_data
def _passthrough_zotero_data(zotero_data):
"""
Address known issues with Zotero metadata.
Assumes zotero data should contain a single bibliographic record.
"""
if not isinstance(zotero_data, list):
raise ValueError("_passthrough_zotero_data: zotero_data should be a list")
if len(zotero_data) > 1:
# Sometimes translation-server creates multiple data items for a single record.
# If so, keep only the parent item, and remove child items (such as notes).
# https://github.com/zotero/translation-server/issues/67
zotero_data = zotero_data[:1]
return zotero_data
def export_as_csl(zotero_data):
"""
Export Zotero JSON data to CSL JSON using a translation-server /export query.
Performs a similar query to the following curl command:
```
curl --verbose \
--data @items.json \
--header 'Content-Type: application/json' \
'https://translate.manubot.org/export?format=csljson'
```
"""
api_url = f"{base_url}/export"
params = {"format": "csljson"}
headers = {"User-Agent": get_manubot_user_agent()}
response = requests.post(api_url, params=params, headers=headers, json=zotero_data)
if not response.ok:
message = f"export_as_csl: translation-server returned status code {response.status_code}"
logging.warning(f"{message} with the following output:\n{response.text}")
raise requests.HTTPError(message)
try:
csl_json = response.json()
except Exception as error:
logging.warning(f"Error parsing export_as_csl output as JSON:\n{response.text}")
raise error
return csl_json
def get_csl_item(identifier: str):
"""
Use a translation-server search query followed by an export query
to return a CSL Item (the first & only record of the returned CSL JSON).
"""
zotero_data = search_query(identifier)
csl_data = export_as_csl(zotero_data)
(csl_item,) = csl_data
return csl_item
def search_or_web_query(identifier: str) -> list:
"""
Detect whether `identifier` is a URL. If so,
retrieve zotero metadata using a /web query.
Otherwise, retrieve zotero metadata using a /search query.
"""
if is_http_url(identifier):
zotero_data = web_query(identifier)
else:
zotero_data = search_query(identifier)
return zotero_data
| 33.664234 | 98 | 0.680182 |
import json
import logging
import requests
from manubot.util import get_manubot_user_agent, is_http_url
base_url = "https://translate.manubot.org"
def web_query(url):
headers = {"User-Agent": get_manubot_user_agent(), "Content-Type": "text/plain"}
params = {"single": 1}
api_url = f"{base_url}/web"
response = requests.post(api_url, params=params, headers=headers, data=str(url))
try:
zotero_data = response.json()
except Exception as error:
logging.warning(
f"Error parsing web_query output as JSON for {url}:\n{response.text}"
)
raise error
if response.status_code == 300:
logging.warning(
f"web_query returned multiple results for {url}:\n"
+ json.dumps(zotero_data, indent=2)
)
raise ValueError(f"multiple results for {url}")
zotero_data = _passthrough_zotero_data(zotero_data)
return zotero_data
def search_query(identifier):
api_url = f"{base_url}/search"
headers = {"User-Agent": get_manubot_user_agent(), "Content-Type": "text/plain"}
response = requests.post(api_url, headers=headers, data=str(identifier))
try:
zotero_data = response.json()
except Exception as error:
logging.warning(
f"Error parsing search_query output as JSON for {identifier}:\n{response.text}"
)
raise error
zotero_data = _passthrough_zotero_data(zotero_data)
return zotero_data
def _passthrough_zotero_data(zotero_data):
if not isinstance(zotero_data, list):
raise ValueError("_passthrough_zotero_data: zotero_data should be a list")
if len(zotero_data) > 1:
zotero_data = zotero_data[:1]
return zotero_data
def export_as_csl(zotero_data):
api_url = f"{base_url}/export"
params = {"format": "csljson"}
headers = {"User-Agent": get_manubot_user_agent()}
response = requests.post(api_url, params=params, headers=headers, json=zotero_data)
if not response.ok:
message = f"export_as_csl: translation-server returned status code {response.status_code}"
logging.warning(f"{message} with the following output:\n{response.text}")
raise requests.HTTPError(message)
try:
csl_json = response.json()
except Exception as error:
logging.warning(f"Error parsing export_as_csl output as JSON:\n{response.text}")
raise error
return csl_json
def get_csl_item(identifier: str):
zotero_data = search_query(identifier)
csl_data = export_as_csl(zotero_data)
(csl_item,) = csl_data
return csl_item
def search_or_web_query(identifier: str) -> list:
if is_http_url(identifier):
zotero_data = web_query(identifier)
else:
zotero_data = search_query(identifier)
return zotero_data
| true | true |
f7fb1bcc85e55a1c0f7cd1c2ad78746f6eea903b | 1,409 | py | Python | scripts/pop2.py | yarden-livnat/episim | fc6ad77bb62d3632fc101642c6c68a36813bc08a | [
"MIT"
] | null | null | null | scripts/pop2.py | yarden-livnat/episim | fc6ad77bb62d3632fc101642c6c68a36813bc08a | [
"MIT"
] | null | null | null | scripts/pop2.py | yarden-livnat/episim | fc6ad77bb62d3632fc101642c6c68a36813bc08a | [
"MIT"
] | null | null | null | import csv
import time
import os.path
from sys import argv
from collections import defaultdict
PERSON_FILE = 'persons_ref.txt'
ZIP_COUNTY_FILE = 'zip_county.csv'
OUT_COUNTY_FILE = 'county_pop.csv'
zip2county = defaultdict(str)
pop = defaultdict(int)
missing = defaultdict(int)
# Init
if len(argv) != 2:
print 'Usage: ', argv[0], '<path to dataset directory>'
exit(0)
dir_name = argv[1]
if dir_name[-1] != '/':
dir_name += '/'
# process
with open(ZIP_COUNTY_FILE, 'rb') as zipfile:
f = csv.reader(zipfile)
f.next()
for row in f:
zip2county[row[0]] = int(row[1])
print 'read people'
t0 = time.clock()
n = 0
with open(os.path.join(dir_name, PERSON_FILE), 'rb') as csvfile:
f = csv.reader(csvfile, delimiter=' ')
f.next()
for person in f:
n += 1
if n % 1000000 == 0:
print n
p_zip = person[2]
c = zip2county[p_zip]
if c != '':
pop[c] += 1
else:
missing[p_zip] += 1
t1 = time.clock()
print '\t',n,'records in',(t1-t0),' secs'
if len(missing) > 0:
print '\t unknown zip codes:', missing.items()
print 'write', OUT_COUNTY_FILE
t0 = time.clock()
with open(os.path.join(dir_name, OUT_COUNTY_FILE), 'wb') as cfile:
o = csv.writer(cfile)
o.writerow(['county', 'pop'])
o.writerows(pop.items())
t1 = time.clock()
print '\t',len(pop),'records in',(t1-t0),' secs'
| 20.42029 | 66 | 0.605394 | import csv
# Duplicate, comment-stripped copy of the pop2.py script above (Python 2
# syntax); see the annotated copy earlier in the file for a full walkthrough.
import time
import os.path
from sys import argv
from collections import defaultdict
PERSON_FILE = 'persons_ref.txt'
ZIP_COUNTY_FILE = 'zip_county.csv'
OUT_COUNTY_FILE = 'county_pop.csv'
zip2county = defaultdict(str)
pop = defaultdict(int)
missing = defaultdict(int)
if len(argv) != 2:
    print 'Usage: ', argv[0], '<path to dataset directory>'
    exit(0)
dir_name = argv[1]
if dir_name[-1] != '/':
    dir_name += '/'
with open(ZIP_COUNTY_FILE, 'rb') as zipfile:
    f = csv.reader(zipfile)
    f.next()
    for row in f:
        zip2county[row[0]] = int(row[1])
print 'read people'
t0 = time.clock()
n = 0
with open(os.path.join(dir_name, PERSON_FILE), 'rb') as csvfile:
    f = csv.reader(csvfile, delimiter=' ')
    f.next()
    for person in f:
        n += 1
        if n % 1000000 == 0:
            print n
        p_zip = person[2]
        c = zip2county[p_zip]
        if c != '':
            pop[c] += 1
        else:
            missing[p_zip] += 1
t1 = time.clock()
print '\t',n,'records in',(t1-t0),' secs'
if len(missing) > 0:
    print '\t unknown zip codes:', missing.items()
print 'write', OUT_COUNTY_FILE
t0 = time.clock()
with open(os.path.join(dir_name, OUT_COUNTY_FILE), 'wb') as cfile:
    o = csv.writer(cfile)
    o.writerow(['county', 'pop'])
    o.writerows(pop.items())
t1 = time.clock()
print '\t',len(pop),'records in',(t1-t0),' secs'
| false | true |
f7fb1cf6551ea08d02fa45fc877faafa31c94109 | 3,243 | py | Python | ooobuild/dyn/sheet/cell_flags.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/sheet/cell_flags.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/sheet/cell_flags.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sheet
from enum import IntFlag
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# Select at import time between the live UNO constant group and the static
# typing stub: under a real UNO runtime (and not during static type checking)
# the genuine com.sun.star class is imported and an IntFlag mirror is built
# dynamically; otherwise the bundled stub provides both names.
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
    from com.sun.star.sheet import CellFlags as CellFlags
    # Tag the live UNO constant group with its namespace metadata when the
    # uno bridge exposes a writable _constants dict.
    if hasattr(CellFlags, '_constants') and isinstance(CellFlags._constants, dict):
        CellFlags._constants['__ooo_ns__'] = 'com.sun.star.sheet'
        CellFlags._constants['__ooo_full_ns__'] = 'com.sun.star.sheet.CellFlags'
        CellFlags._constants['__ooo_type_name__'] = 'const'
    def build_enum():
        # Mirror every non-callable, non-dunder attribute of the live
        # CellFlags group into a module-level IntFlag named CellFlagsEnum.
        global CellFlagsEnum
        ls = [f for f in dir(CellFlags) if not callable(getattr(CellFlags, f)) and not f.startswith('__')]
        _dict = {}
        for name in ls:
            _dict[name] = getattr(CellFlags, name)
        CellFlagsEnum = IntFlag('CellFlagsEnum', _dict)
    build_enum()
else:
    from ...lo.sheet.cell_flags import CellFlags as CellFlags
    class CellFlagsEnum(IntFlag):
        """
        Enum of Const Class CellFlags
        These constants select different types of cell contents.
        The values can be combined. They are used to insert, copy, or delete contents.
        """
        VALUE = CellFlags.VALUE
        """
        selects constant numeric values that are not formatted as dates or times.
        """
        DATETIME = CellFlags.DATETIME
        """
        selects constant numeric values that have a date or time number format.
        """
        STRING = CellFlags.STRING
        """
        selects constant strings.
        """
        ANNOTATION = CellFlags.ANNOTATION
        """
        selects cell annotations.
        """
        FORMULA = CellFlags.FORMULA
        """
        selects formulas.
        """
        HARDATTR = CellFlags.HARDATTR
        """
        selects all explicit formatting, but not the formatting which is applied implicitly through style sheets.
        """
        STYLES = CellFlags.STYLES
        """
        selects cell styles.
        """
        OBJECTS = CellFlags.OBJECTS
        """
        selects drawing objects.
        """
        EDITATTR = CellFlags.EDITATTR
        """
        selects formatting within parts of the cell contents.
        """
        FORMATTED = CellFlags.FORMATTED
        """
        selects cells with formatting within the cells or cells with more than one paragraph within the cells.
        """
__all__ = ['CellFlags', 'CellFlagsEnum']
| 34.136842 | 113 | 0.653716 |
from enum import IntFlag
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# Duplicate (comment-stripped) copy of the cell_flags module above; see the
# annotated copy earlier in the file for an explanation of the dynamic/static
# selection between the live UNO constant group and the typing stub.
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
    from com.sun.star.sheet import CellFlags as CellFlags
    if hasattr(CellFlags, '_constants') and isinstance(CellFlags._constants, dict):
        CellFlags._constants['__ooo_ns__'] = 'com.sun.star.sheet'
        CellFlags._constants['__ooo_full_ns__'] = 'com.sun.star.sheet.CellFlags'
        CellFlags._constants['__ooo_type_name__'] = 'const'
    def build_enum():
        global CellFlagsEnum
        ls = [f for f in dir(CellFlags) if not callable(getattr(CellFlags, f)) and not f.startswith('__')]
        _dict = {}
        for name in ls:
            _dict[name] = getattr(CellFlags, name)
        CellFlagsEnum = IntFlag('CellFlagsEnum', _dict)
    build_enum()
else:
    from ...lo.sheet.cell_flags import CellFlags as CellFlags
    class CellFlagsEnum(IntFlag):
        """
        Enum of Const Class CellFlags
        These constants select different types of cell contents.
        The values can be combined. They are used to insert, copy, or delete contents.
        """
        VALUE = CellFlags.VALUE
        """
        selects constant numeric values that are not formatted as dates or times.
        """
        DATETIME = CellFlags.DATETIME
        """
        selects constant numeric values that have a date or time number format.
        """
        STRING = CellFlags.STRING
        """
        selects constant strings.
        """
        ANNOTATION = CellFlags.ANNOTATION
        """
        selects cell annotations.
        """
        FORMULA = CellFlags.FORMULA
        """
        selects formulas.
        """
        HARDATTR = CellFlags.HARDATTR
        """
        selects all explicit formatting, but not the formatting which is applied implicitly through style sheets.
        """
        STYLES = CellFlags.STYLES
        """
        selects cell styles.
        """
        OBJECTS = CellFlags.OBJECTS
        """
        selects drawing objects.
        """
        EDITATTR = CellFlags.EDITATTR
        """
        selects formatting within parts of the cell contents.
        """
        FORMATTED = CellFlags.FORMATTED
        """
        selects cells with formatting within the cells or cells with more than one paragraph within the cells.
        """
__all__ = ['CellFlags', 'CellFlagsEnum']
| true | true |
f7fb1d14e496b53b017c54f11e6c83eae533dbe7 | 1,070 | py | Python | examples/cities/cities/commands/crawl.py | jiansongyang/scrapy-auto-translation-middelware | 8107338275f2b1fbebc1e72a5512d4e44435cb57 | [
"MIT"
] | 4 | 2020-07-03T11:46:01.000Z | 2021-11-15T08:07:45.000Z | examples/cities/cities/commands/crawl.py | jiansongyang/scrapy-auto-translation-middelware | 8107338275f2b1fbebc1e72a5512d4e44435cb57 | [
"MIT"
] | null | null | null | examples/cities/cities/commands/crawl.py | jiansongyang/scrapy-auto-translation-middelware | 8107338275f2b1fbebc1e72a5512d4e44435cb57 | [
"MIT"
] | 1 | 2021-11-15T08:32:23.000Z | 2021-11-15T08:32:23.000Z | from scrapy.exceptions import UsageError
from scrapy.commands.crawl import Command as CrawlCommand
import scrapy
import re
# Help text raised when no Google Cloud API key is configured; {spider} is
# filled in with the spider name taken from the command line arguments.
usage_info = """
Congratulations!
You have correctly installed scrapy-auto-translation-middelware. The example project will show you how the
middleware will work.
However, you have to use your own Google Cloud API key for testing purpose. This can be specified by setting GOOGLE_CLOUD_API_KEY
variable in the command line options, like this:
	scrapy crawl {spider} -s GOOGLE_CLOUD_API_KEY=<your-google-cloud-key>
Alternatively, you can specify the key in your settings.py file:
	GOOGLE_CLOUD_API_KEY=<your-google-cloud-key>
For more information about Google Cloud API Key please go to Google Cloud's official website.
Good luck!
"""
class Command(CrawlCommand):
    """`scrapy crawl` command that additionally requires GOOGLE_CLOUD_API_KEY."""
    def process_options(self, args, opts):
        # After normal option processing, fail early with usage help when the
        # Google Cloud API key is missing from settings / -s overrides.
        super(Command, self).process_options(args, opts)
        google_key_found = self.settings.get('GOOGLE_CLOUD_API_KEY')
        if not google_key_found:
            raise UsageError(usage_info.format(spider=args[0]))
| 31.470588 | 129 | 0.769159 | from scrapy.exceptions import UsageError
from scrapy.commands.crawl import Command as CrawlCommand
import scrapy
import re
# Duplicate copy of the crawl.py command above; see the annotated copy
# earlier in the file.
usage_info = """
Congratulations!
You have correctly installed scrapy-auto-translation-middelware. The example project will show you how the
middleware will work.
However, you have to use your own Google Cloud API key for testing purpose. This can be specified by setting GOOGLE_CLOUD_API_KEY
variable in the command line options, like this:
	scrapy crawl {spider} -s GOOGLE_CLOUD_API_KEY=<your-google-cloud-key>
Alternatively, you can specify the key in your settings.py file:
	GOOGLE_CLOUD_API_KEY=<your-google-cloud-key>
For more information about Google Cloud API Key please go to Google Cloud's official website.
Good luck!
"""
class Command(CrawlCommand):
    def process_options(self, args, opts):
        super(Command, self).process_options(args, opts)
        google_key_found = self.settings.get('GOOGLE_CLOUD_API_KEY')
        if not google_key_found:
            raise UsageError(usage_info.format(spider=args[0]))
| true | true |
f7fb1df55e38fb95603e4c520767d0651b0eec45 | 383 | py | Python | Exercises/bin/dos.plot.py | addman2/KvantSim | 7bfc56f909c6a007aac9ba973227f392c50b33e2 | [
"MIT"
] | null | null | null | Exercises/bin/dos.plot.py | addman2/KvantSim | 7bfc56f909c6a007aac9ba973227f392c50b33e2 | [
"MIT"
] | null | null | null | Exercises/bin/dos.plot.py | addman2/KvantSim | 7bfc56f909c6a007aac9ba973227f392c50b33e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt
# Plot a density-of-states curve from a two-column text file (argv[1]) and
# save it as a .png next to the input.  An optional second argument draws a
# vertical red marker line (e.g. a Fermi level) at that x position.
E, N = np.loadtxt(sys.argv[1], usecols = (0,1), unpack = True)
plt.plot(E, N)
try:
    # Optional vertical marker at x = argv[2]; skipped when the argument is
    # absent (IndexError) or not a number (ValueError).
    plt.plot([float(sys.argv[2])] * 2, [max(N),0.0],"r-")
except (IndexError, ValueError):
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only the expected argument errors are ignored now.
    pass
plt.xlim([min(E),max(E)])
plt.ylim([0.0,max(N)])
# Horizontal zero line spanning the full x range.
plt.plot(plt.xlim(),[0.0]*2,"k-")
plt.savefig(sys.argv[1].replace("dos","png"))
| 17.409091 | 62 | 0.613577 |
import sys
import numpy as np
from matplotlib import pyplot as plt
E, N = np.loadtxt(sys.argv[1], usecols = (0,1), unpack = True)
plt.plot(E, N)
try:
plt.plot([float(sys.argv[2])] * 2, [max(N),0.0],"r-")
except:
pass
plt.xlim([min(E),max(E)])
plt.ylim([0.0,max(N)])
plt.plot(plt.xlim(),[0.0]*2,"k-")
plt.savefig(sys.argv[1].replace("dos","png"))
| true | true |
f7fb1f141576c32633397db9e4feba90d4c133ed | 6,096 | py | Python | tfx/components/tuner/component.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/components/tuner/component.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | tfx/components/tuner/component.py | johnPertoft/tfx | c6335684a54651adbcbe50aa52918b9b9948326e | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Tuner component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, NamedTuple, Optional, Text
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.tuner import executor
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from tfx.proto import tuner_pb2
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import json_utils
# tuner: A BaseTuner that will be used for tuning.
# fit_kwargs: Args to pass to tuner's run_trial function for fitting the
# model , e.g., the training and validation dataset. Required
# args depend on the tuner's implementation.
TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),
('fit_kwargs', Dict[Text, Any])])
TunerFnResult.__doc__ = """
Return type of tuner_fn.
tuner_fn returns a TunerFnResult that contains:
- tuner: A BaseTuner that will be used for tuning.
- fit_kwargs: Args to pass to tuner's run_trial function for fitting the
model , e.g., the training and validation dataset. Required
args depend on the tuner's implementation.
"""
class Tuner(base_component.BaseComponent):
"""A TFX component for model hyperparameter tuning.
Component `outputs` contains:
- `best_hyperparameters`: Channel of type
`standard_artifacts.HyperParameters` for result of
the best hparams.
See [the Tuner guide](https://www.tensorflow.org/tfx/guide/tuner)
for more details.
"""
SPEC_CLASS = standard_component_specs.TunerSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(self,
examples: types.Channel,
schema: Optional[types.Channel] = None,
transform_graph: Optional[types.Channel] = None,
module_file: Optional[Text] = None,
tuner_fn: Optional[Text] = None,
train_args: Optional[trainer_pb2.TrainArgs] = None,
eval_args: Optional[trainer_pb2.EvalArgs] = None,
tune_args: Optional[tuner_pb2.TuneArgs] = None,
custom_config: Optional[Dict[Text, Any]] = None):
"""Construct a Tuner component.
Args:
examples: A Channel of type `standard_artifacts.Examples`, serving as the
source of examples that are used in tuning (required).
schema: An optional Channel of type `standard_artifacts.Schema`, serving
as the schema of training and eval data. This is used when raw examples
are provided.
transform_graph: An optional Channel of type
`standard_artifacts.TransformGraph`, serving as the input transform
graph if present. This is used when transformed examples are provided.
module_file: A path to python module file containing UDF tuner definition.
The module_file must implement a function named `tuner_fn` at its top
level. The function must have the following signature.
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
Exactly one of 'module_file' or 'tuner_fn' must be supplied.
tuner_fn: A python path to UDF model definition function. See
'module_file' for the required signature of the UDF. Exactly one of
'module_file' or 'tuner_fn' must be supplied.
train_args: A trainer_pb2.TrainArgs instance, containing args used for
training. Currently only splits and num_steps are available. Default
behavior (when splits is empty) is train on `train` split.
eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
Currently only splits and num_steps are available. Default behavior
(when splits is empty) is evaluate on `eval` split.
tune_args: A tuner_pb2.TuneArgs instance, containing args used for tuning.
Currently only num_parallel_trials is available.
custom_config: A dict which contains addtional training job parameters
that will be passed into user module.
"""
if bool(module_file) == bool(tuner_fn):
raise ValueError(
"Exactly one of 'module_file' or 'tuner_fn' must be supplied")
best_hyperparameters = types.Channel(
type=standard_artifacts.HyperParameters)
spec = standard_component_specs.TunerSpec(
examples=examples,
schema=schema,
transform_graph=transform_graph,
module_file=module_file,
tuner_fn=tuner_fn,
train_args=train_args or trainer_pb2.TrainArgs(),
eval_args=eval_args or trainer_pb2.EvalArgs(),
tune_args=tune_args,
best_hyperparameters=best_hyperparameters,
custom_config=json_utils.dumps(custom_config),
)
super(Tuner, self).__init__(spec=spec)
if udf_utils.should_package_user_modules():
# In this case, the `MODULE_PATH_KEY` execution property will be injected
# as a reference to the given user module file after packaging, at which
# point the `MODULE_FILE_KEY` execution property will be removed.
udf_utils.add_user_module_dependency(
self, standard_component_specs.MODULE_FILE_KEY,
standard_component_specs.MODULE_PATH_KEY)
| 45.492537 | 80 | 0.713255 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, NamedTuple, Optional, Text
from kerastuner.engine import base_tuner
from tfx import types
from tfx.components.tuner import executor
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from tfx.proto import tuner_pb2
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import json_utils
# model , e.g., the training and validation dataset. Required
# args depend on the tuner's implementation.
TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),
('fit_kwargs', Dict[Text, Any])])
TunerFnResult.__doc__ = """
Return type of tuner_fn.
tuner_fn returns a TunerFnResult that contains:
- tuner: A BaseTuner that will be used for tuning.
- fit_kwargs: Args to pass to tuner's run_trial function for fitting the
model , e.g., the training and validation dataset. Required
args depend on the tuner's implementation.
"""
class Tuner(base_component.BaseComponent):
SPEC_CLASS = standard_component_specs.TunerSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(self,
examples: types.Channel,
schema: Optional[types.Channel] = None,
transform_graph: Optional[types.Channel] = None,
module_file: Optional[Text] = None,
tuner_fn: Optional[Text] = None,
train_args: Optional[trainer_pb2.TrainArgs] = None,
eval_args: Optional[trainer_pb2.EvalArgs] = None,
tune_args: Optional[tuner_pb2.TuneArgs] = None,
custom_config: Optional[Dict[Text, Any]] = None):
if bool(module_file) == bool(tuner_fn):
raise ValueError(
"Exactly one of 'module_file' or 'tuner_fn' must be supplied")
best_hyperparameters = types.Channel(
type=standard_artifacts.HyperParameters)
spec = standard_component_specs.TunerSpec(
examples=examples,
schema=schema,
transform_graph=transform_graph,
module_file=module_file,
tuner_fn=tuner_fn,
train_args=train_args or trainer_pb2.TrainArgs(),
eval_args=eval_args or trainer_pb2.EvalArgs(),
tune_args=tune_args,
best_hyperparameters=best_hyperparameters,
custom_config=json_utils.dumps(custom_config),
)
super(Tuner, self).__init__(spec=spec)
if udf_utils.should_package_user_modules():
udf_utils.add_user_module_dependency(
self, standard_component_specs.MODULE_FILE_KEY,
standard_component_specs.MODULE_PATH_KEY)
| true | true |
f7fb1f7937f7f2627bcc9af90dbc20e590ab8f6d | 6,375 | py | Python | tensorlib/rl/envs/wrappers/atari_wrappers.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 3 | 2019-07-23T21:32:36.000Z | 2022-02-04T23:13:30.000Z | tensorlib/rl/envs/wrappers/atari_wrappers.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | null | null | null | tensorlib/rl/envs/wrappers/atari_wrappers.py | vermouth1992/torchlib | 63b2bedb40f670b2d9fbfc0daeab4a8d44623095 | [
"MIT"
] | 1 | 2019-07-23T21:32:23.000Z | 2019-07-23T21:32:23.000Z | """
Code copied from UC Berkeley CS294-112
"""
from collections import deque
import cv2
import gym
import numpy as np
from gym import spaces
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env=None, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        super(NoopResetEnv, self).__init__(env)
        self.noop_max = noop_max
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self):
        """Do no-op action for a number of steps in [1, noop_max]."""
        obs = self.env.reset()
        noops = np.random.randint(1, self.noop_max + 1)
        for _ in range(noops):
            obs, _, done, _ = self.env.step(0)
            # Bug fix (matches openai/baselines): the random no-ops can
            # exhaust an episode; reset again rather than handing a terminal
            # frame out as the "initial" observation.
            if done:
                obs = self.env.reset()
        return obs
class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Take action on reset for environments that are fixed until firing."""
        super(FireResetEnv, self).__init__(env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self):
        """Reset, then press FIRE (action 1) and action 2 to start the game."""
        self.env.reset()
        obs, _, done, _ = self.env.step(1)
        # Bug fix (matches openai/baselines): the warm-up steps themselves can
        # terminate the episode; reset so the returned state belongs to a live
        # episode instead of a finished one.
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        super(EpisodicLifeEnv, self).__init__(env)
        # Life count observed after the previous step (0 before first reset).
        self.lives = 0
        # Whether the last `done` came from a genuine game over.
        self.was_real_done = True
        # Whether the last reset() actually reset the underlying env.
        self.was_real_reset = False

    def step(self, action):
        """Step the env; additionally report `done` on any loss of life,
        while remembering in `was_real_done` whether the game truly ended."""
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset()
            self.was_real_reset = True
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            self.was_real_reset = False
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env=None, skip=4):
        """Repeat each action `skip` times; observations are the pixel-wise
        max over the last two raw frames (removes Atari sprite flicker)."""
        super(MaxAndSkipEnv, self).__init__(env)
        # Keeps only the two most recent raw observations for max pooling.
        self._obs_buffer = deque(maxlen=2)
        self._skip = skip

    def step(self, action):
        """Apply `action` up to `skip` times, summing rewards; stop early on done."""
        accumulated_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, step_reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            accumulated_reward += step_reward
            if done:
                break
        # Element-wise maximum over the buffered frames.
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
        return max_frame, accumulated_reward, done, info

    def reset(self):
        """Clear past frame buffer and init. to first obs. from inner env."""
        self._obs_buffer.clear()
        first_obs = self.env.reset()
        self._obs_buffer.append(first_obs)
        return first_obs
def _process_frame84(frame):
    """Convert a raw 210x160x3 Atari frame into a cropped 84x84x1 uint8
    grayscale image (ITU-R 601 luma weights, bilinear resize, centre crop)."""
    rgb = np.reshape(frame, [210, 160, 3]).astype(np.float32)
    gray = rgb[:, :, 0] * 0.299 + rgb[:, :, 1] * 0.587 + rgb[:, :, 2] * 0.114
    resized = cv2.resize(gray, (84, 110), interpolation=cv2.INTER_LINEAR)
    # Drop the score bar / bottom border: keep rows 18..101 of the 110-row image.
    cropped = resized[18:102, :]
    cropped = np.reshape(cropped, [84, 84, 1])
    return cropped.astype(np.uint8)
class ProcessFrame84(gym.Wrapper):
    """Wrapper that runs every observation through `_process_frame84`."""

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))

    def step(self, action):
        raw_obs, reward, done, info = self.env.step(action)
        return _process_frame84(raw_obs), reward, done, info

    def reset(self):
        raw_obs = self.env.reset()
        return _process_frame84(raw_obs)
class ClippedRewardsWrapper(gym.RewardWrapper):
    # DeepMind-style reward clipping: keep only the sign (-1, 0, +1) so one
    # learning rate works across games with very different score scales.
    def reward(self, reward):
        return np.sign(reward)
class StackFrame(gym.Wrapper):
    """Stack the last `frame_length` observations along the last axis."""

    def __init__(self, env, frame_length=4):
        super(StackFrame, self).__init__(env)
        self.single_observation_space = env.observation_space
        # The stacked space repeats the per-frame bounds along the last axis.
        low = np.repeat(self.single_observation_space.low, frame_length, axis=-1)
        high = np.repeat(self.single_observation_space.high, frame_length, axis=-1)
        dtype = self.single_observation_space.dtype
        self.observation_space = spaces.Box(low=low, high=high, shape=None, dtype=dtype)
        # Rolling buffer of the most recent `frame_length` frames, zero-padded.
        self.obs = deque(maxlen=frame_length)
        for _ in range(frame_length):
            self.obs.append(np.zeros(shape=self.single_observation_space.shape))

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.obs.append(obs)
        return np.concatenate(self.obs, axis=-1), reward, done, info

    def reset(self):
        obs = self.env.reset()
        # Bug fix: the original only appended the reset frame, so up to
        # frame_length-1 stale frames from the previous episode leaked into
        # the first stacked observation of the new episode.  Start each
        # episode from a zero-padded stack ending with the reset frame.
        self.obs.clear()
        for _ in range(self.obs.maxlen - 1):
            self.obs.append(np.zeros(shape=self.single_observation_space.shape))
        self.obs.append(obs)
        return np.concatenate(self.obs, axis=-1)
def wrap_deepmind_ram(env, frame_length=4):
    """Apply the DeepMind preprocessing stack to a RAM-observation Atari env.

    Nesting order matters: life-episode and no-op-reset wrappers act on the
    raw env, frame skipping runs inside frame stacking, and reward clipping
    is outermost.  No pixel processing (ProcessFrame84) for RAM observations.
    """
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    # Only games whose action set includes FIRE need the fire-on-reset step.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = StackFrame(env, frame_length=frame_length)
    env = ClippedRewardsWrapper(env)
    return env
def wrap_deepmind(env, frame_length=4):
    """Apply the full DeepMind preprocessing stack to a pixel Atari env.

    Requires a 'NoFrameskip' variant so that MaxAndSkipEnv controls frame
    skipping itself.  Same wrapper order as `wrap_deepmind_ram`, plus the
    84x84 grayscale conversion before frame stacking.
    """
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ProcessFrame84(env)
    env = StackFrame(env, frame_length=frame_length)
    env = ClippedRewardsWrapper(env)
    return env
| 34.090909 | 88 | 0.631529 |
from collections import deque
import cv2
import gym
import numpy as np
from gym import spaces
# Duplicate (docstring-stripped) copies of the wrappers above; see the
# annotated versions earlier in the file for full documentation.
class NoopResetEnv(gym.Wrapper):
    """Randomized no-op actions on reset to sample varied initial states."""
    def __init__(self, env=None, noop_max=30):
        super(NoopResetEnv, self).__init__(env)
        self.noop_max = noop_max
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self):
        self.env.reset()
        noops = np.random.randint(1, self.noop_max + 1)
        for _ in range(noops):
            obs, _, _, _ = self.env.step(0)
        return obs
class FireResetEnv(gym.Wrapper):
    """Press FIRE on reset for games that wait for it to start."""
    def __init__(self, env=None):
        super(FireResetEnv, self).__init__(env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self):
        self.env.reset()
        obs, _, _, _ = self.env.step(1)
        obs, _, _, _ = self.env.step(2)
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    """Report done at loss of life; only truly reset on real game over."""
    def __init__(self, env=None):
        super(EpisodicLifeEnv, self).__init__(env)
        self.lives = 0
        self.was_real_done = True
        self.was_real_reset = False
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self):
        if self.was_real_done:
            obs = self.env.reset()
            self.was_real_reset = True
        else:
            obs, _, _, _ = self.env.step(0)
            self.was_real_reset = False
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action `skip` times; return the max of the last two frames."""
    def __init__(self, env=None, skip=4):
        super(MaxAndSkipEnv, self).__init__(env)
        self._obs_buffer = deque(maxlen=2)
        self._skip = skip
    def step(self, action):
        total_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            total_reward += reward
            if done:
                break
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
        return max_frame, total_reward, done, info
    def reset(self):
        self._obs_buffer.clear()
        obs = self.env.reset()
        self._obs_buffer.append(obs)
        return obs
def _process_frame84(frame):
    """Convert a raw 210x160x3 frame to a cropped 84x84x1 uint8 grayscale image."""
    img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
    img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_LINEAR)
    x_t = resized_screen[18:102, :]
    x_t = np.reshape(x_t, [84, 84, 1])
    return x_t.astype(np.uint8)
class ProcessFrame84(gym.Wrapper):
    """Run every observation through `_process_frame84`."""
    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        return _process_frame84(obs), reward, done, info
    def reset(self):
        return _process_frame84(self.env.reset())
class ClippedRewardsWrapper(gym.RewardWrapper):
    """Clip rewards to their sign (-1, 0, +1)."""
    def reward(self, reward):
        return np.sign(reward)
class StackFrame(gym.Wrapper):
    """Stack the last `frame_length` observations along the last axis."""
    def __init__(self, env, frame_length=4):
        super(StackFrame, self).__init__(env)
        self.single_observation_space = env.observation_space
        low = np.repeat(self.single_observation_space.low, frame_length, axis=-1)
        high = np.repeat(self.single_observation_space.high, frame_length, axis=-1)
        dtype = self.single_observation_space.dtype
        self.observation_space = spaces.Box(low=low, high=high, shape=None, dtype=dtype)
        self.obs = deque(maxlen=frame_length)
        for _ in range(frame_length):
            self.obs.append(np.zeros(shape=self.single_observation_space.shape))
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.obs.append(obs)
        return np.concatenate(self.obs, axis=-1), reward, done, info
    def reset(self):
        obs = self.env.reset()
        self.obs.append(obs)
        return np.concatenate(self.obs, axis=-1)
def wrap_deepmind_ram(env, frame_length=4):
    """DeepMind preprocessing stack for RAM-observation Atari envs."""
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = StackFrame(env, frame_length=frame_length)
    env = ClippedRewardsWrapper(env)
    return env
def wrap_deepmind(env, frame_length=4):
    """Full DeepMind preprocessing stack for pixel Atari envs (NoFrameskip only)."""
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ProcessFrame84(env)
    env = StackFrame(env, frame_length=frame_length)
    env = ClippedRewardsWrapper(env)
    return env
| true | true |
f7fb21e5c692bc51bb3f7e5c1812bdfdbd037d20 | 673 | py | Python | base/migrations/0018_auto_20181014_0111.py | djangulo/integralpsychology.life | 941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b | [
"MIT"
] | null | null | null | base/migrations/0018_auto_20181014_0111.py | djangulo/integralpsychology.life | 941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b | [
"MIT"
] | null | null | null | base/migrations/0018_auto_20181014_0111.py | djangulo/integralpsychology.life | 941e6c1ad1f274d7489d8ecdea5ae72f4889cc4b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-14 01:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: re-declare StyleSnippet.style_file as
    a ForeignKey to wagtaildocs.Document with CASCADE delete and no reverse
    accessor (related_name='+')."""
    dependencies = [
        ('base', '0017_auto_20181014_0105'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stylesnippet',
            name='style_file',
            field=models.ForeignKey(help_text='Only filenames ending in .css, .scss or .sass will be processed.', on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document', verbose_name='style file'),
        ),
    ]
| 30.590909 | 231 | 0.673105 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Duplicate copy of the migration above (comment-stripped variant)."""
    dependencies = [
        ('base', '0017_auto_20181014_0105'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stylesnippet',
            name='style_file',
            field=models.ForeignKey(help_text='Only filenames ending in .css, .scss or .sass will be processed.', on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document', verbose_name='style file'),
        ),
    ]
| true | true |
f7fb223ee06321e39762c1047848e35c70f008d6 | 215,692 | py | Python | tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | renovate-bot/python-bigtable | a99bf88417d6aec03923447c70c2752f6bb5c459 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | renovate-bot/python-bigtable | a99bf88417d6aec03923447c70c2752f6bb5c459 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | renovate-bot/python-bigtable | a99bf88417d6aec03923447c70c2752f6bb5c459 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
from google.cloud.bigtable_admin_v2.types import common
from google.cloud.bigtable_admin_v2.types import instance
from google.cloud.bigtable_admin_v2.types import instance as gba_instance
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip marker for tests that only make sense with google-auth older than 1.25.0.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Skip marker for tests that require google-auth 1.25.0 or newer.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS test scenarios."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-default endpoint for testing purposes.

    If the client's default endpoint is a localhost address, substitute
    "foo.googleapis.com" so mTLS endpoint derivation can be exercised;
    otherwise hand back the client's default endpoint unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their mtls form
    and passes through None and non-googleapis hosts unchanged."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi)
        == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,]
)
def test_bigtable_instance_admin_client_from_service_account_info(client_class):
    """Clients can be constructed from a service-account info dict and pick
    up the returned credentials and the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "bigtableadmin.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.BigtableInstanceAdminGrpcTransport, "grpc"),
        (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_bigtable_instance_admin_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports enable self-signed JWT only when always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,]
)
def test_bigtable_instance_admin_client_from_service_account_file(client_class):
    """Clients built via from_service_account_file / _json carry the factory's
    credentials and use the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "bigtableadmin.googleapis.com:443"
def test_bigtable_instance_admin_client_get_transport_class():
    """get_transport_class returns a valid default and resolves "grpc" by name."""
    transport = BigtableInstanceAdminClient.get_transport_class()
    available_transports = [
        transports.BigtableInstanceAdminGrpcTransport,
    ]
    assert transport in available_transports

    transport = BigtableInstanceAdminClient.get_transport_class("grpc")
    assert transport == transports.BigtableInstanceAdminGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    BigtableInstanceAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminClient),
)
@mock.patch.object(
    BigtableInstanceAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminAsyncClient),
)
def test_bigtable_instance_admin_client_client_options(
    client_class, transport_class, transport_name
):
    """ClientOptions and GOOGLE_API_USE_MTLS_ENDPOINT / _CLIENT_CERTIFICATE
    env vars control the transport endpoint and construction parameters."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
            "true",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
            "false",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    BigtableInstanceAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminClient),
)
@mock.patch.object(
    BigtableInstanceAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_bigtable_instance_admin_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint switches to the
    mtls host only when a client cert is available and use of it is enabled."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_bigtable_instance_admin_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes in ClientOptions are forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_bigtable_instance_admin_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file in ClientOptions is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_bigtable_instance_admin_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = BigtableInstanceAdminClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest
):
    """create_instance sends a CreateInstanceRequest and yields an LRO future."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_instance_from_dict():
    """create_instance also accepts the request as a plain dict."""
    test_create_instance(request_type=dict)
def test_create_instance_empty_call():
    """Calling create_instance with no arguments sends an empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        client.create_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()
@pytest.mark.asyncio
async def test_create_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateInstanceRequest,
):
    """Async create_instance sends the request and yields an LRO future."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_instance_async_from_dict():
    """Async create_instance also accepts the request as a plain dict."""
    await test_create_instance_async(request_type=dict)
def test_create_instance_field_headers():
    """Routing metadata is derived from the request's parent field."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateInstanceRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_instance_field_headers_async():
    """Async variant: routing metadata is derived from request.parent."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateInstanceRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_instance_flattened():
    """Flattened keyword arguments are copied into the generated request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].instance_id == "instance_id_value"
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].clusters == {
            "key_value": gba_instance.Cluster(name="name_value")
        }
def test_create_instance_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_instance(
            bigtable_instance_admin.CreateInstanceRequest(),
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
@pytest.mark.asyncio
async def test_create_instance_flattened_async():
    """Async flattened create_instance copies each keyword argument into the
    generated request object."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call. (A redundant
        # synchronous Operation assignment that was immediately overwritten
        # here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].instance_id == "instance_id_value"
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].clusters == {
            "key_value": gba_instance.Cluster(name="name_value")
        }
@pytest.mark.asyncio
async def test_create_instance_flattened_error_async():
    """Async variant: mixing a request object with flattened fields errors."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_instance(
            bigtable_instance_admin.CreateInstanceRequest(),
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
def test_get_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest
):
    """get_instance sends a GetInstanceRequest and returns an Instance."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Instance(
            name="name_value",
            display_name="display_name_value",
            state=instance.Instance.State.READY,
            type_=instance.Instance.Type.PRODUCTION,
        )
        response = client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
def test_get_instance_from_dict():
    """get_instance also accepts the request as a plain dict."""
    test_get_instance(request_type=dict)
def test_get_instance_empty_call():
    """Calling get_instance with no arguments sends an empty request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        client.get_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()
@pytest.mark.asyncio
async def test_get_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetInstanceRequest,
):
    """Async get_instance sends the request and returns an Instance."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Instance(
                name="name_value",
                display_name="display_name_value",
                state=instance.Instance.State.READY,
                type_=instance.Instance.Type.PRODUCTION,
            )
        )
        response = await client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
    """Async get_instance also accepts the request as a plain dict."""
    await test_get_instance_async(request_type=dict)
def test_get_instance_field_headers():
    """Routing metadata is derived from the request's name field."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetInstanceRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = instance.Instance()
        client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
    """Async variant: routing metadata is derived from request.name."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetInstanceRequest()

    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        await client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_instance_flattened():
    """The flattened name argument is copied into the generated request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Instance()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_instance_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_instance(
            bigtable_instance_admin.GetInstanceRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_instance_flattened_async():
    """Async flattened get_instance copies the name argument into the
    generated request object."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call. (A redundant
        # synchronous Instance assignment that was immediately overwritten
        # here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_instance_flattened_error_async():
    """Async variant: mixing a request object with flattened fields errors."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_instance(
            bigtable_instance_admin.GetInstanceRequest(), name="name_value",
        )
def test_list_instances(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest
):
    """list_instances should forward the request and surface the stub response."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        # Stub out the transport layer with a canned response.
        rpc.return_value = bigtable_instance_admin.ListInstancesResponse(
            failed_locations=["failed_locations_value"],
            next_page_token="next_page_token_value",
        )
        resp = admin_client.list_instances(req)

        # Exactly one RPC was issued, carrying an empty request proto.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.ListInstancesRequest()

    # The canned payload comes back intact and with the right type.
    assert resp.raw_page is resp
    assert isinstance(resp, bigtable_instance_admin.ListInstancesResponse)
    assert resp.failed_locations == ["failed_locations_value"]
    assert resp.next_page_token == "next_page_token_value"
def test_list_instances_from_dict():
    """Re-run the happy-path test with a plain dict as the request type."""
    test_list_instances(request_type=dict)
def test_list_instances_empty_call():
    """A no-argument call must still send a default ListInstancesRequest."""
    # Coverage failsafe: request=None with no flattened fields has to work.
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        admin_client.list_instances()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.ListInstancesRequest()
@pytest.mark.asyncio
async def test_list_instances_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListInstancesRequest,
):
    """Async list_instances forwards the request and unwraps the awaited reply."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        # The async stub must yield an awaitable wrapping the response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse(
                failed_locations=["failed_locations_value"],
                next_page_token="next_page_token_value",
            )
        )
        resp = await admin_client.list_instances(req)

        # The stub was invoked once with an empty request proto.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.ListInstancesRequest()

    # The canned payload comes back intact and with the right type.
    assert isinstance(resp, bigtable_instance_admin.ListInstancesResponse)
    assert resp.failed_locations == ["failed_locations_value"]
    assert resp.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_instances_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request type."""
    await test_list_instances_async(request_type=dict)
def test_list_instances_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # metadata has something to carry.
    req = bigtable_instance_admin.ListInstancesRequest()
    req.parent = "parent/value"

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        rpc.return_value = bigtable_instance_admin.ListInstancesResponse()
        admin_client.list_instances(req)

        # One RPC, carrying the request object unchanged.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `parent` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_list_instances_field_headers_async():
    """Async variant: URI-bound fields must be echoed as routing metadata."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    req = bigtable_instance_admin.ListInstancesRequest()
    req.parent = "parent/value"

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse()
        )
        await admin_client.list_instances(req)

        # The stub saw the request object unchanged.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `parent` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
def test_list_instances_flattened():
    """Flattened kwargs are packed into the outgoing request proto."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(admin_client.transport.list_instances), "__call__"
    ) as rpc:
        rpc.return_value = bigtable_instance_admin.ListInstancesResponse()
        # Each flattened field is passed as a truthy keyword argument.
        admin_client.list_instances(parent="parent_value",)

        # The keyword landed on the request object the stub received.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].parent == "parent_value"
def test_list_instances_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a populated request proto and flattened fields is
    # ambiguous, so the client rejects the combination up front.
    with pytest.raises(ValueError):
        admin_client.list_instances(
            bigtable_instance_admin.ListInstancesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_instances_flattened_async():
    """Async variant: flattened kwargs must be packed into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # The async surface awaits the stub, so return an awaitable fake.
        # (A bare-proto assignment that previously preceded this line was
        # dead code — immediately overwritten — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_instances(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_instances_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The async client enforces the same request/flattened-field
    # exclusivity rule as the sync client.
    with pytest.raises(ValueError):
        await admin_client.list_instances(
            bigtable_instance_admin.ListInstancesRequest(), parent="parent_value",
        )
def test_update_instance(transport: str = "grpc", request_type=instance.Instance):
    """update_instance takes an Instance proto directly and returns the result."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.update_instance), "__call__"
    ) as rpc:
        # Stub out the transport layer with a fully-populated Instance.
        rpc.return_value = instance.Instance(
            name="name_value",
            display_name="display_name_value",
            state=instance.Instance.State.READY,
            type_=instance.Instance.Type.PRODUCTION,
        )
        resp = admin_client.update_instance(req)

        # Exactly one RPC was issued, carrying an empty Instance proto.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == instance.Instance()

    # All mocked fields survive the round trip.
    assert isinstance(resp, instance.Instance)
    assert resp.name == "name_value"
    assert resp.display_name == "display_name_value"
    assert resp.state == instance.Instance.State.READY
    assert resp.type_ == instance.Instance.Type.PRODUCTION
def test_update_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request type."""
    test_update_instance(request_type=dict)
def test_update_instance_empty_call():
    """A no-argument call must still send a default Instance proto."""
    # Coverage failsafe: request=None with no flattened fields has to work.
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(admin_client.transport.update_instance), "__call__"
    ) as rpc:
        admin_client.update_instance()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == instance.Instance()
@pytest.mark.asyncio
async def test_update_instance_async(
    transport: str = "grpc_asyncio", request_type=instance.Instance
):
    """Async update_instance forwards the proto and unwraps the awaited reply."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.update_instance), "__call__"
    ) as rpc:
        # The async stub must yield an awaitable wrapping the response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Instance(
                name="name_value",
                display_name="display_name_value",
                state=instance.Instance.State.READY,
                type_=instance.Instance.Type.PRODUCTION,
            )
        )
        resp = await admin_client.update_instance(req)

        # The stub was invoked once with an empty Instance proto.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == instance.Instance()

    # All mocked fields survive the round trip.
    assert isinstance(resp, instance.Instance)
    assert resp.name == "name_value"
    assert resp.display_name == "display_name_value"
    assert resp.state == instance.Instance.State.READY
    assert resp.type_ == instance.Instance.Type.PRODUCTION
@pytest.mark.asyncio
async def test_update_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request type."""
    await test_update_instance_async(request_type=dict)
def test_update_instance_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # metadata has something to carry.
    req = instance.Instance()
    req.name = "name/value"

    with mock.patch.object(
        type(admin_client.transport.update_instance), "__call__"
    ) as rpc:
        rpc.return_value = instance.Instance()
        admin_client.update_instance(req)

        # One RPC, carrying the request object unchanged.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_update_instance_field_headers_async():
    """Async variant: URI-bound fields must be echoed as routing metadata."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    req = instance.Instance()
    req.name = "name/value"

    with mock.patch.object(
        type(admin_client.transport.update_instance), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        await admin_client.update_instance(req)

        # The stub saw the request object unchanged.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
def test_partial_update_instance(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.PartialUpdateInstanceRequest,
):
    """partial_update_instance returns a long-running-operation future."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        # Stub out the transport layer with a raw Operation proto.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        resp = admin_client.partial_update_instance(req)

        # Exactly one RPC was issued, carrying an empty request proto.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()

    # The raw Operation is wrapped in a client-side future.
    assert isinstance(resp, future.Future)
def test_partial_update_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request type."""
    test_partial_update_instance(request_type=dict)
def test_partial_update_instance_empty_call():
    """A no-argument call must still send a default PartialUpdateInstanceRequest."""
    # Coverage failsafe: request=None with no flattened fields has to work.
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        admin_client.partial_update_instance()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()
@pytest.mark.asyncio
async def test_partial_update_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.PartialUpdateInstanceRequest,
):
    """Async partial_update_instance wraps the awaited Operation in a future."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        # The async stub must yield an awaitable wrapping the Operation.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        resp = await admin_client.partial_update_instance(req)

        # The stub was invoked once with an empty request proto.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()

    # The raw Operation is wrapped in a client-side future.
    assert isinstance(resp, future.Future)
@pytest.mark.asyncio
async def test_partial_update_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request type."""
    await test_partial_update_instance_async(request_type=dict)
def test_partial_update_instance_field_headers():
    """Nested URI-bound fields must be echoed as x-goog-request-params."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a nested field that is part of the HTTP/1.1 URI so the
    # routing metadata has something to carry.
    req = bigtable_instance_admin.PartialUpdateInstanceRequest()
    req.instance.name = "instance.name/value"

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        admin_client.partial_update_instance(req)

        # One RPC, carrying the request object unchanged.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `instance.name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "instance.name=instance.name/value",)
    assert expected_header in keyword["metadata"]
@pytest.mark.asyncio
async def test_partial_update_instance_field_headers_async():
    """Async variant: nested URI-bound fields become routing metadata."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a nested field that is part of the HTTP/1.1 URI.
    req = bigtable_instance_admin.PartialUpdateInstanceRequest()
    req.instance.name = "instance.name/value"

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await admin_client.partial_update_instance(req)

        # The stub saw the request object unchanged.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `instance.name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    expected_header = ("x-goog-request-params", "instance.name=instance.name/value",)
    assert expected_header in keyword["metadata"]
def test_partial_update_instance_flattened():
    """Flattened instance/update_mask kwargs populate the request proto."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(admin_client.transport.partial_update_instance), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Each flattened field is passed as a truthy keyword argument.
        admin_client.partial_update_instance(
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Both keywords landed on the request object the stub received.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].instance == gba_instance.Instance(name="name_value")
        assert positional[0].update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_partial_update_instance_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a populated request proto and flattened fields is
    # ambiguous, so the client rejects the combination up front.
    with pytest.raises(ValueError):
        admin_client.partial_update_instance(
            bigtable_instance_admin.PartialUpdateInstanceRequest(),
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_partial_update_instance_flattened_async():
    """Async variant: flattened kwargs must be packed into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        # The async surface awaits the stub, so return an awaitable fake.
        # (A bare-Operation assignment that previously preceded this line
        # was dead code — immediately overwritten — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.partial_update_instance(
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_partial_update_instance_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The async client enforces the same request/flattened-field
    # exclusivity rule as the sync client.
    with pytest.raises(ValueError):
        await admin_client.partial_update_instance(
            bigtable_instance_admin.PartialUpdateInstanceRequest(),
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_delete_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest
):
    """delete_instance sends the request and returns None on success."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        # Deletion has an Empty response, which the client surfaces as None.
        rpc.return_value = None
        resp = admin_client.delete_instance(req)

        # Exactly one RPC was issued, carrying an empty request proto.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.DeleteInstanceRequest()

    # No payload comes back from a delete.
    assert resp is None
def test_delete_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request type."""
    test_delete_instance(request_type=dict)
def test_delete_instance_empty_call():
    """A no-argument call must still send a default DeleteInstanceRequest."""
    # Coverage failsafe: request=None with no flattened fields has to work.
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        admin_client.delete_instance()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.DeleteInstanceRequest()
@pytest.mark.asyncio
async def test_delete_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.DeleteInstanceRequest,
):
    """Async delete_instance forwards the request and resolves to None."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        # The async stub must yield an awaitable; deletion resolves to None.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        resp = await admin_client.delete_instance(req)

        # The stub was invoked once with an empty request proto.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.DeleteInstanceRequest()

    # No payload comes back from a delete.
    assert resp is None
@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request type."""
    await test_delete_instance_async(request_type=dict)
def test_delete_instance_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # metadata has something to carry.
    req = bigtable_instance_admin.DeleteInstanceRequest()
    req.name = "name/value"

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        rpc.return_value = None
        admin_client.delete_instance(req)

        # One RPC, carrying the request object unchanged.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
    """Async variant: URI-bound fields must be echoed as routing metadata."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    req = bigtable_instance_admin.DeleteInstanceRequest()
    req.name = "name/value"

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await admin_client.delete_instance(req)

        # The stub saw the request object unchanged.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `name` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
def test_delete_instance_flattened():
    """Flattened name kwarg populates the outgoing request proto."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
        type(admin_client.transport.delete_instance), "__call__"
    ) as rpc:
        rpc.return_value = None
        # The flattened field is passed as a truthy keyword argument.
        admin_client.delete_instance(name="name_value",)

        # The keyword landed on the request object the stub received.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].name == "name_value"
def test_delete_instance_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a populated request proto and flattened fields is
    # ambiguous, so the client rejects the combination up front.
    with pytest.raises(ValueError):
        admin_client.delete_instance(
            bigtable_instance_admin.DeleteInstanceRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_instance_flattened_async():
    """Async variant: flattened name kwarg must be packed into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # The async surface awaits the stub, so return an awaitable fake.
        # (A redundant `call.return_value = None` that previously preceded
        # this line was dead code and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_instance_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # The async client enforces the same request/flattened-field
    # exclusivity rule as the sync client.
    with pytest.raises(ValueError):
        await admin_client.delete_instance(
            bigtable_instance_admin.DeleteInstanceRequest(), name="name_value",
        )
def test_create_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest
):
    """create_cluster returns a long-running-operation future."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.create_cluster), "__call__"
    ) as rpc:
        # Stub out the transport layer with a raw Operation proto.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        resp = admin_client.create_cluster(req)

        # Exactly one RPC was issued, carrying an empty request proto.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.CreateClusterRequest()

    # The raw Operation is wrapped in a client-side future.
    assert isinstance(resp, future.Future)
def test_create_cluster_from_dict():
    """Re-run the happy-path test with a plain dict as the request type."""
    test_create_cluster(request_type=dict)
def test_create_cluster_empty_call():
    """A no-argument call must still send a default CreateClusterRequest."""
    # Coverage failsafe: request=None with no flattened fields has to work.
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(admin_client.transport.create_cluster), "__call__"
    ) as rpc:
        admin_client.create_cluster()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.CreateClusterRequest()
@pytest.mark.asyncio
async def test_create_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateClusterRequest,
):
    """Async create_cluster wraps the awaited Operation in a future."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is a valid probe.
    req = request_type()

    with mock.patch.object(
        type(admin_client.transport.create_cluster), "__call__"
    ) as rpc:
        # The async stub must yield an awaitable wrapping the Operation.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        resp = await admin_client.create_cluster(req)

        # The stub was invoked once with an empty request proto.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == bigtable_instance_admin.CreateClusterRequest()

    # The raw Operation is wrapped in a client-side future.
    assert isinstance(resp, future.Future)
@pytest.mark.asyncio
async def test_create_cluster_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request type."""
    await test_create_cluster_async(request_type=dict)
def test_create_cluster_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params."""
    admin_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI so the routing
    # metadata has something to carry.
    req = bigtable_instance_admin.CreateClusterRequest()
    req.parent = "parent/value"

    with mock.patch.object(
        type(admin_client.transport.create_cluster), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        admin_client.create_cluster(req)

        # One RPC, carrying the request object unchanged.
        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `parent` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_create_cluster_field_headers_async():
    """Async variant: URI-bound fields must be echoed as routing metadata."""
    admin_client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Populate a field that is part of the HTTP/1.1 URI.
    req = bigtable_instance_admin.CreateClusterRequest()
    req.parent = "parent/value"

    with mock.patch.object(
        type(admin_client.transport.create_cluster), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await admin_client.create_cluster(req)

        # The stub saw the request object unchanged.
        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == req

    # The routing header derived from `parent` was attached.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
def test_create_cluster_flattened():
    """Verify flattened kwargs to create_cluster are copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_cluster(
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].cluster_id == "cluster_id_value"
        assert args[0].cluster == instance.Cluster(name="name_value")
def test_create_cluster_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_cluster(
            bigtable_instance_admin.CreateClusterRequest(),
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_cluster_flattened_async():
    """Async variant: flattened kwargs to create_cluster populate the request.

    The duplicate synchronous ``call.return_value = operations_pb2.Operation(...)``
    assignment previously emitted here was dead code (immediately overwritten by
    the awaitable fake) and has been removed.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_cluster(
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].cluster_id == "cluster_id_value"
        assert args[0].cluster == instance.Cluster(name="name_value")
@pytest.mark.asyncio
async def test_create_cluster_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_cluster(
            bigtable_instance_admin.CreateClusterRequest(),
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
def test_get_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest
):
    """Invoke get_cluster over a mocked gRPC transport and validate the response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Cluster(
            name="name_value",
            location="location_value",
            state=instance.Cluster.State.READY,
            serve_nodes=1181,
            default_storage_type=common.StorageType.SSD,
        )
        response = client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Cluster)
    assert response.name == "name_value"
    assert response.location == "location_value"
    assert response.state == instance.Cluster.State.READY
    assert response.serve_nodes == 1181
    assert response.default_storage_type == common.StorageType.SSD
def test_get_cluster_from_dict():
    """Re-run test_get_cluster with a dict-typed request."""
    test_get_cluster(request_type=dict)
def test_get_cluster_empty_call():
    """Ensure get_cluster can be invoked with no request argument at all."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        client.get_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetClusterRequest()
@pytest.mark.asyncio
async def test_get_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetClusterRequest,
):
    """Async variant of test_get_cluster using the grpc_asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Cluster(
                name="name_value",
                location="location_value",
                state=instance.Cluster.State.READY,
                serve_nodes=1181,
                default_storage_type=common.StorageType.SSD,
            )
        )
        response = await client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Cluster)
    assert response.name == "name_value"
    assert response.location == "location_value"
    assert response.state == instance.Cluster.State.READY
    assert response.serve_nodes == 1181
    assert response.default_storage_type == common.StorageType.SSD
@pytest.mark.asyncio
async def test_get_cluster_async_from_dict():
    """Re-run the async get_cluster test with a dict-typed request."""
    await test_get_cluster_async(request_type=dict)
def test_get_cluster_field_headers():
    """Verify get_cluster routes `name` via x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        call.return_value = instance.Cluster()
        client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_cluster_field_headers_async():
    """Async variant: get_cluster routes `name` via request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
        await client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_cluster_flattened():
    """Verify the flattened `name` kwarg to get_cluster is copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Cluster()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_cluster_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_cluster(
            bigtable_instance_admin.GetClusterRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_cluster_flattened_async():
    """Async variant: flattened `name` kwarg to get_cluster populates the request.

    The synchronous ``call.return_value = instance.Cluster()`` assignment
    previously emitted here was dead code (immediately overwritten by the
    awaitable fake) and has been removed.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_cluster_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_cluster(
            bigtable_instance_admin.GetClusterRequest(), name="name_value",
        )
def test_list_clusters(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest
):
    """Invoke list_clusters over a mocked gRPC transport and validate the response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListClustersResponse(
            failed_locations=["failed_locations_value"],
            next_page_token="next_page_token_value",
        )
        response = client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
    # Establish that the response is the type that we expect.
    assert response.raw_page is response
    assert isinstance(response, bigtable_instance_admin.ListClustersResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
def test_list_clusters_from_dict():
    """Re-run test_list_clusters with a dict-typed request."""
    test_list_clusters(request_type=dict)
def test_list_clusters_empty_call():
    """Ensure list_clusters can be invoked with no request argument at all."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        client.list_clusters()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
@pytest.mark.asyncio
async def test_list_clusters_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListClustersRequest,
):
    """Async variant of test_list_clusters using the grpc_asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse(
                failed_locations=["failed_locations_value"],
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, bigtable_instance_admin.ListClustersResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_clusters_async_from_dict():
    """Re-run the async list_clusters test with a dict-typed request."""
    await test_list_clusters_async(request_type=dict)
def test_list_clusters_field_headers():
    """Verify list_clusters routes `parent` via x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        call.return_value = bigtable_instance_admin.ListClustersResponse()
        client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_clusters_field_headers_async():
    """Async variant: list_clusters routes `parent` via request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse()
        )
        await client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_clusters_flattened():
    """Verify the flattened `parent` kwarg to list_clusters is copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListClustersResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_clusters(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_clusters_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_clusters(
            bigtable_instance_admin.ListClustersRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_clusters_flattened_async():
    """Async variant: flattened `parent` kwarg to list_clusters populates the request.

    The synchronous ``call.return_value = ListClustersResponse()`` assignment
    previously emitted here was dead code (immediately overwritten by the
    awaitable fake) and has been removed.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_clusters(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_clusters_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_clusters(
            bigtable_instance_admin.ListClustersRequest(), parent="parent_value",
        )
def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster):
    """Invoke update_cluster over a mocked gRPC transport; expect a long-running op."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    # NOTE: update_cluster takes the Cluster resource itself as the request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_cluster_from_dict():
    """Re-run test_update_cluster with a dict-typed request."""
    test_update_cluster(request_type=dict)
def test_update_cluster_empty_call():
    """Ensure update_cluster can be invoked with no request argument at all."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        client.update_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
@pytest.mark.asyncio
async def test_update_cluster_async(
    transport: str = "grpc_asyncio", request_type=instance.Cluster
):
    """Async variant of test_update_cluster using the grpc_asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_cluster_async_from_dict():
    """Re-run the async update_cluster test with a dict-typed request."""
    await test_update_cluster_async(request_type=dict)
def test_update_cluster_field_headers():
    """Verify update_cluster routes `name` via x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Cluster()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_cluster_field_headers_async():
    """Async variant: update_cluster routes `name` via request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Cluster()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest
):
    """Invoke delete_cluster over a mocked gRPC transport; expect a None response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_cluster_from_dict():
    """Re-run test_delete_cluster with a dict-typed request."""
    test_delete_cluster(request_type=dict)
def test_delete_cluster_empty_call():
    """Ensure delete_cluster can be invoked with no request argument at all."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        client.delete_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
@pytest.mark.asyncio
async def test_delete_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.DeleteClusterRequest,
):
    """Async variant of test_delete_cluster using the grpc_asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_cluster_async_from_dict():
    """Re-run the async delete_cluster test with a dict-typed request."""
    await test_delete_cluster_async(request_type=dict)
def test_delete_cluster_field_headers():
    """Verify delete_cluster routes `name` via x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        call.return_value = None
        client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_cluster_field_headers_async():
    """Async variant: delete_cluster routes `name` via request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_cluster_flattened():
    """Verify the flattened `name` kwarg to delete_cluster is copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_cluster_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_cluster(
            bigtable_instance_admin.DeleteClusterRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_cluster_flattened_async():
    """Async variant: flattened `name` kwarg to delete_cluster populates the request.

    The synchronous ``call.return_value = None`` assignment previously emitted
    here was dead code (immediately overwritten by the awaitable fake) and has
    been removed.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_cluster_flattened_error_async():
    """Async variant: request object plus flattened kwargs raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_cluster(
            bigtable_instance_admin.DeleteClusterRequest(), name="name_value",
        )
def test_create_app_profile(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.CreateAppProfileRequest,
):
    """Invoke create_app_profile over a mocked gRPC transport and validate the response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # The oneof routing-policy field is set but (being a oneof) is not
        # asserted on below.
        call.return_value = instance.AppProfile(
            name="name_value",
            etag="etag_value",
            description="description_value",
            multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
                cluster_ids=["cluster_ids_value"]
            ),
        )
        response = client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
def test_create_app_profile_from_dict():
    """Re-run test_create_app_profile with a dict-typed request."""
    test_create_app_profile(request_type=dict)
def test_create_app_profile_empty_call():
    """Ensure create_app_profile can be invoked with no request argument at all."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        client.create_app_profile()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
@pytest.mark.asyncio
async def test_create_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateAppProfileRequest,
):
    """Async variant of test_create_app_profile using the grpc_asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.AppProfile(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_app_profile_async_from_dict():
    # Re-run the async CreateAppProfile test with a plain dict as the request.
    await test_create_app_profile_async(request_type=dict)
def test_create_app_profile_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.CreateAppProfileRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = instance.AppProfile()
        client.create_app_profile(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_app_profile_field_headers_async():
    """Async variant: URI-bound fields are echoed as request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.CreateAppProfileRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        await client.create_app_profile(request)
        # The stub must have been invoked with our request unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_create_app_profile_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = instance.AppProfile()
        # Pass every flattened field as a truthy keyword argument.
        client.create_app_profile(
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
        # The request the stub saw must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == "parent_value"
        assert call_args[0].app_profile_id == "app_profile_id_value"
        assert call_args[0].app_profile == instance.AppProfile(name="name_value")
def test_create_app_profile_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.CreateAppProfileRequest()
    with pytest.raises(ValueError):
        client.create_app_profile(
            conflicting_request,
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_app_profile_flattened_async():
    """Flattened kwargs on the async client must be folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate the awaitable fake directly.  (A synchronous assignment
        # that was immediately overwritten by this line has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_app_profile(
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].app_profile_id == "app_profile_id_value"
        assert args[0].app_profile == instance.AppProfile(name="name_value")
@pytest.mark.asyncio
async def test_create_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.CreateAppProfileRequest()
    with pytest.raises(ValueError):
        await client.create_app_profile(
            conflicting_request,
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
def test_get_app_profile(
    transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest
):
    """GetAppProfile sends the request over gRPC and parses the response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        # Hand back a populated AppProfile (with the routing oneof set).
        rpc.return_value = instance.AppProfile(
            name="name_value",
            etag="etag_value",
            description="description_value",
            multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
                cluster_ids=["cluster_ids_value"]
            ),
        )
        response = client.get_app_profile(request)
        # Exactly one stub invocation, with the default request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.GetAppProfileRequest()
    # The reply must round-trip into the expected response type.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
def test_get_app_profile_from_dict():
    # Re-run the sync GetAppProfile test with a plain dict as the request.
    test_get_app_profile(request_type=dict)
def test_get_app_profile_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        client.get_app_profile()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        # The client must synthesize an empty default request message.
        assert call_args[0] == bigtable_instance_admin.GetAppProfileRequest()
@pytest.mark.asyncio
async def test_get_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetAppProfileRequest,
):
    """Async GetAppProfile sends the request and parses the reply."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        # Hand back an awaitable fake carrying a populated AppProfile.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.AppProfile(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.get_app_profile(request)
        # The stub must have been invoked with the default request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.GetAppProfileRequest()
    # The reply must round-trip into the expected response type.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_app_profile_async_from_dict():
    # Re-run the async GetAppProfile test with a plain dict as the request.
    await test_get_app_profile_async(request_type=dict)
def test_get_app_profile_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.GetAppProfileRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        rpc.return_value = instance.AppProfile()
        client.get_app_profile(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_app_profile_field_headers_async():
    """Async variant: URI-bound fields are echoed as request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.GetAppProfileRequest()
    request.name = "name/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        await client.get_app_profile(request)
        # The stub must have been invoked with our request unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_get_app_profile_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as rpc:
        rpc.return_value = instance.AppProfile()
        # Pass every flattened field as a truthy keyword argument.
        client.get_app_profile(name="name_value",)
        # The request the stub saw must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_get_app_profile_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.GetAppProfileRequest()
    with pytest.raises(ValueError):
        client.get_app_profile(conflicting_request, name="name_value",)
@pytest.mark.asyncio
async def test_get_app_profile_flattened_async():
    """Flattened kwargs on the async client must be folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        # Designate the awaitable fake directly.  (A synchronous assignment
        # that was immediately overwritten by this line has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_app_profile(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.GetAppProfileRequest()
    with pytest.raises(ValueError):
        await client.get_app_profile(conflicting_request, name="name_value",)
def test_list_app_profiles(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest
):
    """ListAppProfiles sends the request over gRPC and returns a pager."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        # Hand back a populated list response.
        rpc.return_value = bigtable_instance_admin.ListAppProfilesResponse(
            next_page_token="next_page_token_value",
            failed_locations=["failed_locations_value"],
        )
        response = client.list_app_profiles(request)
        # Exactly one stub invocation, with the default request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.ListAppProfilesRequest()
    # The reply must be wrapped in the paging helper.
    assert isinstance(response, pagers.ListAppProfilesPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.failed_locations == ["failed_locations_value"]
def test_list_app_profiles_from_dict():
    # Re-run the sync ListAppProfiles test with a plain dict as the request.
    test_list_app_profiles(request_type=dict)
def test_list_app_profiles_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        client.list_app_profiles()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        # The client must synthesize an empty default request message.
        assert call_args[0] == bigtable_instance_admin.ListAppProfilesRequest()
@pytest.mark.asyncio
async def test_list_app_profiles_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListAppProfilesRequest,
):
    """Async ListAppProfiles sends the request and returns an async pager."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        # Hand back an awaitable fake carrying a populated list response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse(
                next_page_token="next_page_token_value",
                failed_locations=["failed_locations_value"],
            )
        )
        response = await client.list_app_profiles(request)
        # The stub must have been invoked with the default request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.ListAppProfilesRequest()
    # The reply must be wrapped in the async paging helper.
    assert isinstance(response, pagers.ListAppProfilesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.failed_locations == ["failed_locations_value"]
@pytest.mark.asyncio
async def test_list_app_profiles_async_from_dict():
    # Re-run the async ListAppProfiles test with a plain dict as the request.
    await test_list_app_profiles_async(request_type=dict)
def test_list_app_profiles_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.ListAppProfilesRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        rpc.return_value = bigtable_instance_admin.ListAppProfilesResponse()
        client.list_app_profiles(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_app_profiles_field_headers_async():
    """Async variant: URI-bound fields are echoed as request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the routed field a non-empty value.
    request = bigtable_instance_admin.ListAppProfilesRequest()
    request.parent = "parent/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse()
        )
        await client.list_app_profiles(request)
        # The stub must have been invoked with our request unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_list_app_profiles_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as rpc:
        rpc.return_value = bigtable_instance_admin.ListAppProfilesResponse()
        # Pass every flattened field as a truthy keyword argument.
        client.list_app_profiles(parent="parent_value",)
        # The request the stub saw must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_app_profiles_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.ListAppProfilesRequest()
    with pytest.raises(ValueError):
        client.list_app_profiles(conflicting_request, parent="parent_value",)
@pytest.mark.asyncio
async def test_list_app_profiles_flattened_async():
    """Flattened kwargs on the async client must be folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Designate the awaitable fake directly.  (A synchronous assignment
        # that was immediately overwritten by this line has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_app_profiles(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_app_profiles_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.ListAppProfilesRequest()
    with pytest.raises(ValueError):
        await client.list_app_profiles(conflicting_request, parent="parent_value",)
def test_list_app_profiles_pager():
    """The sync pager walks every page and forwards routing metadata."""
    client = BigtableInstanceAdminClient(
        # Fix: instantiate the credentials — the class object itself was being
        # passed here, unlike every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 profiles.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )

        # The pager must carry the routing metadata for the empty parent.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_app_profiles(request={})

        assert pager._metadata == metadata

        # Iterating the pager must flatten all pages into 6 profiles.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, instance.AppProfile) for i in results)
def test_list_app_profiles_pages():
    """Each pager page exposes the raw response through ``raw_page``."""
    client = BigtableInstanceAdminClient(
        # Fix: instantiate the credentials — the class object itself was being
        # passed here, unlike every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_app_profiles(request={}).pages)
        # Each page's raw response must carry the expected continuation token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_app_profiles_async_pager():
    """The async pager iterates all items across every page."""
    client = BigtableInstanceAdminAsyncClient(
        # Fix: instantiate the credentials — the class object itself was being
        # passed here, unlike every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages: 3 + 0 + 1 + 2 profiles.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_app_profiles(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration must flatten all pages into 6 profiles.
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, instance.AppProfile) for i in responses)
@pytest.mark.asyncio
async def test_list_app_profiles_async_pages():
    """Each async pager page exposes the raw response through ``raw_page``."""
    client = BigtableInstanceAdminAsyncClient(
        # Fix: instantiate the credentials — the class object itself was being
        # passed here, unlike every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_app_profiles(request={})).pages:
            pages.append(page_)
        # Each page's raw response must carry the expected continuation token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_update_app_profile(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.UpdateAppProfileRequest,
):
    """UpdateAppProfile issues the RPC and wraps the result in an LRO future."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        # Hand back a long-running operation message.
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_app_profile(request)
        # Exactly one stub invocation, with the default request message.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.UpdateAppProfileRequest()
    # Long-running operation: the client must hand back a future.
    assert isinstance(response, future.Future)
def test_update_app_profile_from_dict():
    # Re-run the sync UpdateAppProfile test with a plain dict as the request.
    test_update_app_profile(request_type=dict)
def test_update_app_profile_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        client.update_app_profile()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        # The client must synthesize an empty default request message.
        assert call_args[0] == bigtable_instance_admin.UpdateAppProfileRequest()
@pytest.mark.asyncio
async def test_update_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.UpdateAppProfileRequest,
):
    """Async UpdateAppProfile issues the RPC and wraps the result in a future."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked out, so an empty
    # request message is sufficient.
    request = request_type()
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        # Hand back an awaitable fake carrying an LRO message.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_app_profile(request)
        # The stub must have been invoked with the default request message.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_instance_admin.UpdateAppProfileRequest()
    # Long-running operation: the client must hand back a future.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_app_profile_async_from_dict():
    # Re-run the async UpdateAppProfile test with a plain dict as the request.
    await test_update_app_profile_async(request_type=dict)
def test_update_app_profile_field_headers():
    """URI-bound request fields must be echoed as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the nested routed field a non-empty value.
    request = bigtable_instance_admin.UpdateAppProfileRequest()
    request.app_profile.name = "app_profile.name/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.update_app_profile(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in call_kwargs[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_app_profile_field_headers_async():
    """Async variant: URI-bound fields are echoed as request-params metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the nested routed field a non-empty value.
    request = bigtable_instance_admin.UpdateAppProfileRequest()
    request.app_profile.name = "app_profile.name/value"
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_app_profile(request)
        # The stub must have been invoked with our request unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header must have been attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in call_kwargs[
        "metadata"
    ]
def test_update_app_profile_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Pass every flattened field as a truthy keyword argument.
        client.update_app_profile(
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # The request the stub saw must carry each flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].app_profile == instance.AppProfile(name="name_value")
        assert call_args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_app_profile_flattened_error():
    """Supplying both a request object and flattened fields must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.UpdateAppProfileRequest()
    with pytest.raises(ValueError):
        client.update_app_profile(
            conflicting_request,
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_app_profile_flattened_async():
    """Flattened kwargs on the async client must be folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        # Designate the awaitable fake directly.  (A synchronous assignment
        # that was immediately overwritten by this line has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_app_profile(
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].app_profile == instance.AppProfile(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields must raise ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    conflicting_request = bigtable_instance_admin.UpdateAppProfileRequest()
    with pytest.raises(ValueError):
        await client.update_app_profile(
            conflicting_request,
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_delete_app_profile(
transport: str = "grpc",
request_type=bigtable_instance_admin.DeleteAppProfileRequest,
):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_app_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_app_profile_from_dict():
test_delete_app_profile(request_type=dict)
def test_delete_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
client.delete_app_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()
@pytest.mark.asyncio
async def test_delete_app_profile_async(
transport: str = "grpc_asyncio",
request_type=bigtable_instance_admin.DeleteAppProfileRequest,
):
client = BigtableInstanceAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_app_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_app_profile_async_from_dict():
await test_delete_app_profile_async(request_type=dict)
def test_delete_app_profile_field_headers():
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = bigtable_instance_admin.DeleteAppProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
call.return_value = None
client.delete_app_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_app_profile_field_headers_async():
client = BigtableInstanceAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = bigtable_instance_admin.DeleteAppProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_app_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_app_profile_flattened():
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_app_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_app_profile(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_app_profile_flattened_error():
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_app_profile(
bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_app_profile_flattened_async():
    """Async flattened call: `name` kwarg is folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A bare
        # `call.return_value = None` previously preceded this line; it was
        # dead code -- immediately overwritten -- and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_app_profile(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_app_profile(
            bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value",
        )


def test_get_iam_policy(
    transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """GetIamPolicy round trip: request reaches the stub; Policy comes back."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


def test_get_iam_policy_from_dict():
    """GetIamPolicy also accepts a plain dict as the request."""
    test_get_iam_policy(request_type=dict)


def test_get_iam_policy_empty_call():
    """Calling with no arguments sends the default (empty) request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        client.get_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()


@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """Async GetIamPolicy round trip against a faked awaitable stub."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    """Async GetIamPolicy also accepts a plain dict as the request."""
    await test_get_iam_policy_async(request_type=dict)


def test_get_iam_policy_field_headers():
    """x-goog-request-params routing header is derived from request.resource."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Async variant: routing header is derived from request.resource."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


def test_get_iam_policy_from_dict_foreign():
    """A dict request using foreign (iam/options) message fields is accepted."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()


def test_get_iam_policy_flattened():
    """Flattened `resource` kwarg is folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"


def test_get_iam_policy_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """Async flattened call: `resource` kwarg is folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.  (A bare
        # `call.return_value = policy_pb2.Policy()` previously preceded this
        # line; it was dead code -- immediately overwritten -- and was
        # removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )


def test_set_iam_policy(
    transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """SetIamPolicy round trip: request reaches the stub; Policy comes back."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


def test_set_iam_policy_from_dict():
    """SetIamPolicy also accepts a plain dict as the request."""
    test_set_iam_policy(request_type=dict)


def test_set_iam_policy_empty_call():
    """Calling with no arguments sends the default (empty) request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        client.set_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()


@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """Async SetIamPolicy round trip against a faked awaitable stub."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    """Async SetIamPolicy also accepts a plain dict as the request."""
    await test_set_iam_policy_async(request_type=dict)


def test_set_iam_policy_field_headers():
    """x-goog-request-params routing header is derived from request.resource."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Async variant: routing header is derived from request.resource."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


def test_set_iam_policy_from_dict_foreign():
    """A dict request using foreign (iam policy) message fields is accepted."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()


def test_set_iam_policy_flattened():
    """Flattened `resource` kwarg is folded into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"


def test_set_iam_policy_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """Async flattened call: `resource` kwarg is folded into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.  (A bare
        # `call.return_value = policy_pb2.Policy()` previously preceded this
        # line; it was dead code -- immediately overwritten -- and was
        # removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )


def test_test_iam_permissions(
    transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest
):
    """TestIamPermissions round trip: request reaches the stub; response echoes permissions."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]


def test_test_iam_permissions_from_dict():
    """TestIamPermissions also accepts a plain dict as the request."""
    test_test_iam_permissions(request_type=dict)


def test_test_iam_permissions_empty_call():
    """Calling with no arguments sends the default (empty) request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        client.test_iam_permissions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()


@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """Async TestIamPermissions round trip against a faked awaitable stub."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]


@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    """Async TestIamPermissions also accepts a plain dict as the request."""
    await test_test_iam_permissions_async(request_type=dict)


def test_test_iam_permissions_field_headers():
    """x-goog-request-params routing header is derived from request.resource."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async variant: routing header is derived from request.resource."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]


def test_test_iam_permissions_from_dict_foreign():
    """A dict request using foreign (iam) message fields is accepted."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        response = client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()


def test_test_iam_permissions_flattened():
    """Flattened resource/permissions kwargs are folded into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
        assert args[0].permissions == ["permissions_value"]


def test_test_iam_permissions_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            iam_policy_pb2.TestIamPermissionsRequest(),
            resource="resource_value",
            permissions=["permissions_value"],
        )
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
    """Async flattened call: resource/permissions kwargs populate the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A plain
        # TestIamPermissionsResponse assignment previously preceded this
        # line; it was dead code -- immediately overwritten -- and has been
        # removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
        assert args[0].permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
client = BigtableInstanceAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.test_iam_permissions(
iam_policy_pb2.TestIamPermissionsRequest(),
resource="resource_value",
permissions=["permissions_value"],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.BigtableInstanceAdminGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.BigtableInstanceAdminGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BigtableInstanceAdminClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.BigtableInstanceAdminGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = BigtableInstanceAdminClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
    """A caller-supplied transport instance is adopted verbatim by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = BigtableInstanceAdminClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and asyncio transports expose a non-None grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The sync client defaults to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.BigtableInstanceAdminGrpcTransport,)
def test_bigtable_instance_admin_base_transport_error():
    """Base transport rejects credentials together with a credentials file."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.BigtableInstanceAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_bigtable_instance_admin_base_transport():
    """Every RPC stub, close(), and operations_client on the abstract base raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.BigtableInstanceAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_instance",
        "get_instance",
        "list_instances",
        "update_instance",
        "partial_update_instance",
        "delete_instance",
        "create_cluster",
        "get_cluster",
        "list_clusters",
        "update_cluster",
        "delete_cluster",
        "create_app_profile",
        "get_app_profile",
        "list_app_profiles",
        "update_app_profile",
        "delete_app_profile",
        "get_iam_policy",
        "set_iam_policy",
        "test_iam_permissions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_base_transport_with_credentials_file():
    """credentials_file is loaded with the Bigtable default scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BigtableInstanceAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # Newer google-auth accepts default_scopes separately from user scopes.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_base_transport_with_credentials_file_old_google_auth():
    """credentials_file is loaded with scopes= only (google-auth < 1.25 has no default_scopes)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BigtableInstanceAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
def test_bigtable_instance_admin_base_transport_with_adc():
    """With no credentials and no file, the base transport calls google.auth.default()."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BigtableInstanceAdminTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_auth_adc():
    """Client construction requests ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        BigtableInstanceAdminClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_auth_adc_old_google_auth():
    """Client construction requests ADC via scopes= only (google-auth < 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        BigtableInstanceAdminClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_transport_auth_adc(transport_class):
    """gRPC transports pass user scopes plus default_scopes to ADC (google-auth >= 1.25)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_class):
    """gRPC transports pass the default scopes via scopes= (google-auth < 1.25)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers),
        (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_bigtable_instance_admin_transport_create_channel(
    transport_class, grpc_helpers
):
    """Transport construction forwards host, creds, scopes, and gRPC options to create_channel."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "bigtableadmin.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            scopes=["1", "2"],
            default_host="bigtableadmin.googleapis.com",
            ssl_credentials=None,
            # Unlimited message sizes for admin payloads.
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """ssl_channel_credentials wins; otherwise client_cert_source_for_mtls builds the SSL creds."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_bigtable_instance_admin_host_no_port():
    """A portless api_endpoint gets the default :443 appended."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="bigtableadmin.googleapis.com"
        ),
    )
    assert client.transport._host == "bigtableadmin.googleapis.com:443"
def test_bigtable_instance_admin_host_with_port():
    """An explicit port in api_endpoint is preserved verbatim."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="bigtableadmin.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "bigtableadmin.googleapis.com:8000"
def test_bigtable_instance_admin_grpc_transport_channel():
    """A caller-supplied sync channel is adopted and no SSL creds are recorded."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 / flake8 E711: compare to None with ``is``, not ``==``.
    assert transport._ssl_channel_credentials is None
def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
    """A caller-supplied asyncio channel is adopted and no SSL creds are recorded."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source path still builds an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_bigtable_instance_admin_grpc_lro_client():
    """Sync transport exposes a cached operations_v1.OperationsClient."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_bigtable_instance_admin_grpc_lro_async_client():
    """Asyncio transport exposes a cached operations_v1.OperationsAsyncClient."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_app_profile_path():
    """app_profile_path() formats the canonical appProfiles resource name."""
    project = "squid"
    instance = "clam"
    app_profile = "whelk"
    expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(
        project=project, instance=instance, app_profile=app_profile,
    )
    actual = BigtableInstanceAdminClient.app_profile_path(
        project, instance, app_profile
    )
    assert expected == actual
def test_parse_app_profile_path():
    """parse_app_profile_path() inverts app_profile_path()."""
    expected = {
        "project": "octopus",
        "instance": "oyster",
        "app_profile": "nudibranch",
    }
    path = BigtableInstanceAdminClient.app_profile_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_app_profile_path(path)
    assert expected == actual
def test_cluster_path():
    """cluster_path() formats the canonical clusters resource name."""
    project = "cuttlefish"
    instance = "mussel"
    cluster = "winkle"
    expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(
        project=project, instance=instance, cluster=cluster,
    )
    actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster)
    assert expected == actual
def test_parse_cluster_path():
    """parse_cluster_path() inverts cluster_path()."""
    expected = {
        "project": "nautilus",
        "instance": "scallop",
        "cluster": "abalone",
    }
    path = BigtableInstanceAdminClient.cluster_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_cluster_path(path)
    assert expected == actual
def test_crypto_key_path():
    """crypto_key_path() formats the canonical Cloud KMS cryptoKeys resource name."""
    project = "squid"
    location = "clam"
    key_ring = "whelk"
    crypto_key = "octopus"
    expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(
        project=project, location=location, key_ring=key_ring, crypto_key=crypto_key,
    )
    actual = BigtableInstanceAdminClient.crypto_key_path(
        project, location, key_ring, crypto_key
    )
    assert expected == actual
def test_parse_crypto_key_path():
    """parse_crypto_key_path() inverts crypto_key_path()."""
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "key_ring": "cuttlefish",
        "crypto_key": "mussel",
    }
    path = BigtableInstanceAdminClient.crypto_key_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_crypto_key_path(path)
    assert expected == actual
def test_instance_path():
    """instance_path() formats the canonical instances resource name."""
    project = "winkle"
    instance = "nautilus"
    expected = "projects/{project}/instances/{instance}".format(
        project=project, instance=instance,
    )
    actual = BigtableInstanceAdminClient.instance_path(project, instance)
    assert expected == actual
def test_parse_instance_path():
    """parse_instance_path() inverts instance_path()."""
    expected = {
        "project": "scallop",
        "instance": "abalone",
    }
    path = BigtableInstanceAdminClient.instance_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_instance_path(path)
    assert expected == actual
def test_common_billing_account_path():
    """common_billing_account_path() formats billingAccounts/{billing_account}."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account)
    assert expected == actual
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    expected = {
        "billing_account": "clam",
    }
    path = BigtableInstanceAdminClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path)
    assert expected == actual
def test_common_folder_path():
    """common_folder_path() formats folders/{folder}."""
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder,)
    actual = BigtableInstanceAdminClient.common_folder_path(folder)
    assert expected == actual
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    expected = {
        "folder": "octopus",
    }
    path = BigtableInstanceAdminClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_common_folder_path(path)
    assert expected == actual
def test_common_organization_path():
    """common_organization_path() formats organizations/{organization}."""
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization,)
    actual = BigtableInstanceAdminClient.common_organization_path(organization)
    assert expected == actual
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    expected = {
        "organization": "nudibranch",
    }
    path = BigtableInstanceAdminClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_common_organization_path(path)
    assert expected == actual
def test_common_project_path():
    """common_project_path() formats projects/{project}."""
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project,)
    actual = BigtableInstanceAdminClient.common_project_path(project)
    assert expected == actual
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    expected = {
        "project": "mussel",
    }
    path = BigtableInstanceAdminClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_common_project_path(path)
    assert expected == actual
def test_common_location_path():
    """common_location_path() formats projects/{project}/locations/{location}."""
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )
    actual = BigtableInstanceAdminClient.common_location_path(project, location)
    assert expected == actual
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = BigtableInstanceAdminClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    actual = BigtableInstanceAdminClient.parse_common_location_path(path)
    assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom client_info reaches _prep_wrapped_messages for both construction paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = BigtableInstanceAdminClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """``async with client`` closes the underlying asyncio gRPC channel exactly once."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """``with client`` closes the transport's channel attribute exactly once.

    The local mapping is named ``transport_channels`` (not ``transports``) so
    it does not shadow the module-level ``transports`` import.
    """
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transport_channels.items():
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport.

    Uses ``transport_names`` locally to avoid shadowing the ``transports``
    module import.
    """
    transport_names = [
        "grpc",
    ]
    for transport in transport_names:
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| 38.256829 | 138 | 0.696391 |
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
from google.cloud.bigtable_admin_v2.types import common
from google.cloud.bigtable_admin_v2.types import instance
from google.cloud.bigtable_admin_v2.types import instance as gba_instance
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import options_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2
from google.type import expr_pb2
import google.auth
# Reusable pytest skip markers that gate tests on the installed google-auth
# version, since the scopes/default_scopes keyword handling changed in 1.25.0.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Return a dummy (certificate, private key) byte pair for mTLS tests."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
def modify_default_endpoint(client):
    """Return a sentinel endpoint when *client* defaults to a localhost address.

    Any ``DEFAULT_ENDPOINT`` containing "localhost" is replaced with
    "foo.googleapis.com" so the mTLS-endpoint tests cannot silently hit a
    local address; every other endpoint is returned unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls. variant."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    # None passes through unchanged.
    assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    # Already-mtls endpoints are idempotent.
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    # Non-Google hosts are returned unchanged.
    assert (
        BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi)
        == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,]
)
def test_bigtable_instance_admin_client_from_service_account_info(client_class):
    """from_service_account_info() builds a client wired to the parsed credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "bigtableadmin.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.BigtableInstanceAdminGrpcTransport, "grpc"),
        (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_bigtable_instance_admin_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """always_use_jwt_access toggles with_always_use_jwt_access on service-account creds."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,]
)
def test_bigtable_instance_admin_client_from_service_account_file(client_class):
    """from_service_account_file() and its _json alias build credentialed clients."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "bigtableadmin.googleapis.com:443"
def test_bigtable_instance_admin_client_get_transport_class():
    """get_transport_class() resolves the default and the named 'grpc' transport."""
    transport = BigtableInstanceAdminClient.get_transport_class()
    available_transports = [
        transports.BigtableInstanceAdminGrpcTransport,
    ]
    assert transport in available_transports
    transport = BigtableInstanceAdminClient.get_transport_class("grpc")
    assert transport == transports.BigtableInstanceAdminGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    BigtableInstanceAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminClient),
)
@mock.patch.object(
    BigtableInstanceAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminAsyncClient),
)
def test_bigtable_instance_admin_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise the client_options / environment-variable code paths of the constructor."""
    # Check that if a transport instance is provided, no new one is created.
    with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
            "true",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
            "false",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    BigtableInstanceAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminClient),
)
@mock.patch.object(
    BigtableInstanceAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(BigtableInstanceAdminAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_bigtable_instance_admin_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """The endpoint must auto-switch to mTLS only when a client cert is available and enabled."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # Bug fix: the expected host is computed *before* this
                    # block's `client` is created, so reading it from the
                    # leftover `client` of the previous block only worked by
                    # accident.  DEFAULT_ENDPOINT / DEFAULT_MTLS_ENDPOINT are
                    # class attributes, so read them from `client_class`.
                    if use_client_cert_env == "false":
                        expected_host = client_class.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_bigtable_instance_admin_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes from client_options must be forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            BigtableInstanceAdminClient,
            transports.BigtableInstanceAdminGrpcTransport,
            "grpc",
        ),
        (
            BigtableInstanceAdminAsyncClient,
            transports.BigtableInstanceAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_bigtable_instance_admin_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file in client_options must be forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_bigtable_instance_admin_client_client_options_from_dict():
    """A plain dict must be accepted wherever a ClientOptions instance is."""
    with mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__"
    ) as patched_init:
        patched_init.return_value = None
        # Pass client options as a dict rather than a ClientOptions object.
        client = BigtableInstanceAdminClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The endpoint from the dict must reach the transport constructor.
        patched_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest
):
    """create_instance must send a CreateInstanceRequest and return an LRO future."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_instance_from_dict():
    """Re-run the create_instance test with a dict request for coverage."""
    test_create_instance(request_type=dict)
def test_create_instance_empty_call():
    """Calling create_instance with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        client.create_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call produces a default-constructed request.
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()
@pytest.mark.asyncio
async def test_create_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateInstanceRequest,
):
    """Async create_instance must send a CreateInstanceRequest and return an LRO future."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_instance_async_from_dict():
    """Re-run the async create_instance test with a dict request for coverage."""
    await test_create_instance_async(request_type=dict)
def test_create_instance_field_headers():
    """create_instance must emit an x-goog-request-params header for `parent`."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateInstanceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_instance_field_headers_async():
    """Async create_instance must emit an x-goog-request-params header for `parent`."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateInstanceRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_instance_flattened():
    """Flattened keyword arguments must be packed into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].instance_id == "instance_id_value"
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].clusters == {
            "key_value": gba_instance.Cluster(name="name_value")
        }
def test_create_instance_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and individual fields is ambiguous,
    # so the client is expected to reject the call.
    request = bigtable_instance_admin.CreateInstanceRequest()
    with pytest.raises(ValueError):
        client.create_instance(
            request,
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
@pytest.mark.asyncio
async def test_create_instance_flattened_async():
    """Flattened keyword arguments must be packed into the request (async client)."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-Operation assignment that was immediately overwritten has
        # been removed; only the awaitable value is ever observed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].instance_id == "instance_id_value"
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].clusters == {
            "key_value": gba_instance.Cluster(name="name_value")
        }
@pytest.mark.asyncio
async def test_create_instance_flattened_error_async():
    """Async client must also reject a request object combined with flattened fields."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_instance(
            bigtable_instance_admin.CreateInstanceRequest(),
            parent="parent_value",
            instance_id="instance_id_value",
            instance=gba_instance.Instance(name="name_value"),
            clusters={"key_value": gba_instance.Cluster(name="name_value")},
        )
def test_get_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest
):
    """get_instance must send a GetInstanceRequest and surface the Instance fields."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Instance(
            name="name_value",
            display_name="display_name_value",
            state=instance.Instance.State.READY,
            type_=instance.Instance.Type.PRODUCTION,
        )
        response = client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
def test_get_instance_from_dict():
    """Re-run the get_instance test with a dict request for coverage."""
    test_get_instance(request_type=dict)
def test_get_instance_empty_call():
    """Calling get_instance with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        client.get_instance()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call produces a default-constructed request.
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()
@pytest.mark.asyncio
async def test_get_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetInstanceRequest,
):
    """Async get_instance must send a GetInstanceRequest and surface the Instance fields."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Instance(
                name="name_value",
                display_name="display_name_value",
                state=instance.Instance.State.READY,
                type_=instance.Instance.Type.PRODUCTION,
            )
        )
        response = await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
    """Re-run the async get_instance test with a dict request for coverage."""
    await test_get_instance_async(request_type=dict)
def test_get_instance_field_headers():
    """get_instance must emit an x-goog-request-params header for `name`."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = instance.Instance()
        client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
    """Async get_instance must emit an x-goog-request-params header for `name`."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        await client.get_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_instance_flattened():
    """The flattened `name` keyword must be packed into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Instance()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_instance(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_instance_flattened_error():
    """Mixing a request object with the flattened `name` field must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and an individual field is ambiguous,
    # so the client is expected to reject the call.
    request = bigtable_instance_admin.GetInstanceRequest()
    with pytest.raises(ValueError):
        client.get_instance(
            request, name="name_value",
        )
@pytest.mark.asyncio
async def test_get_instance_flattened_async():
    """The flattened `name` keyword must be packed into the request (async client)."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-Instance assignment that was immediately overwritten has been
        # removed; only the awaitable value is ever observed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_instance(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_instance_flattened_error_async():
    """Async client must also reject a request object combined with the `name` field."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_instance(
            bigtable_instance_admin.GetInstanceRequest(), name="name_value",
        )
def test_list_instances(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest
):
    """list_instances must send a ListInstancesRequest and surface the response fields."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListInstancesResponse(
            failed_locations=["failed_locations_value"],
            next_page_token="next_page_token_value",
        )
        response = client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
    # Establish that the response is the type that we expect.
    # This RPC is not paginated, so raw_page is the response itself.
    assert response.raw_page is response
    assert isinstance(response, bigtable_instance_admin.ListInstancesResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
def test_list_instances_from_dict():
    """Re-run the list_instances test with a dict request for coverage."""
    test_list_instances(request_type=dict)
def test_list_instances_empty_call():
    """Calling list_instances with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        client.list_instances()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call produces a default-constructed request.
        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
@pytest.mark.asyncio
async def test_list_instances_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListInstancesRequest,
):
    """Async list_instances must send a ListInstancesRequest and surface the response fields."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse(
                failed_locations=["failed_locations_value"],
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, bigtable_instance_admin.ListInstancesResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_instances_async_from_dict():
    """Re-run the async list_instances test with a dict request for coverage."""
    await test_list_instances_async(request_type=dict)
def test_list_instances_field_headers():
    """list_instances must emit an x-goog-request-params header for `parent`."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListInstancesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        call.return_value = bigtable_instance_admin.ListInstancesResponse()
        client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_instances_field_headers_async():
    """Async list_instances must emit an x-goog-request-params header for `parent`."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListInstancesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse()
        )
        await client.list_instances(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_instances_flattened():
    """The flattened `parent` keyword must be packed into the request message."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListInstancesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_instances(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_instances_flattened_error():
    """Mixing a request object with the flattened `parent` field must raise ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and an individual field is ambiguous,
    # so the client is expected to reject the call.
    request = bigtable_instance_admin.ListInstancesRequest()
    with pytest.raises(ValueError):
        client.list_instances(
            request, parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_instances_flattened_async():
    """The flattened `parent` keyword must be packed into the request (async client)."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-response assignment that was immediately overwritten has been
        # removed; only the awaitable value is ever observed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListInstancesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_instances(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_instances_flattened_error_async():
    """Async: passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_instances(
            bigtable_instance_admin.ListInstancesRequest(), parent="parent_value",
        )
def test_update_instance(transport: str = "grpc", request_type=instance.Instance):
    """update_instance forwards the request to the stub and unpacks the reply."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Instance(
            name="name_value",
            display_name="display_name_value",
            state=instance.Instance.State.READY,
            type_=instance.Instance.Type.PRODUCTION,
        )
        response = client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Instance()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
def test_update_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request payload."""
    test_update_instance(request_type=dict)
def test_update_instance_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as stub:
        client.update_instance()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An entirely empty call produces a default request object.
        assert call_args[0] == instance.Instance()
@pytest.mark.asyncio
async def test_update_instance_async(
    transport: str = "grpc_asyncio", request_type=instance.Instance
):
    """Async variant: update_instance awaits the stub and unpacks the reply."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Instance(
                name="name_value",
                display_name="display_name_value",
                state=instance.Instance.State.READY,
                type_=instance.Instance.Type.PRODUCTION,
            )
        )
        response = await client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Instance()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Instance)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == instance.Instance.State.READY
    assert response.type_ == instance.Instance.Type.PRODUCTION
@pytest.mark.asyncio
async def test_update_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request."""
    await test_update_instance_async(request_type=dict)
def test_update_instance_field_headers():
    """Routing: request.name must be sent in the x-goog-request-params header."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Instance()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = instance.Instance()
        client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_instance_field_headers_async():
    """Async routing: request.name sent in the x-goog-request-params header."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Instance()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
        await client.update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_partial_update_instance(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.PartialUpdateInstanceRequest,
):
    """partial_update_instance forwards the request and returns an LRO future."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.partial_update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_partial_update_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request payload."""
    test_partial_update_instance(request_type=dict)
def test_partial_update_instance_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as stub:
        client.partial_update_instance()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An entirely empty call produces a default request object.
        assert call_args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()
@pytest.mark.asyncio
async def test_partial_update_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.PartialUpdateInstanceRequest,
):
    """Async variant: partial_update_instance awaits the stub, returns an LRO."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.partial_update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_partial_update_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request."""
    await test_partial_update_instance_async(request_type=dict)
def test_partial_update_instance_field_headers():
    """Routing: request.instance.name sent in x-goog-request-params header."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.PartialUpdateInstanceRequest()
    request.instance.name = "instance.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.partial_update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_partial_update_instance_field_headers_async():
    """Async routing: request.instance.name sent in x-goog-request-params."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.PartialUpdateInstanceRequest()
    request.instance.name = "instance.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.partial_update_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[
        "metadata"
    ]
def test_partial_update_instance_flattened():
    """Verify that flattened keyword arguments are copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no network call is made.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke the method using the flattened keyword form.
        client.partial_update_instance(
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one stub invocation, with both kwargs propagated onto the
        # request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        request = call_args[0]
        assert request.instance == gba_instance.Instance(name="name_value")
        assert request.update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_partial_update_instance_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = bigtable_instance_admin.PartialUpdateInstanceRequest()
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.partial_update_instance(
            request,
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_partial_update_instance_flattened_async():
    """Async: verify flattened keyword arguments are copied into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (The awaited
        # value must be wrapped in a FakeUnaryUnaryCall; a previously assigned
        # bare Operation here was dead code and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.partial_update_instance(
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].instance == gba_instance.Instance(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_partial_update_instance_flattened_error_async():
    """Async: passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.partial_update_instance(
            bigtable_instance_admin.PartialUpdateInstanceRequest(),
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_delete_instance(
    transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest
):
    """delete_instance forwards the request to the stub and returns None."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_instance_from_dict():
    """Re-run the happy-path test with a plain dict as the request payload."""
    test_delete_instance(request_type=dict)
def test_delete_instance_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as stub:
        client.delete_instance()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An entirely empty call produces a default request object.
        assert call_args[0] == bigtable_instance_admin.DeleteInstanceRequest()
@pytest.mark.asyncio
async def test_delete_instance_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.DeleteInstanceRequest,
):
    """Async variant: delete_instance awaits the stub and returns None."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteInstanceRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request."""
    await test_delete_instance_async(request_type=dict)
def test_delete_instance_field_headers():
    """Routing: request.name must be sent in the x-goog-request-params header."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = None
        client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
    """Async routing: request.name sent in the x-goog-request-params header."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteInstanceRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_instance(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_instance_flattened():
    """Verify the flattened ``name`` argument is copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no network call is made.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as stub:
        stub.return_value = None
        # Invoke the method using the flattened keyword form.
        client.delete_instance(name="name_value")
        # Exactly one stub invocation, with the kwarg propagated onto the
        # request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_delete_instance_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = bigtable_instance_admin.DeleteInstanceRequest()
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.delete_instance(request, name="name_value")
@pytest.mark.asyncio
async def test_delete_instance_flattened_async():
    """Async: verify the flattened ``name`` is copied into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  (The awaited
        # value must be wrapped in a FakeUnaryUnaryCall; the previously
        # assigned bare ``None`` here was dead code and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_instance(name="name_value",)
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_instance_flattened_error_async():
    """Async: passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_instance(
            bigtable_instance_admin.DeleteInstanceRequest(), name="name_value",
        )
def test_create_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest
):
    """create_cluster forwards the request and returns an LRO future."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_cluster_from_dict():
    """Re-run the happy-path test with a plain dict as the request payload."""
    test_create_cluster(request_type=dict)
def test_create_cluster_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as stub:
        client.create_cluster()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An entirely empty call produces a default request object.
        assert call_args[0] == bigtable_instance_admin.CreateClusterRequest()
@pytest.mark.asyncio
async def test_create_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateClusterRequest,
):
    """Async variant: create_cluster awaits the stub and returns an LRO."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_cluster_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request."""
    await test_create_cluster_async(request_type=dict)
def test_create_cluster_field_headers():
    """Routing: request.parent must be sent in x-goog-request-params header."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.create_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_cluster_field_headers_async():
    """Async routing: request.parent sent in x-goog-request-params header."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateClusterRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_cluster_flattened():
    """Verify that flattened keyword arguments are copied into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub so no network call is made.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke the method using the flattened keyword form.
        client.create_cluster(
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
        # Exactly one stub invocation, with each kwarg propagated onto the
        # request object.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        request = call_args[0]
        assert request.parent == "parent_value"
        assert request.cluster_id == "cluster_id_value"
        assert request.cluster == instance.Cluster(name="name_value")
def test_create_cluster_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = bigtable_instance_admin.CreateClusterRequest()
    # Mixing a request object with flattened keyword fields is rejected.
    with pytest.raises(ValueError):
        client.create_cluster(
            request,
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_cluster_flattened_async():
    """Async: verify flattened keyword arguments are copied into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.  (The awaited
        # value must be wrapped in a FakeUnaryUnaryCall; a previously assigned
        # bare Operation here was dead code and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_cluster(
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].cluster_id == "cluster_id_value"
        assert args[0].cluster == instance.Cluster(name="name_value")
@pytest.mark.asyncio
async def test_create_cluster_flattened_error_async():
    """Async: passing both a request object and flattened fields must raise."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_cluster(
            bigtable_instance_admin.CreateClusterRequest(),
            parent="parent_value",
            cluster_id="cluster_id_value",
            cluster=instance.Cluster(name="name_value"),
        )
def test_get_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest
):
    """get_cluster forwards the request to the stub and unpacks the reply."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.Cluster(
            name="name_value",
            location="location_value",
            state=instance.Cluster.State.READY,
            serve_nodes=1181,
            default_storage_type=common.StorageType.SSD,
        )
        response = client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Cluster)
    assert response.name == "name_value"
    assert response.location == "location_value"
    assert response.state == instance.Cluster.State.READY
    assert response.serve_nodes == 1181
    assert response.default_storage_type == common.StorageType.SSD
def test_get_cluster_from_dict():
    """Re-run the happy-path test with a plain dict as the request payload."""
    test_get_cluster(request_type=dict)
def test_get_cluster_empty_call():
    """Coverage failsafe: a call with no request and no kwargs still works."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as stub:
        client.get_cluster()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        # An entirely empty call produces a default request object.
        assert call_args[0] == bigtable_instance_admin.GetClusterRequest()
@pytest.mark.asyncio
async def test_get_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetClusterRequest,
):
    """Async variant: get_cluster awaits the stub and unpacks the reply."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Cluster(
                name="name_value",
                location="location_value",
                state=instance.Cluster.State.READY,
                serve_nodes=1181,
                default_storage_type=common.StorageType.SSD,
            )
        )
        response = await client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetClusterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.Cluster)
    assert response.name == "name_value"
    assert response.location == "location_value"
    assert response.state == instance.Cluster.State.READY
    assert response.serve_nodes == 1181
    assert response.default_storage_type == common.StorageType.SSD
@pytest.mark.asyncio
async def test_get_cluster_async_from_dict():
    """Re-run the async happy-path test with a plain dict as the request."""
    await test_get_cluster_async(request_type=dict)
def test_get_cluster_field_headers():
    """Routing: request.name must be sent in the x-goog-request-params header."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        call.return_value = instance.Cluster()
        client.get_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_cluster_field_headers_async():
client = BigtableInstanceAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = bigtable_instance_admin.GetClusterRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
await client.get_cluster(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_cluster_flattened():
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = instance.Cluster()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_cluster(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_cluster_flattened_error():
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_cluster(
bigtable_instance_admin.GetClusterRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_cluster_flattened_async():
    # Verify the async flattened-argument form of get_cluster builds the
    # expected request object from keyword arguments.
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
        # Designate an appropriate return value for the call. The async
        # surface awaits the stub, so the mocked call must return an
        # awaitable; the plain `instance.Cluster()` assignment that used to
        # precede this line was dead code (immediately overwritten) and has
        # been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
# Async variant: request object plus flattened fields must raise ValueError.
@pytest.mark.asyncio
async def test_get_cluster_flattened_error_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_cluster(
            bigtable_instance_admin.GetClusterRequest(), name="name_value",
        )
# Happy-path test for list_clusters: request type and response fields round-trip.
def test_list_clusters(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest
):
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListClustersResponse(
            failed_locations=["failed_locations_value"],
            next_page_token="next_page_token_value",
        )
        response = client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
    # Establish that the response is the type that we expect.
    assert response.raw_page is response
    assert isinstance(response, bigtable_instance_admin.ListClustersResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
# Re-run the sync list_clusters coverage with the request supplied as a dict.
def test_list_clusters_from_dict():
    test_list_clusters(request_type=dict)
# A call with no request and no flattened fields still sends a default request.
def test_list_clusters_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        client.list_clusters()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
# Async happy-path test for list_clusters.
@pytest.mark.asyncio
async def test_list_clusters_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListClustersRequest,
):
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse(
                failed_locations=["failed_locations_value"],
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListClustersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, bigtable_instance_admin.ListClustersResponse)
    assert response.failed_locations == ["failed_locations_value"]
    assert response.next_page_token == "next_page_token_value"
# Re-run the async list_clusters coverage with the request supplied as a dict.
@pytest.mark.asyncio
async def test_list_clusters_async_from_dict():
    await test_list_clusters_async(request_type=dict)
# Verify list_clusters sends the parent as an x-goog-request-params header.
def test_list_clusters_field_headers():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        call.return_value = bigtable_instance_admin.ListClustersResponse()
        client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Async variant of the routing-header check for list_clusters.
@pytest.mark.asyncio
async def test_list_clusters_field_headers_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListClustersRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse()
        )
        await client.list_clusters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Verify the flattened (keyword-argument) form of list_clusters builds the request.
def test_list_clusters_flattened():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListClustersResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_clusters(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
# Passing both a request object and flattened fields must raise ValueError.
def test_list_clusters_flattened_error():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_clusters(
            bigtable_instance_admin.ListClustersRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_clusters_flattened_async():
    # Verify the async flattened-argument form of list_clusters builds the
    # expected request object from keyword arguments.
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
        # Designate an appropriate return value for the call. The async
        # surface awaits the stub, so only an awaitable return value is
        # meaningful; a redundant plain ListClustersResponse assignment
        # (immediately overwritten dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListClustersResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_clusters(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
# Async variant: request object plus flattened fields must raise ValueError.
@pytest.mark.asyncio
async def test_list_clusters_flattened_error_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_clusters(
            bigtable_instance_admin.ListClustersRequest(), parent="parent_value",
        )
# Happy-path test for update_cluster: returns a long-running operation future.
def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster):
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
# Re-run the sync update_cluster coverage with the request supplied as a dict.
def test_update_cluster_from_dict():
    test_update_cluster(request_type=dict)
# A call with no request and no flattened fields still sends a default request.
def test_update_cluster_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        client.update_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
# Async happy-path test for update_cluster.
@pytest.mark.asyncio
async def test_update_cluster_async(
    transport: str = "grpc_asyncio", request_type=instance.Cluster
):
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == instance.Cluster()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
# Re-run the async update_cluster coverage with the request supplied as a dict.
@pytest.mark.asyncio
async def test_update_cluster_async_from_dict():
    await test_update_cluster_async(request_type=dict)
# Verify update_cluster sends the cluster name as an x-goog-request-params header.
def test_update_cluster_field_headers():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Cluster()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Async variant of the routing-header check for update_cluster.
@pytest.mark.asyncio
async def test_update_cluster_field_headers_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = instance.Cluster()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Happy-path test for delete_cluster: the RPC returns None.
def test_delete_cluster(
    transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest
):
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
    # Establish that the response is the type that we expect.
    assert response is None
# Re-run the sync delete_cluster coverage with the request supplied as a dict.
def test_delete_cluster_from_dict():
    test_delete_cluster(request_type=dict)
# A call with no request and no flattened fields still sends a default request.
def test_delete_cluster_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        client.delete_cluster()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
# Async happy-path test for delete_cluster.
@pytest.mark.asyncio
async def test_delete_cluster_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.DeleteClusterRequest,
):
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
    # Establish that the response is the type that we expect.
    assert response is None
# Re-run the async delete_cluster coverage with the request supplied as a dict.
@pytest.mark.asyncio
async def test_delete_cluster_async_from_dict():
    await test_delete_cluster_async(request_type=dict)
# Verify delete_cluster sends the resource name as an x-goog-request-params header.
def test_delete_cluster_field_headers():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        call.return_value = None
        client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Async variant of the routing-header check for delete_cluster.
@pytest.mark.asyncio
async def test_delete_cluster_field_headers_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteClusterRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_cluster(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Verify the flattened (keyword-argument) form of delete_cluster builds the request.
def test_delete_cluster_flattened():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
# Passing both a request object and flattened fields must raise ValueError.
def test_delete_cluster_flattened_error():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_cluster(
            bigtable_instance_admin.DeleteClusterRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_cluster_flattened_async():
    # Verify the async flattened-argument form of delete_cluster builds the
    # expected request object from keyword arguments.
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
        # Designate an appropriate return value for the call. The async
        # surface awaits the stub, so only an awaitable return value is
        # meaningful; a redundant `call.return_value = None` (immediately
        # overwritten dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_cluster(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
# Async variant: request object plus flattened fields must raise ValueError.
@pytest.mark.asyncio
async def test_delete_cluster_flattened_error_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_cluster(
            bigtable_instance_admin.DeleteClusterRequest(), name="name_value",
        )
# Happy-path test for create_app_profile: response fields round-trip.
def test_create_app_profile(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.CreateAppProfileRequest,
):
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.AppProfile(
            name="name_value",
            etag="etag_value",
            description="description_value",
            multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
                cluster_ids=["cluster_ids_value"]
            ),
        )
        response = client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
# Re-run the sync create_app_profile coverage with the request supplied as a dict.
def test_create_app_profile_from_dict():
    test_create_app_profile(request_type=dict)
# A call with no request and no flattened fields still sends a default request.
def test_create_app_profile_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        client.create_app_profile()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
# Async happy-path test for create_app_profile.
@pytest.mark.asyncio
async def test_create_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.CreateAppProfileRequest,
):
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.AppProfile(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
# Re-run the async create_app_profile coverage with the request supplied as a dict.
@pytest.mark.asyncio
async def test_create_app_profile_async_from_dict():
    await test_create_app_profile_async(request_type=dict)
# Verify create_app_profile sends the parent as an x-goog-request-params header.
def test_create_app_profile_field_headers():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateAppProfileRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        call.return_value = instance.AppProfile()
        client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Async variant of the routing-header check for create_app_profile.
@pytest.mark.asyncio
async def test_create_app_profile_field_headers_async():
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.CreateAppProfileRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        await client.create_app_profile(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Verify the flattened (keyword-argument) form of create_app_profile builds the request.
def test_create_app_profile_flattened():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.AppProfile()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_app_profile(
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].app_profile_id == "app_profile_id_value"
        assert args[0].app_profile == instance.AppProfile(name="name_value")
# Passing both a request object and flattened fields must raise ValueError.
def test_create_app_profile_flattened_error():
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_app_profile(
            bigtable_instance_admin.CreateAppProfileRequest(),
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_app_profile_flattened_async():
    # Verify the async flattened-argument form of create_app_profile builds
    # the expected request object from keyword arguments.
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. The async
        # surface awaits the stub, so only an awaitable return value is
        # meaningful; a redundant plain AppProfile assignment (immediately
        # overwritten dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_app_profile(
            parent="parent_value",
            app_profile_id="app_profile_id_value",
            app_profile=instance.AppProfile(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].app_profile_id == "app_profile_id_value"
        assert args[0].app_profile == instance.AppProfile(name="name_value")
@pytest.mark.asyncio
async def test_create_app_profile_flattened_error_async():
client = BigtableInstanceAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_app_profile(
bigtable_instance_admin.CreateAppProfileRequest(),
parent="parent_value",
app_profile_id="app_profile_id_value",
app_profile=instance.AppProfile(name="name_value"),
)
def test_get_app_profile(
transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest
):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = instance.AppProfile(
name="name_value",
etag="etag_value",
description="description_value",
multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
cluster_ids=["cluster_ids_value"]
),
)
response = client.get_app_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_instance_admin.GetAppProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, instance.AppProfile)
assert response.name == "name_value"
assert response.etag == "etag_value"
assert response.description == "description_value"
def test_get_app_profile_from_dict():
test_get_app_profile(request_type=dict)
def test_get_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
client.get_app_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_instance_admin.GetAppProfileRequest()
@pytest.mark.asyncio
async def test_get_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.GetAppProfileRequest,
):
    """Async variant: get_app_profile awaits the stub and returns the AppProfile."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.AppProfile(
                name="name_value", etag="etag_value", description="description_value",
            )
        )
        response = await client.get_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.GetAppProfileRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, instance.AppProfile)
    assert response.name == "name_value"
    assert response.etag == "etag_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_app_profile_async_from_dict():
    """Async re-run of the base test with a dict request."""
    await test_get_app_profile_async(request_type=dict)
def test_get_app_profile_field_headers():
    """Verify the request's name is propagated as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetAppProfileRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        call.return_value = instance.AppProfile()
        client.get_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_app_profile_field_headers_async():
    """Async variant: the request's name is propagated as routing metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.GetAppProfileRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        await client.get_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_app_profile_flattened():
    """Verify flattened keyword args are folded into the outgoing request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = instance.AppProfile()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_app_profile(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_app_profile_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_app_profile(
            bigtable_instance_admin.GetAppProfileRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_app_profile_flattened_async():
    """Async variant: flattened keyword args are folded into the outgoing request.

    Fix: removed a dead ``call.return_value = instance.AppProfile()`` that was
    immediately overwritten by the FakeUnaryUnaryCall assignment below.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_app_profile(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_app_profile(
            bigtable_instance_admin.GetAppProfileRequest(), name="name_value",
        )
def test_list_app_profiles(
    transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest
):
    """Verify list_app_profiles sends the request and returns a pager over the response."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListAppProfilesResponse(
            next_page_token="next_page_token_value",
            failed_locations=["failed_locations_value"],
        )
        response = client.list_app_profiles(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListAppProfilesPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.failed_locations == ["failed_locations_value"]
def test_list_app_profiles_from_dict():
    """Re-run the base test with a dict request to exercise request coercion."""
    test_list_app_profiles(request_type=dict)
def test_list_app_profiles_empty_call():
    """Calling list_app_profiles with no arguments still sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        client.list_app_profiles()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()
@pytest.mark.asyncio
async def test_list_app_profiles_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.ListAppProfilesRequest,
):
    """Async variant: list_app_profiles awaits the stub and returns an async pager."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse(
                next_page_token="next_page_token_value",
                failed_locations=["failed_locations_value"],
            )
        )
        response = await client.list_app_profiles(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListAppProfilesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
    assert response.failed_locations == ["failed_locations_value"]
@pytest.mark.asyncio
async def test_list_app_profiles_async_from_dict():
    """Async re-run of the base test with a dict request."""
    await test_list_app_profiles_async(request_type=dict)
def test_list_app_profiles_field_headers():
    """Verify the request's parent is propagated as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListAppProfilesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        call.return_value = bigtable_instance_admin.ListAppProfilesResponse()
        client.list_app_profiles(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_app_profiles_field_headers_async():
    """Async variant: the request's parent is propagated as routing metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.ListAppProfilesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse()
        )
        await client.list_app_profiles(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_app_profiles_flattened():
    """Verify flattened keyword args are folded into the outgoing request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = bigtable_instance_admin.ListAppProfilesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_app_profiles(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_app_profiles_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_app_profiles(
            bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_app_profiles_flattened_async():
    """Async variant: flattened keyword args are folded into the outgoing request.

    Fix: removed a dead ``call.return_value = ...ListAppProfilesResponse()`` that
    was immediately overwritten by the FakeUnaryUnaryCall assignment below.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            bigtable_instance_admin.ListAppProfilesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_app_profiles(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_app_profiles_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_app_profiles(
            bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value",
        )
def test_list_app_profiles_pager():
    """The pager iterates across all pages and forwards routing metadata.

    Fixes: instantiate AnonymousCredentials (the class object itself was being
    passed, unlike every other test in this file), and materialize the pager
    with list() instead of an identity comprehension.
    """
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_app_profiles(request={})

        assert pager._metadata == metadata

        # Iterating the pager transparently fetches every page (3 + 0 + 1 + 2).
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, instance.AppProfile) for i in results)
def test_list_app_profiles_pages():
    """Iterating pager.pages yields each raw page with its next_page_token.

    Fix: instantiate AnonymousCredentials (the class object itself was being
    passed, unlike every other test in this file).
    """
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_app_profiles(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_app_profiles_async_pager():
    """The async pager iterates across all pages via ``async for``.

    Fix: instantiate AnonymousCredentials (the class object itself was being
    passed, unlike every other test in this file).
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_app_profiles(request={},)
        assert async_pager.next_page_token == "abc"
        # Async iteration transparently fetches every page (3 + 0 + 1 + 2).
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, instance.AppProfile) for i in responses)
@pytest.mark.asyncio
async def test_list_app_profiles_async_pages():
    """Async iteration over pager.pages yields each raw page with its token.

    Fix: instantiate AnonymousCredentials (the class object itself was being
    passed, unlike every other test in this file).
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_app_profiles),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[
                    instance.AppProfile(),
                    instance.AppProfile(),
                    instance.AppProfile(),
                ],
                next_page_token="abc",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[], next_page_token="def",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
            ),
            bigtable_instance_admin.ListAppProfilesResponse(
                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_app_profiles(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_update_app_profile(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.UpdateAppProfileRequest,
):
    """Verify update_app_profile sends the request and returns a long-running operation."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_app_profile_from_dict():
    """Re-run the base test with a dict request to exercise request coercion."""
    test_update_app_profile(request_type=dict)
def test_update_app_profile_empty_call():
    """Calling update_app_profile with no arguments still sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        client.update_app_profile()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest()
@pytest.mark.asyncio
async def test_update_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.UpdateAppProfileRequest,
):
    """Async variant: update_app_profile awaits the stub and returns an operation future."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_app_profile_async_from_dict():
    """Async re-run of the base test with a dict request."""
    await test_update_app_profile_async(request_type=dict)
def test_update_app_profile_field_headers():
    """Verify the nested app_profile.name is propagated as routing metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.UpdateAppProfileRequest()
    request.app_profile.name = "app_profile.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_app_profile_field_headers_async():
    """Async variant: the nested app_profile.name is propagated as routing metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.UpdateAppProfileRequest()
    request.app_profile.name = "app_profile.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[
        "metadata"
    ]
def test_update_app_profile_flattened():
    """Verify flattened app_profile/update_mask args are folded into the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_app_profile(
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].app_profile == instance.AppProfile(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_app_profile_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_app_profile(
            bigtable_instance_admin.UpdateAppProfileRequest(),
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_app_profile_flattened_async():
    """Async variant: flattened args are folded into the outgoing request.

    Fix: removed a dead ``call.return_value = operations_pb2.Operation(...)``
    that was immediately overwritten by the FakeUnaryUnaryCall assignment below.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so the response is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_app_profile(
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].app_profile == instance.AppProfile(name="name_value")
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_app_profile_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_app_profile(
            bigtable_instance_admin.UpdateAppProfileRequest(),
            app_profile=instance.AppProfile(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_delete_app_profile(
    transport: str = "grpc",
    request_type=bigtable_instance_admin.DeleteAppProfileRequest,
):
    """Verify delete_app_profile sends the request and returns None."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()

    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_app_profile_from_dict():
    """Re-run the base test with a dict request to exercise request coercion."""
    test_delete_app_profile(request_type=dict)
def test_delete_app_profile_empty_call():
    """Calling delete_app_profile with no arguments still sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        client.delete_app_profile()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()
@pytest.mark.asyncio
async def test_delete_app_profile_async(
    transport: str = "grpc_asyncio",
    request_type=bigtable_instance_admin.DeleteAppProfileRequest,
):
    """Async variant: delete_app_profile awaits the stub and returns None."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so None is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest()

    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_app_profile_async_from_dict():
    """Async re-run of the base test with a dict request."""
    await test_delete_app_profile_async(request_type=dict)
def test_delete_app_profile_field_headers():
    """Verify the request's name is propagated as x-goog-request-params metadata."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteAppProfileRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        call.return_value = None
        client.delete_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_app_profile_field_headers_async():
    """Async variant: the request's name is propagated as routing metadata."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = bigtable_instance_admin.DeleteAppProfileRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_app_profile(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_app_profile_flattened():
    """Verify flattened keyword args are folded into the outgoing request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_app_profile(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_app_profile_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_app_profile(
            bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_app_profile_flattened_async():
    """Async variant: flattened keyword args are folded into the outgoing request.

    Fix: removed a dead ``call.return_value = None`` that was immediately
    overwritten by the FakeUnaryUnaryCall assignment below.
    """
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_app_profile), "__call__"
    ) as call:
        # Designate an appropriate return value for the call; the async client
        # awaits the stub, so None is wrapped in a fake awaitable call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_app_profile(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_app_profile_flattened_error_async():
    """Async client rejects a request object combined with flattened kwargs."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = bigtable_instance_admin.DeleteAppProfileRequest()
    with pytest.raises(ValueError):
        await client.delete_app_profile(request, name="name_value")
def test_get_iam_policy(
    transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """GetIamPolicy round-trip: request forwarded to the stub, Policy returned."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_get_iam_policy_from_dict():
    """Re-run the round-trip test with the request given as a plain dict."""
    test_get_iam_policy(request_type=dict)
def test_get_iam_policy_empty_call():
    """Calling with no arguments sends a default GetIamPolicyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        client.get_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """Async GetIamPolicy round-trip against the asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    """Re-run the async round-trip test with the request given as a dict."""
    await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
    """x-goog-request-params header carries the request's resource field."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Async client also propagates the resource field into request headers."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict_foreign():
    """A plain dict is accepted for the foreign (IAM) request type."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Only the fact that the dict is accepted and the stub invoked
        # matters here, so the (previously unused) return value is dropped.
        client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
def test_get_iam_policy_flattened():
    """Flattened ``resource`` kwarg is copied into the generated request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
def test_get_iam_policy_flattened_error():
    """Request object plus flattened kwargs is rejected with ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.GetIamPolicyRequest()
    with pytest.raises(ValueError):
        client.get_iam_policy(request, resource="resource_value")
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """Async client: flattened ``resource`` kwarg is copied into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead store: a bare Policy() was assigned here and then
        # immediately overwritten by the FakeUnaryUnaryCall.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.  The return value is
        # not inspected, so it is deliberately not bound.
        await client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """Async client rejects a request object combined with flattened kwargs."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.GetIamPolicyRequest()
    with pytest.raises(ValueError):
        await client.get_iam_policy(request, resource="resource_value")
def test_set_iam_policy(
    transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """SetIamPolicy round-trip: request forwarded to the stub, Policy returned."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_from_dict():
    """Re-run the round-trip test with the request given as a plain dict."""
    test_set_iam_policy(request_type=dict)
def test_set_iam_policy_empty_call():
    """Calling with no arguments sends a default SetIamPolicyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        client.set_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """Async SetIamPolicy round-trip against the asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    """Re-run the async round-trip test with the request given as a dict."""
    await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
    """x-goog-request-params header carries the request's resource field."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Async client also propagates the resource field into request headers."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
    """A plain dict is accepted for the foreign (IAM) request type."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Only the fact that the dict is accepted and the stub invoked
        # matters here, so the (previously unused) return value is dropped.
        client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
def test_set_iam_policy_flattened():
    """Flattened ``resource`` kwarg is copied into the generated request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
def test_set_iam_policy_flattened_error():
    """Request object plus flattened kwargs is rejected with ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.SetIamPolicyRequest()
    with pytest.raises(ValueError):
        client.set_iam_policy(request, resource="resource_value")
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """Async client: flattened ``resource`` kwarg is copied into the request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead store: a bare Policy() was assigned here and then
        # immediately overwritten by the FakeUnaryUnaryCall.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.  The return value is
        # not inspected, so it is deliberately not bound.
        await client.set_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """Async client rejects a request object combined with flattened kwargs."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.SetIamPolicyRequest()
    with pytest.raises(ValueError):
        await client.set_iam_policy(request, resource="resource_value")
def test_test_iam_permissions(
    transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest
):
    """TestIamPermissions round-trip: request forwarded, permissions returned."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_from_dict():
    """Re-run the round-trip test with the request given as a plain dict."""
    test_test_iam_permissions(request_type=dict)
def test_test_iam_permissions_empty_call():
    """Calling with no arguments sends a default TestIamPermissionsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        client.test_iam_permissions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """Async TestIamPermissions round-trip against the asyncio transport."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    """Re-run the async round-trip test with the request given as a dict."""
    await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
    """x-goog-request-params header carries the request's resource field."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async client also propagates the resource field into request headers."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
    """A plain dict is accepted for the foreign (IAM) request type."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        # Only the fact that the dict is accepted and the stub invoked
        # matters here, so the (previously unused) return value is dropped.
        client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
def test_test_iam_permissions_flattened():
    """Flattened ``resource``/``permissions`` kwargs populate the request."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
        assert args[0].permissions == ["permissions_value"]
def test_test_iam_permissions_flattened_error():
    """Request object plus flattened kwargs is rejected with ValueError."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            request, resource="resource_value", permissions=["permissions_value"]
        )
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
    """Async client: flattened kwargs populate the generated request."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        # (Removed a dead store: a bare TestIamPermissionsResponse() was
        # assigned here and then immediately overwritten.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.  The return value is
        # not inspected, so it is deliberately not bound.
        await client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].resource == "resource_value"
        assert args[0].permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
    """Async client rejects a request object combined with flattened kwargs."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both an explicit request message and flattened field
    # arguments is ambiguous; the client must refuse the combination.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    with pytest.raises(ValueError):
        await client.test_iam_permissions(
            request, resource="resource_value", permissions=["permissions_value"]
        )
def test_credentials_transport_error():
    """Credentials, a credentials file, or scopes cannot be combined with a transport instance."""
    # It is an error to provide credentials and a transport instance.
    # (The previously-bound `client =` results were unreachable — the
    # constructor raises inside pytest.raises — so the bindings are dropped.)
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        BigtableInstanceAdminClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        BigtableInstanceAdminClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A custom transport instance passed at construction is used as-is."""
    custom_transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must adopt exactly the transport object it was handed.
    client = BigtableInstanceAdminClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both sync and asyncio gRPC transports expose a usable channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transport classes fall back to ADC when no credentials are passed."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the client defaults to the gRPC transport."""
    default_client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        default_client.transport, transports.BigtableInstanceAdminGrpcTransport,
    )
def test_bigtable_instance_admin_base_transport_error():
    """Passing both credentials and credentials_file raises DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error.
    # (The previously-bound `transport =` result was unreachable — the
    # constructor raises inside pytest.raises — so the binding is dropped.)
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.BigtableInstanceAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_bigtable_instance_admin_base_transport():
    """Every abstract base-transport method and the LRO client raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.BigtableInstanceAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_instance",
        "get_instance",
        "list_instances",
        "update_instance",
        "partial_update_instance",
        "delete_instance",
        "create_cluster",
        "get_cluster",
        "list_clusters",
        "update_cluster",
        "delete_cluster",
        "create_app_profile",
        "get_app_profile",
        "list_app_profiles",
        "update_app_profile",
        "delete_app_profile",
        "get_iam_policy",
        "set_iam_policy",
        "test_iam_permissions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_base_transport_with_credentials_file():
    """A credentials file is loaded with the expected default scopes (google-auth >= 1.25)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BigtableInstanceAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_base_transport_with_credentials_file_old_google_auth():
    """A credentials file is loaded via `scopes=` on google-auth < 1.25 (no default_scopes)."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BigtableInstanceAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
def test_bigtable_instance_admin_base_transport_with_adc():
    """ADC is consulted when neither credentials nor credentials_file is given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        # The instance itself is irrelevant; constructing it must hit ADC.
        # (The previously unused `transport =` binding is dropped.)
        transports.BigtableInstanceAdminTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_auth_adc():
    """Client construction uses ADC with default_scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        BigtableInstanceAdminClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_auth_adc_old_google_auth():
    """Client construction uses ADC with plain `scopes=` on google-auth < 1.25."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        BigtableInstanceAdminClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_transport_auth_adc(transport_class):
    """A transport built without explicit credentials obtains them via ADC.

    With google-auth >= 1.25, user-supplied scopes go through ``scopes``
    while the service's own scopes travel as ``default_scopes``.
    """
    expected_default_scopes = (
        "https://www.googleapis.com/auth/bigtable.admin",
        "https://www.googleapis.com/auth/bigtable.admin.cluster",
        "https://www.googleapis.com/auth/bigtable.admin.instance",
        "https://www.googleapis.com/auth/cloud-bigtable.admin",
        "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-platform.read-only",
    )
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=expected_default_scopes,
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_class):
    """A transport built without explicit credentials obtains them via ADC.

    With google-auth < 1.25 the service scopes are passed as ``scopes``
    because ``default_scopes`` does not exist yet.
    """
    expected_scopes = (
        "https://www.googleapis.com/auth/bigtable.admin",
        "https://www.googleapis.com/auth/bigtable.admin.cluster",
        "https://www.googleapis.com/auth/bigtable.admin.instance",
        "https://www.googleapis.com/auth/cloud-bigtable.admin",
        "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-platform.read-only",
    )
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=expected_scopes,
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers),
        (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_bigtable_instance_admin_transport_create_channel(
    transport_class, grpc_helpers
):
    """Verify the transport forwards ADC credentials, scopes, host and grpc
    options into ``grpc_helpers.create_channel`` (sync and asyncio variants
    are exercised via the matching helper module in the parametrization).
    """
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The user-supplied scopes ride alongside the service default scopes;
        # message-size limits are lifted (-1 == unlimited).
        create_channel.assert_called_with(
            "bigtableadmin.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/bigtable.admin",
                "https://www.googleapis.com/auth/bigtable.admin.cluster",
                "https://www.googleapis.com/auth/bigtable.admin.instance",
                "https://www.googleapis.com/auth/cloud-bigtable.admin",
                "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            scopes=["1", "2"],
            default_host="bigtableadmin.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS configuration precedence: an explicit ``ssl_channel_credentials``
    object wins; otherwise ``client_cert_source_for_mtls`` is invoked and its
    cert/key pair is fed to ``grpc.ssl_channel_credentials``.
    """
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The callback must have been called and its output wired into
            # grpc.ssl_channel_credentials exactly once.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_bigtable_instance_admin_host_no_port():
    """An api_endpoint without an explicit port gets the default :443."""
    options = client_options.ClientOptions(
        api_endpoint="bigtableadmin.googleapis.com"
    )
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "bigtableadmin.googleapis.com:443"
def test_bigtable_instance_admin_host_with_port():
    """An api_endpoint carrying an explicit port is preserved verbatim."""
    options = client_options.ClientOptions(
        api_endpoint="bigtableadmin.googleapis.com:8000"
    )
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "bigtableadmin.googleapis.com:8000"
def test_bigtable_instance_admin_grpc_transport_channel():
    """A pre-built grpc channel passed to the transport is used as-is and
    no ssl credentials are synthesized for it.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.BigtableInstanceAdminGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to the None singleton with ``is`` (PEP 8 E711), not ``==``.
    assert transport._ssl_channel_credentials is None
def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
    """A pre-built asyncio grpc channel passed to the transport is used
    as-is and no ssl credentials are synthesized for it.
    """
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare to the None singleton with ``is`` (PEP 8 E711), not ``==``.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated ``api_mtls_endpoint``/``client_cert_source`` arguments:
    they must warn, pull ADC credentials, build ssl credentials from the
    callback's cert/key, and dial the mTLS endpoint on :443.
    """
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated kwargs must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # client_cert_source_callback returns (b"cert bytes", b"key bytes").
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The mTLS endpoint overrides the plain ``host`` argument.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.BigtableInstanceAdminGrpcTransport,
        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
    ],
)
def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class):
    """Deprecated ``api_mtls_endpoint`` with ``client_cert_source=None``:
    ssl credentials must come from ADC's SslCredentials instead of a
    client-supplied callback.
    """
    mock_ssl_cred = mock.Mock()
    # Stub out google.auth.transport.grpc.SslCredentials so its
    # ``ssl_credentials`` property yields our sentinel mock.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Deprecated kwargs still warn even when the cert source is None.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_bigtable_instance_admin_grpc_lro_client():
    """The grpc transport exposes a cached api-core LRO operations client."""
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    first = transport.operations_client
    # An api-core operations client is exposed...
    assert isinstance(first, operations_v1.OperationsClient)
    # ...and repeated property access yields the exact same object.
    assert first is transport.operations_client
def test_bigtable_instance_admin_grpc_lro_async_client():
    """The asyncio transport exposes a cached async LRO operations client."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    first = transport.operations_client
    # An async api-core operations client is exposed...
    assert isinstance(first, operations_v1.OperationsAsyncClient)
    # ...and repeated property access yields the exact same object.
    assert first is transport.operations_client
def test_app_profile_path():
    """app_profile_path builds the canonical app-profile resource name."""
    actual = BigtableInstanceAdminClient.app_profile_path("squid", "clam", "whelk")
    assert actual == "projects/squid/instances/clam/appProfiles/whelk"
def test_parse_app_profile_path():
    """parse_app_profile_path is the inverse of app_profile_path."""
    fields = {
        "project": "octopus",
        "instance": "oyster",
        "app_profile": "nudibranch",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.app_profile_path(**fields)
    assert BigtableInstanceAdminClient.parse_app_profile_path(path) == fields
def test_cluster_path():
    """cluster_path builds the canonical cluster resource name."""
    actual = BigtableInstanceAdminClient.cluster_path("cuttlefish", "mussel", "winkle")
    assert actual == "projects/cuttlefish/instances/mussel/clusters/winkle"
def test_parse_cluster_path():
    """parse_cluster_path is the inverse of cluster_path."""
    fields = {
        "project": "nautilus",
        "instance": "scallop",
        "cluster": "abalone",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.cluster_path(**fields)
    assert BigtableInstanceAdminClient.parse_cluster_path(path) == fields
def test_crypto_key_path():
    """crypto_key_path builds the canonical KMS crypto-key resource name."""
    actual = BigtableInstanceAdminClient.crypto_key_path(
        "squid", "clam", "whelk", "octopus"
    )
    assert actual == (
        "projects/squid/locations/clam/keyRings/whelk/cryptoKeys/octopus"
    )
def test_parse_crypto_key_path():
    """parse_crypto_key_path is the inverse of crypto_key_path."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "key_ring": "cuttlefish",
        "crypto_key": "mussel",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.crypto_key_path(**fields)
    assert BigtableInstanceAdminClient.parse_crypto_key_path(path) == fields
def test_instance_path():
    """instance_path builds the canonical instance resource name."""
    actual = BigtableInstanceAdminClient.instance_path("winkle", "nautilus")
    assert actual == "projects/winkle/instances/nautilus"
def test_parse_instance_path():
    """parse_instance_path is the inverse of instance_path."""
    fields = {
        "project": "scallop",
        "instance": "abalone",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.instance_path(**fields)
    assert BigtableInstanceAdminClient.parse_instance_path(path) == fields
def test_common_billing_account_path():
    """common_billing_account_path builds the billing-account resource name."""
    actual = BigtableInstanceAdminClient.common_billing_account_path("squid")
    assert actual == "billingAccounts/squid"
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    fields = {
        "billing_account": "clam",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.common_billing_account_path(**fields)
    assert BigtableInstanceAdminClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """common_folder_path builds the folder resource name."""
    actual = BigtableInstanceAdminClient.common_folder_path("whelk")
    assert actual == "folders/whelk"
def test_parse_common_folder_path():
    """parse_common_folder_path is the inverse of common_folder_path."""
    fields = {
        "folder": "octopus",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.common_folder_path(**fields)
    assert BigtableInstanceAdminClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """common_organization_path builds the organization resource name."""
    actual = BigtableInstanceAdminClient.common_organization_path("oyster")
    assert actual == "organizations/oyster"
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    fields = {
        "organization": "nudibranch",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.common_organization_path(**fields)
    assert BigtableInstanceAdminClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """common_project_path builds the project resource name."""
    actual = BigtableInstanceAdminClient.common_project_path("cuttlefish")
    assert actual == "projects/cuttlefish"
def test_parse_common_project_path():
    """parse_common_project_path is the inverse of common_project_path."""
    fields = {
        "project": "mussel",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.common_project_path(**fields)
    assert BigtableInstanceAdminClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """common_location_path builds the project/location resource name."""
    actual = BigtableInstanceAdminClient.common_location_path("winkle", "nautilus")
    assert actual == "projects/winkle/locations/nautilus"
def test_parse_common_location_path():
    """parse_common_location_path is the inverse of common_location_path."""
    fields = {
        "project": "scallop",
        "location": "abalone",
    }
    # Round-trip: build a path from the fields, then parse it back.
    path = BigtableInstanceAdminClient.common_location_path(**fields)
    assert BigtableInstanceAdminClient.parse_common_location_path(path) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    """A user-supplied ``client_info`` must be forwarded into the transport's
    ``_prep_wrapped_messages`` both when the transport is built implicitly by
    the client and when it is constructed directly.
    """
    client_info = gapic_v1.client_info.ClientInfo()
    # Path 1: client constructor builds the transport internally.
    with mock.patch.object(
        transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Path 2: transport class instantiated directly.
    with mock.patch.object(
        transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = BigtableInstanceAdminClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """``async with client`` closes the underlying grpc channel on exit."""
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    channel_type = type(client.transport.grpc_channel)
    with mock.patch.object(channel_type, "close") as close:
        async with client:
            # The channel stays open while the context is active...
            close.assert_not_called()
        # ...and is closed exactly once on exit.
        close.assert_called_once()
def test_transport_close():
    """``with client`` closes the transport's underlying channel on exit."""
    # Fix: the local mapping was named ``transports``, shadowing the
    # module-level ``transports`` package import used throughout this file.
    close_attr_by_transport = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_attr_by_transport.items():
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    # Fix: the local list was named ``transports``, shadowing the
    # module-level ``transports`` package import used throughout this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| true | true |
f7fb22471c1dc26094f9fab0f04f6eeabc78b20a | 2,263 | py | Python | tests/test_bytecode.py | 4577/Jawa | 23f93020ef6687567e45a9afa09bfd6e0faf6f0a | [
"MIT"
] | null | null | null | tests/test_bytecode.py | 4577/Jawa | 23f93020ef6687567e45a9afa09bfd6e0faf6f0a | [
"MIT"
] | null | null | null | tests/test_bytecode.py | 4577/Jawa | 23f93020ef6687567e45a9afa09bfd6e0faf6f0a | [
"MIT"
] | 1 | 2021-01-21T12:17:39.000Z | 2021-01-21T12:17:39.000Z | from jawa.util.bytecode import Instruction, Operand, OperandTypes
# Expected disassembly of TableSwitch.main: push 1, then a tableswitch over
# the contiguous range [1, 3] with a default branch, followed by the four
# ``return`` targets at the byte offsets (pos) the branches point at.
GOOD_TABLE_SWITCH = [
    Instruction(mnemonic='iconst_1', opcode=4, operands=[], pos=0),
    Instruction(mnemonic='tableswitch', opcode=170, operands=[
        # DEFAULT
        Operand(OperandTypes.BRANCH, value=30),
        # LOW
        Operand(OperandTypes.LITERAL, value=1),
        # HIGH
        Operand(OperandTypes.LITERAL, value=3),
        # TABLE
        Operand(OperandTypes.BRANCH, value=27),
        Operand(OperandTypes.BRANCH, value=28),
        Operand(OperandTypes.BRANCH, value=29)
    ], pos=1),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=28),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=29),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=30),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=31)
]
# Expected disassembly of LookupSwitch.main: a lookupswitch carries a sparse
# {match: branch} mapping plus a default branch operand.
GOOD_LOOKUP_SWITCH = [
    Instruction(mnemonic='iconst_1', opcode=4, operands=[], pos=0),
    Instruction(mnemonic='lookupswitch', opcode=171, operands=[
        # {match value: branch offset}
        {1: 27, 3: 28},
        # DEFAULT
        Operand(op_type=OperandTypes.BRANCH, value=29)
    ], pos=1),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=28),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=29),
    Instruction(mnemonic='return', opcode=177, operands=[], pos=30)
]
def test_table_switch(loader):
    """tableswitch opcodes survive a disassemble/assemble round trip."""
    method = loader['TableSwitch'].methods.find_one(name='main')
    first_pass = list(method.code.disassemble())
    assert first_pass == GOOD_TABLE_SWITCH
    # Re-assemble what we just read and confirm nothing changed.
    method.code.assemble(first_pass)
    second_pass = list(method.code.disassemble())
    assert second_pass == GOOD_TABLE_SWITCH
def test_lookup_switch(loader):
    """lookupswitch opcodes survive a disassemble/assemble round trip."""
    method = loader['LookupSwitch'].methods.find_one(name='main')
    first_pass = list(method.code.disassemble())
    assert first_pass == GOOD_LOOKUP_SWITCH
    # Re-assemble what we just read and confirm nothing changed.
    method.code.assemble(first_pass)
    second_pass = list(method.code.disassemble())
    assert second_pass == GOOD_LOOKUP_SWITCH
def test_compare():
    """Instructions compare equal to their mnemonic string and themselves."""
    ret_ins = Instruction.create('return')
    assert ret_ins == 'return'
    assert ret_ins == ret_ins
    # A different mnemonic must not compare equal.
    assert ret_ins != 'not_return'
| 32.797101 | 68 | 0.679187 | from jawa.util.bytecode import Instruction, Operand, OperandTypes
GOOD_TABLE_SWITCH = [
Instruction(mnemonic='iconst_1', opcode=4, operands=[], pos=0),
Instruction(mnemonic='tableswitch', opcode=170, operands=[
Operand(OperandTypes.BRANCH, value=30),
Operand(OperandTypes.LITERAL, value=1),
Operand(OperandTypes.LITERAL, value=3),
Operand(OperandTypes.BRANCH, value=27),
Operand(OperandTypes.BRANCH, value=28),
Operand(OperandTypes.BRANCH, value=29)
], pos=1),
Instruction(mnemonic='return', opcode=177, operands=[], pos=28),
Instruction(mnemonic='return', opcode=177, operands=[], pos=29),
Instruction(mnemonic='return', opcode=177, operands=[], pos=30),
Instruction(mnemonic='return', opcode=177, operands=[], pos=31)
]
GOOD_LOOKUP_SWITCH = [
Instruction(mnemonic='iconst_1', opcode=4, operands=[], pos=0),
Instruction(mnemonic='lookupswitch', opcode=171, operands=[
{1: 27, 3: 28},
Operand(op_type=OperandTypes.BRANCH, value=29)
], pos=1),
Instruction(mnemonic='return', opcode=177, operands=[], pos=28),
Instruction(mnemonic='return', opcode=177, operands=[], pos=29),
Instruction(mnemonic='return', opcode=177, operands=[], pos=30)
]
def test_table_switch(loader):
cf = loader['TableSwitch']
main = cf.methods.find_one(name='main')
instructions = list(main.code.disassemble())
assert instructions == GOOD_TABLE_SWITCH
main.code.assemble(instructions)
instructions = list(main.code.disassemble())
assert instructions == GOOD_TABLE_SWITCH
def test_lookup_switch(loader):
cf = loader['LookupSwitch']
main = cf.methods.find_one(name='main')
instructions = list(main.code.disassemble())
assert instructions == GOOD_LOOKUP_SWITCH
main.code.assemble(instructions)
instructions = list(main.code.disassemble())
assert instructions == GOOD_LOOKUP_SWITCH
def test_compare():
ins = Instruction.create('return')
assert ins == 'return'
assert ins == ins
assert ins != 'not_return'
| true | true |
f7fb236444830288f2d7add9e4ac6a27b3bf2e95 | 9,004 | py | Python | cvxportfolio/tests/test_what_if.py | Andreas237/cvxportfolio | 46910e9ac62797ffc962bd090bea9bf8eb598053 | [
"Apache-2.0"
] | 472 | 2017-05-02T07:09:16.000Z | 2022-03-30T14:00:46.000Z | cvxportfolio/tests/test_what_if.py | Andreas237/cvxportfolio | 46910e9ac62797ffc962bd090bea9bf8eb598053 | [
"Apache-2.0"
] | 64 | 2017-07-18T22:21:53.000Z | 2021-12-31T10:36:32.000Z | cvxportfolio/tests/test_what_if.py | Andreas237/cvxportfolio | 46910e9ac62797ffc962bd090bea9bf8eb598053 | [
"Apache-2.0"
] | 190 | 2017-07-12T18:03:19.000Z | 2022-03-10T02:10:26.000Z | """
Copyright 2016 Stephen Boyd, Enzo Busseti, Steven Diamond, BlackRock Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import cvxpy as cvx
import numpy as np
import pandas as pd
import copy
from cvxportfolio import simulator, HcostModel, TcostModel, SinglePeriodOpt
from cvxportfolio import ReturnsForecast, MultipleReturnsForecasts, FullSigma
from .base_test import BaseTest
DIR = os.path.dirname(__file__) + os.path.sep
class TestWhatIf(BaseTest):
    """Backtest attribution tests: decompose a multi-source policy's results
    into per-alpha-source contributions via MarketSimulator.attribute.
    """
    def setUp(self):
        # Market data fixtures shipped alongside the tests.
        self.sigma = pd.read_csv(DIR + 'sigmas.csv',
                                 index_col=0, parse_dates=[0])
        self.returns = pd.read_csv(DIR + 'returns.csv',
                                   index_col=0, parse_dates=[0])
        self.volume = pd.read_csv(DIR + 'volumes.csv',
                                  index_col=0, parse_dates=[0])
        # a: half-spread, b: nonlinear tcost coefficient, s: holding cost —
        # presumably; TODO confirm against TcostModel/HcostModel signatures.
        self.a, self.b, self.s = 0.0005, 1., 0.
        self.universe = self.returns.columns
        self.times = self.returns.index
    def test_attribution(self):
        """Test attribution.
        """
        # Alpha source
        alpha_sources = [ReturnsForecast(
            self.returns, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        emp_Sigma = np.cov(self.returns.to_numpy().T)
        risk_model = FullSigma(emp_Sigma, gamma=100.)
        # NOTE(review): TcostModel argument order differs between this call
        # (volume, sigma, a, b) and the one below (a, b, sigma, volume) —
        # verify against the TcostModel constructor; one ordering may be wrong.
        tcost_model = TcostModel(self.volume, self.sigma, self.a, self.b)
        hcost_model = HcostModel(self.s, self.s * 0)
        pol = SinglePeriodOpt(alpha_model, [risk_model, tcost_model,
                                            hcost_model], [],
                              solver=cvx.ECOS)
        tcost = TcostModel(self.a, self.b, self.sigma, self.volume)
        hcost = HcostModel(self.s)
        market_sim = simulator.MarketSimulator(self.returns,
                                               costs=[tcost, hcost],
                                               market_volumes=self.volume)
        p_0 = pd.Series(index=self.universe, data=1E6)
        noisy = market_sim.run_backtest(p_0, self.returns.index[1],
                                        self.returns.index[10], pol)
        # linear fit attribution: each source's contribution should equal the
        # total profit scaled by its weight, with zero RMS error.
        attr = market_sim.attribute(noisy, pol,
                                    parallel=False, fit="linear")
        base_line = noisy.v - sum(p_0)
        for i in range(3):
            self.assertItemsAlmostEqual(
                attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
        self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))
        # least-squares fit attribution
        attr = market_sim.attribute(noisy, pol,
                                    parallel=False, fit="least-squares")
        base_line = noisy.v - sum(p_0)
        for i in range(3):
            self.assertItemsAlmostEqual(
                attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
        # Residual always 0.
        alpha_sources = [ReturnsForecast(
            self.returns * 0, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        pol = copy.copy(pol)
        pol.alpha_model = alpha_model
        attr = market_sim.attribute(noisy, pol,
                                    parallel=False, fit="least-squares")
        self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
    def test_attribute_non_profit_series(self):
        """Test attributing series quantities besides profit.
        """
        # Alpha source
        alpha_sources = [ReturnsForecast(
            self.returns, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        emp_Sigma = np.cov(self.returns.to_numpy().T)
        risk_model = FullSigma(emp_Sigma, gamma=100.)
        # NOTE(review): TcostModel argument order here is the mirror image of
        # test_attribution's — verify which ordering the constructor expects.
        tcost_model = TcostModel(self.a, self.b, self.sigma, self.volume)
        hcost_model = HcostModel(self.s, self.s * 0)
        pol = SinglePeriodOpt(alpha_model, [risk_model, tcost_model,
                                            hcost_model], [],
                              solver=cvx.ECOS)
        tcost = TcostModel(self.volume, self.sigma, self.a, self.b)
        hcost = HcostModel(self.s)
        market_sim = simulator.MarketSimulator(self.returns,
                                               costs=[tcost, hcost],
                                               market_volumes=self.volume)
        p_0 = pd.Series(index=self.universe, data=1E6)
        noisy = market_sim.run_backtest(p_0, self.returns.index[1],
                                        self.returns.index[10], pol)
        # Attribute leverage (a series) rather than profit.
        def selector(result):
            return result.leverage
        # linear fit attribution
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="linear")
        base_line = noisy.leverage
        for i in range(3):
            self.assertItemsAlmostEqual(
                attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
        self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))
        # least-squares fit attribution
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="least-squares")
        for i in range(3):
            self.assertItemsAlmostEqual(
                attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
        # Residual always 0.
        alpha_sources = [ReturnsForecast(
            self.returns * 0, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        pol = copy.copy(pol)
        pol.alpha_model = alpha_model
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="least-squares")
        self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
    def test_attribute_non_profit_scalar(self):
        """Test attributing scalar quantities besides profit.
        """
        # Alpha source
        alpha_sources = [ReturnsForecast(
            self.returns, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        emp_Sigma = np.cov(self.returns.to_numpy().T)
        risk_model = FullSigma(emp_Sigma)
        tcost_model = TcostModel(self.a, self.b, self.sigma, self.volume)
        hcost_model = HcostModel(self.s)
        pol = SinglePeriodOpt(
            alpha_model, [100 * risk_model, tcost_model, hcost_model], [])
        market_sim = simulator.MarketSimulator(self.returns,
                                               costs=[tcost_model, hcost_model]
                                               )
        p_0 = pd.Series(index=self.universe, data=1E6)
        noisy = market_sim.run_backtest(p_0, self.returns.index[1],
                                        self.returns.index[10], pol)
        # Wrap the scalar volatility in a one-element Series so attribute()
        # can treat it like any other time series.
        def selector(result):
            return pd.Series(index=[noisy.h.index[-1]],
                             data=result.volatility)
        # linear fit attribution
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="linear")
        base_line = noisy.volatility
        for i in range(3):
            self.assertAlmostEqual(
                attr[i][0] / weights[i] / sum(p_0), base_line / sum(p_0))
        self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))
        # least-squares fit attribution
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="least-squares")
        for i in range(3):
            self.assertAlmostEqual(
                attr[i][0] / weights[i] / sum(p_0), base_line / sum(p_0))
        # Residual always 0.
        alpha_sources = [ReturnsForecast(
            self.returns * 0, name=i) for i in range(3)]
        weights = np.array([0.1, 0.3, 0.6])
        alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
        pol = copy.copy(pol)
        pol.alpha_model = alpha_model
        attr = market_sim.attribute(noisy, pol, selector,
                                    parallel=False, fit="least-squares")
        self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
| 43.921951 | 79 | 0.577077 |
import os
import cvxpy as cvx
import numpy as np
import pandas as pd
import copy
from cvxportfolio import simulator, HcostModel, TcostModel, SinglePeriodOpt
from cvxportfolio import ReturnsForecast, MultipleReturnsForecasts, FullSigma
from .base_test import BaseTest
DIR = os.path.dirname(__file__) + os.path.sep
class TestWhatIf(BaseTest):
def setUp(self):
self.sigma = pd.read_csv(DIR + 'sigmas.csv',
index_col=0, parse_dates=[0])
self.returns = pd.read_csv(DIR + 'returns.csv',
index_col=0, parse_dates=[0])
self.volume = pd.read_csv(DIR + 'volumes.csv',
index_col=0, parse_dates=[0])
self.a, self.b, self.s = 0.0005, 1., 0.
self.universe = self.returns.columns
self.times = self.returns.index
def test_attribution(self):
alpha_sources = [ReturnsForecast(
self.returns, name=i) for i in range(3)]
weights = np.array([0.1, 0.3, 0.6])
alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
emp_Sigma = np.cov(self.returns.to_numpy().T)
risk_model = FullSigma(emp_Sigma, gamma=100.)
tcost_model = TcostModel(self.volume, self.sigma, self.a, self.b)
hcost_model = HcostModel(self.s, self.s * 0)
pol = SinglePeriodOpt(alpha_model, [risk_model, tcost_model,
hcost_model], [],
solver=cvx.ECOS)
tcost = TcostModel(self.a, self.b, self.sigma, self.volume)
hcost = HcostModel(self.s)
market_sim = simulator.MarketSimulator(self.returns,
costs=[tcost, hcost],
market_volumes=self.volume)
p_0 = pd.Series(index=self.universe, data=1E6)
noisy = market_sim.run_backtest(p_0, self.returns.index[1],
self.returns.index[10], pol)
attr = market_sim.attribute(noisy, pol,
parallel=False, fit="linear")
base_line = noisy.v - sum(p_0)
for i in range(3):
self.assertItemsAlmostEqual(
attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))
attr = market_sim.attribute(noisy, pol,
parallel=False, fit="least-squares")
base_line = noisy.v - sum(p_0)
for i in range(3):
self.assertItemsAlmostEqual(
attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
alpha_sources = [ReturnsForecast(
self.returns * 0, name=i) for i in range(3)]
weights = np.array([0.1, 0.3, 0.6])
alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
pol = copy.copy(pol)
pol.alpha_model = alpha_model
attr = market_sim.attribute(noisy, pol,
parallel=False, fit="least-squares")
self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
def test_attribute_non_profit_series(self):
    """Attribute a non-profit *series* metric (leverage) to alpha sources.

    Builds a three-source alpha model with fixed weights, runs a short
    backtest, and checks that both the "linear" and "least-squares"
    attribution fits recover each source's weighted share of the
    leverage series; with zero alpha the least-squares residual must
    vanish.
    """
    alpha_sources = [ReturnsForecast(
        self.returns, name=i) for i in range(3)]
    weights = np.array([0.1, 0.3, 0.6])
    alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
    emp_Sigma = np.cov(self.returns.to_numpy().T)
    risk_model = FullSigma(emp_Sigma, gamma=100.)
    tcost_model = TcostModel(self.a, self.b, self.sigma, self.volume)
    hcost_model = HcostModel(self.s, self.s * 0)
    pol = SinglePeriodOpt(alpha_model, [risk_model, tcost_model,
                                        hcost_model], [],
                          solver=cvx.ECOS)
    # BUG FIX: the simulator's transaction-cost model was built as
    # TcostModel(self.volume, self.sigma, self.a, self.b), while every
    # other call site in this suite uses (a, b, sigma, volume); align
    # the argument order with the rest of the tests.
    tcost = TcostModel(self.a, self.b, self.sigma, self.volume)
    hcost = HcostModel(self.s)
    market_sim = simulator.MarketSimulator(self.returns,
                                           costs=[tcost, hcost],
                                           market_volumes=self.volume)
    p_0 = pd.Series(index=self.universe, data=1E6)
    noisy = market_sim.run_backtest(p_0, self.returns.index[1],
                                    self.returns.index[10], pol)

    def selector(result):
        # Attribute the whole leverage time series rather than profit.
        return result.leverage

    # Linear fit: each source's attribution equals its weight share.
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="linear")
    base_line = noisy.leverage
    for i in range(3):
        self.assertItemsAlmostEqual(
            attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))
    self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))

    # Least-squares fit should agree with the linear one here.
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="least-squares")
    for i in range(3):
        self.assertItemsAlmostEqual(
            attr[i] / weights[i] / sum(p_0), base_line / sum(p_0))

    # With zero alpha, the least-squares residual must vanish.
    alpha_sources = [ReturnsForecast(
        self.returns * 0, name=i) for i in range(3)]
    weights = np.array([0.1, 0.3, 0.6])
    alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
    pol = copy.copy(pol)
    pol.alpha_model = alpha_model
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="least-squares")
    self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
def test_attribute_non_profit_scalar(self):
    # Attribute a *scalar* metric (end-of-backtest volatility) back to the
    # three alpha sources; both fit methods should recover each source's
    # weighted share, and a zero-alpha model should leave no residual.
    alpha_sources = [ReturnsForecast(
        self.returns, name=i) for i in range(3)]
    weights = np.array([0.1, 0.3, 0.6])
    alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
    emp_Sigma = np.cov(self.returns.to_numpy().T)
    risk_model = FullSigma(emp_Sigma)
    tcost_model = TcostModel(self.a, self.b, self.sigma, self.volume)
    hcost_model = HcostModel(self.s)
    pol = SinglePeriodOpt(
        alpha_model, [100 * risk_model, tcost_model, hcost_model], [])
    market_sim = simulator.MarketSimulator(self.returns,
                                           costs=[tcost_model, hcost_model]
                                           )
    p_0 = pd.Series(index=self.universe, data=1E6)
    noisy = market_sim.run_backtest(p_0, self.returns.index[1],
                                    self.returns.index[10], pol)

    def selector(result):
        # Wrap the scalar in a one-element series indexed at the last date,
        # since attribute() expects a series-valued selector.
        return pd.Series(index=[noisy.h.index[-1]],
                         data=result.volatility)

    # Linear fit: each source's attribution equals its weight share.
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="linear")
    base_line = noisy.volatility
    for i in range(3):
        self.assertAlmostEqual(
            attr[i][0] / weights[i] / sum(p_0), base_line / sum(p_0))
    self.assertItemsAlmostEqual(attr['RMS error'], np.zeros(len(noisy.v)))

    # Least-squares fit should agree with the linear one here.
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="least-squares")
    for i in range(3):
        self.assertAlmostEqual(
            attr[i][0] / weights[i] / sum(p_0), base_line / sum(p_0))

    # With zero alpha, the least-squares residual must vanish.
    alpha_sources = [ReturnsForecast(
        self.returns * 0, name=i) for i in range(3)]
    weights = np.array([0.1, 0.3, 0.6])
    alpha_model = MultipleReturnsForecasts(alpha_sources, weights)
    pol = copy.copy(pol)
    pol.alpha_model = alpha_model
    attr = market_sim.attribute(noisy, pol, selector,
                                parallel=False, fit="least-squares")
    self.assertItemsAlmostEqual(attr['residual'], np.zeros(len(noisy.v)))
| true | true |
f7fb2368015b9f9a345bcee68ccddb6d92a0896a | 3,214 | py | Python | tktoolbox/examples/fractal_threaded.py | modal/tktoolbox | bfb1c5166818bf0f0776a10598515b6ec00fc450 | [
"Apache-2.0"
] | null | null | null | tktoolbox/examples/fractal_threaded.py | modal/tktoolbox | bfb1c5166818bf0f0776a10598515b6ec00fc450 | [
"Apache-2.0"
] | null | null | null | tktoolbox/examples/fractal_threaded.py | modal/tktoolbox | bfb1c5166818bf0f0776a10598515b6ec00fc450 | [
"Apache-2.0"
] | null | null | null | import tkinter as tk
import threading
from queue import Queue
#Globals
WIDE = 200 # image dimensions in pixels (width)
HIGH = 200 # (height)
REAL_FROM = -2 # bounds of fractal in the complex plane
REAL_TO = 1 # (real axis => x-direction)
IMAG_FROM = -1.5 # imaginary coords => y-direction
IMAG_TO = 1.5 #
N = 64 # escape-time iteration limit; also the colour-palette size
class MandelbrotWorker(threading.Thread):
    """Background thread computing Mandelbrot escape-time colour codes.

    Each finished pixel is pushed onto the result queue as an
    ``(x, y, colorcode)`` tuple, where ``colorcode`` is the iteration
    count at which the orbit escaped (0 if it never escaped within
    ``max_iter`` trials).
    """

    def __init__(self, aQueue, imgWidth, imgHeight, bounds, max_iter=64,
                 **kwargs):
        """Start computing immediately.

        Parameters:
            aQueue: queue.Queue receiving (x, y, colorcode) tuples.
            imgWidth, imgHeight: image dimensions in pixels.
            bounds: (real_from, real_to, imag_from, imag_to) region of
                the complex plane to render.
            max_iter: escape-time iteration limit. Defaults to 64, the
                value of the module global N this replaces, so existing
                callers are unaffected.
        """
        threading.Thread.__init__(self)
        # The daemon attribute replaces the deprecated setDaemon() call;
        # the worker must not keep the process alive after the GUI exits.
        self.daemon = True
        self.imgWidth = imgWidth
        self.imgHeight = imgHeight
        self.fwidth = float(imgWidth)
        self.fheight = float(imgHeight)
        self.mapLeft = bounds[0]
        self.mapTop = bounds[3]
        self.mapWidth = bounds[1] - bounds[0]
        self.mapHeight = bounds[3] - bounds[2]
        self.resultQueue = aQueue
        self.n = max_iter
        self.start()

    def run(self):
        """Iterate over every pixel and queue its colour code."""
        for x in range(self.imgWidth):
            for y in range(self.imgHeight):
                percx = x / self.fwidth
                percy = y / self.fheight
                # Map pixel coordinates to a point in the complex plane;
                # image y grows downward, hence subtraction from mapTop.
                xp = self.mapLeft + percx * self.mapWidth
                yp = self.mapTop - percy * self.mapHeight
                o = complex(0, 0)
                z = complex(xp, yp)
                colorcode = 0
                for trials in range(self.n):
                    if abs(o) <= 2.0:
                        o = o ** 2 + z
                    else:
                        # Escaped: record how many iterations it took.
                        colorcode = trials
                        break
                self.resultQueue.put((x, y, colorcode))
class ThreadedFractal(tk.Toplevel):
    """Toplevel window that displays the fractal as it is computed.

    A MandelbrotWorker thread pushes (x, y, colorcode) tuples onto
    ``self.queue``; the Tk event loop drains the queue one pixel per
    poll so the GUI stays responsive while the image fills in.
    """

    def __init__(self, master):
        tk.Toplevel.__init__(self, master)
        self.protocol('WM_DELETE_WINDOW', self.on_close)
        # Status bar on top, image display below.
        self.status = tk.Label(self)
        self.status.pack(fill='x')
        self.display = tk.Label(self)
        self.display.pack()
        self.img = tk.PhotoImage(width=WIDE, height=HIGH)
        self.display.config(image=self.img)
        self.count = 0                  # pixels painted so far
        self.totalPixels = WIDE * HIGH
        self.queue = Queue()            # filled by the worker thread
        self.rgb = []                   # palette indexed by colour code
        self.make_colours()
        self.go()

    def make_colours(self):
        """Build an N-entry palette of '#rrggbb' colour strings."""
        for i in range(N):
            r = i * 7 % 200 + 55
            g = i * 9 % 200 + 55
            b = i * 11 % 200 + 55
            colour = '#%02x%02x%02x' % (r, g, b)
            self.rgb.append(colour)

    def scheduler(self):
        # Re-arm poll() via the event loop instead of recursing directly.
        self.after(0, self.poll)

    def go(self):
        """Start the worker thread and begin polling for results."""
        self.workerThread = MandelbrotWorker(self.queue, WIDE, HIGH,
                                             (REAL_FROM, REAL_TO,
                                              IMAG_FROM, IMAG_TO))
        self.after(500, self.poll)

    def poll(self):
        """Paint at most one queued pixel, then reschedule until done."""
        from queue import Empty
        if self.count < self.totalPixels:
            try:
                x, y, code = self.queue.get_nowait()
            except Empty:
                # BUG FIX: the original caught ``Queue.Empty``, but the
                # queue.Queue *class* has no ``Empty`` attribute (that was
                # the Python 2 module spelling), so an empty queue raised
                # AttributeError instead of being skipped.
                pass
            else:
                colour = self.rgb[code]
                self.img.put(colour, to=(x, y))
                self.count += 1
            self.status.config(text='%s of %s pixels' % (self.count,
                                                         self.totalPixels))
            self.after_idle(self.scheduler)

    def on_close(self):
        # Destroying the (hidden) root tears down this Toplevel too.
        self.master.destroy()
# Hide the default root window; the application runs in its own Toplevel
# and closing that window destroys the root, ending mainloop().
root = tk.Tk()
root.withdraw()
app = ThreadedFractal(root)
app.mainloop()
| 30.037383 | 78 | 0.520535 | import tkinter as tk
import threading
from queue import Queue
WIDE = 200
HIGH = 200
REAL_FROM = -2
REAL_TO = 1
IMAG_FROM = -1.5
IMAG_TO = 1.5
N = 64
class MandelbrotWorker(threading.Thread):
    """Background thread computing Mandelbrot escape-time colour codes.

    Each finished pixel is pushed onto the result queue as an
    ``(x, y, colorcode)`` tuple, where ``colorcode`` is the iteration
    count at which the orbit escaped (0 if it never escaped within
    ``max_iter`` trials).
    """

    def __init__(self, aQueue, imgWidth, imgHeight, bounds, max_iter=64,
                 **kwargs):
        """Start computing immediately.

        Parameters:
            aQueue: queue.Queue receiving (x, y, colorcode) tuples.
            imgWidth, imgHeight: image dimensions in pixels.
            bounds: (real_from, real_to, imag_from, imag_to) region of
                the complex plane to render.
            max_iter: escape-time iteration limit. Defaults to 64, the
                value of the module global N this replaces, so existing
                callers are unaffected.
        """
        threading.Thread.__init__(self)
        # The daemon attribute replaces the deprecated setDaemon() call;
        # the worker must not keep the process alive after the GUI exits.
        self.daemon = True
        self.imgWidth = imgWidth
        self.imgHeight = imgHeight
        self.fwidth = float(imgWidth)
        self.fheight = float(imgHeight)
        self.mapLeft = bounds[0]
        self.mapTop = bounds[3]
        self.mapWidth = bounds[1] - bounds[0]
        self.mapHeight = bounds[3] - bounds[2]
        self.resultQueue = aQueue
        self.n = max_iter
        self.start()

    def run(self):
        """Iterate over every pixel and queue its colour code."""
        for x in range(self.imgWidth):
            for y in range(self.imgHeight):
                percx = x / self.fwidth
                percy = y / self.fheight
                # Map pixel coordinates to a point in the complex plane;
                # image y grows downward, hence subtraction from mapTop.
                xp = self.mapLeft + percx * self.mapWidth
                yp = self.mapTop - percy * self.mapHeight
                o = complex(0, 0)
                z = complex(xp, yp)
                colorcode = 0
                for trials in range(self.n):
                    if abs(o) <= 2.0:
                        o = o ** 2 + z
                    else:
                        # Escaped: record how many iterations it took.
                        colorcode = trials
                        break
                self.resultQueue.put((x, y, colorcode))
class ThreadedFractal(tk.Toplevel):
    """Toplevel window that displays the fractal as it is computed.

    A MandelbrotWorker thread pushes (x, y, colorcode) tuples onto
    ``self.queue``; the Tk event loop drains the queue one pixel per
    poll so the GUI stays responsive while the image fills in.
    """

    def __init__(self, master):
        tk.Toplevel.__init__(self, master)
        self.protocol('WM_DELETE_WINDOW', self.on_close)
        # Status bar on top, image display below.
        self.status = tk.Label(self)
        self.status.pack(fill='x')
        self.display = tk.Label(self)
        self.display.pack()
        self.img = tk.PhotoImage(width=WIDE, height=HIGH)
        self.display.config(image=self.img)
        self.count = 0                  # pixels painted so far
        self.totalPixels = WIDE * HIGH
        self.queue = Queue()            # filled by the worker thread
        self.rgb = []                   # palette indexed by colour code
        self.make_colours()
        self.go()

    def make_colours(self):
        """Build an N-entry palette of '#rrggbb' colour strings."""
        for i in range(N):
            r = i * 7 % 200 + 55
            g = i * 9 % 200 + 55
            b = i * 11 % 200 + 55
            colour = '#%02x%02x%02x' % (r, g, b)
            self.rgb.append(colour)

    def scheduler(self):
        # Re-arm poll() via the event loop instead of recursing directly.
        self.after(0, self.poll)

    def go(self):
        """Start the worker thread and begin polling for results."""
        self.workerThread = MandelbrotWorker(self.queue, WIDE, HIGH,
                                             (REAL_FROM, REAL_TO,
                                              IMAG_FROM, IMAG_TO))
        self.after(500, self.poll)

    def poll(self):
        """Paint at most one queued pixel, then reschedule until done."""
        from queue import Empty
        if self.count < self.totalPixels:
            try:
                x, y, code = self.queue.get_nowait()
            except Empty:
                # BUG FIX: the original caught ``Queue.Empty``, but the
                # queue.Queue *class* has no ``Empty`` attribute (that was
                # the Python 2 module spelling), so an empty queue raised
                # AttributeError instead of being skipped.
                pass
            else:
                colour = self.rgb[code]
                self.img.put(colour, to=(x, y))
                self.count += 1
            self.status.config(text='%s of %s pixels' % (self.count,
                                                         self.totalPixels))
            self.after_idle(self.scheduler)

    def on_close(self):
        # Destroying the (hidden) root tears down this Toplevel too.
        self.master.destroy()
# Hide the default root window; the application runs in its own Toplevel
# and closing that window destroys the root, ending mainloop().
root = tk.Tk()
root.withdraw()
app = ThreadedFractal(root)
app.mainloop()
| true | true |
f7fb23b0a7825df7c03f6fbc8d993ef3a45f44f7 | 2,410 | py | Python | workspace_tools/host_tests/host_tests_plugins/module_copy_shell.py | mfiore02/mbed | fd285784c911f5af9ca51a29aa9908857db59b9d | [
"Apache-2.0"
] | 54 | 2015-12-21T13:07:43.000Z | 2019-04-16T20:06:25.000Z | workspace_tools/host_tests/host_tests_plugins/module_copy_shell.py | GustavWi/mbed | ea01d61fa18430564b78226045b196bb6bf6b66a | [
"Apache-2.0"
] | 10 | 2015-10-31T15:16:14.000Z | 2019-03-12T13:25:29.000Z | workspace_tools/host_tests/host_tests_plugins/module_copy_shell.py | GustavWi/mbed | ea01d61fa18430564b78226045b196bb6bf6b66a | [
"Apache-2.0"
] | 30 | 2015-12-27T21:44:56.000Z | 2019-03-26T07:13:26.000Z | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from os.path import join, basename
from host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_Shell(HostTestPluginBase):
    """Copy plugin that transfers a firmware image via a shell copy command.

    The generic 'shell' capability is resolved to the platform's native
    command ('copy' on Windows, 'cp' on POSIX) and the image is copied
    onto the target's mount point.
    """

    # Plugin interface
    name = 'HostTestPluginCopyMethod_Shell'
    type = 'CopyMethod'
    stable = True
    capabilities = ['shell', 'cp', 'copy', 'xcopy']
    required_parameters = ['image_path', 'destination_disk']

    def setup(self, *args, **kwargs):
        """ Configure plugin, this function should be called before plugin execute() method is used.
        """
        # Nothing to configure for plain shell copies.
        return True

    def execute(self, capabilitity, *args, **kwargs):
        """ Executes capability by name.

            Required kwargs: image_path, destination_disk.
            Returns True on success, False otherwise.
        """
        result = False
        if self.check_parameters(capabilitity, *args, **kwargs) is True:
            image_path = kwargs['image_path']
            destination_disk = kwargs['destination_disk']
            # Wait for mount point to be ready
            self.check_mount_point_ready(destination_disk)  # Blocking
            # Prepare correct command line parameter values
            image_base_name = basename(image_path)
            destination_path = join(destination_disk, image_base_name)
            if capabilitity == 'shell':
                # Resolve the generic capability to the native copy command.
                if os.name == 'nt':
                    capabilitity = 'copy'
                elif os.name == 'posix':
                    capabilitity = 'cp'
            # BUG FIX: the original condition tested 'copy' twice and never
            # matched 'xcopy', even though 'xcopy' is an advertised
            # capability of this plugin.
            if capabilitity in ('cp', 'copy', 'xcopy'):
                copy_method = capabilitity
                cmd = [copy_method, image_path, destination_path]
                result = self.run_command(cmd)
        return result
def load_plugin():
    """Return the copy-method plugin instance exported by this module."""
    plugin = HostTestPluginCopyMethod_Shell()
    return plugin
| 37.076923 | 100 | 0.671369 |
import os
from os.path import join, basename
from host_test_plugins import HostTestPluginBase
class HostTestPluginCopyMethod_Shell(HostTestPluginBase):
    """Copy plugin that transfers a firmware image via a shell copy command.

    The generic 'shell' capability is resolved to the platform's native
    command ('copy' on Windows, 'cp' on POSIX) and the image is copied
    onto the target's mount point.
    """

    # Plugin interface
    name = 'HostTestPluginCopyMethod_Shell'
    type = 'CopyMethod'
    stable = True
    capabilities = ['shell', 'cp', 'copy', 'xcopy']
    required_parameters = ['image_path', 'destination_disk']

    def setup(self, *args, **kwargs):
        """Configure the plugin; call before execute(). Nothing to do here."""
        return True

    def execute(self, capabilitity, *args, **kwargs):
        """Execute a capability by name.

        Required kwargs: image_path, destination_disk.
        Returns True on success, False otherwise.
        """
        result = False
        if self.check_parameters(capabilitity, *args, **kwargs) is True:
            image_path = kwargs['image_path']
            destination_disk = kwargs['destination_disk']
            # Blocks until the target's mount point is ready.
            self.check_mount_point_ready(destination_disk)
            image_base_name = basename(image_path)
            destination_path = join(destination_disk, image_base_name)
            if capabilitity == 'shell':
                # Resolve the generic capability to the native copy command.
                if os.name == 'nt':
                    capabilitity = 'copy'
                elif os.name == 'posix':
                    capabilitity = 'cp'
            # BUG FIX: the original condition tested 'copy' twice and never
            # matched 'xcopy', even though 'xcopy' is an advertised
            # capability of this plugin.
            if capabilitity in ('cp', 'copy', 'xcopy'):
                copy_method = capabilitity
                cmd = [copy_method, image_path, destination_path]
                result = self.run_command(cmd)
        return result
def load_plugin():
    # Factory hook used by the host-test framework to discover this plugin.
    instance = HostTestPluginCopyMethod_Shell()
    return instance
| true | true |
f7fb23be1884a2204749e3ea4ab7ef13aed04b45 | 89 | py | Python | application/__init__.py | tafowocedric/icarehealthcenterapi | f9c74adc3db1b9b0e2bb0e66d1f7fb2bfffef512 | [
"MIT"
] | null | null | null | application/__init__.py | tafowocedric/icarehealthcenterapi | f9c74adc3db1b9b0e2bb0e66d1f7fb2bfffef512 | [
"MIT"
] | null | null | null | application/__init__.py | tafowocedric/icarehealthcenterapi | f9c74adc3db1b9b0e2bb0e66d1f7fb2bfffef512 | [
"MIT"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
# Shared declarative base class for all SQLAlchemy ORM models in this app.
Base_Model = declarative_base()
| 22.25 | 55 | 0.853933 | from sqlalchemy.ext.declarative import declarative_base
# Shared declarative base class for all SQLAlchemy ORM models in this app.
Base_Model = declarative_base()
| true | true |
f7fb23cb0953a0e4999ea5a318d8fcc56d66e56f | 936 | py | Python | restaurants/models.py | electricsheepindream/menu | 009576c1da4a35d655abab8be66c31c5ad0d3635 | [
"MIT"
] | null | null | null | restaurants/models.py | electricsheepindream/menu | 009576c1da4a35d655abab8be66c31c5ad0d3635 | [
"MIT"
] | null | null | null | restaurants/models.py | electricsheepindream/menu | 009576c1da4a35d655abab8be66c31c5ad0d3635 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
from django.utils.text import slugify
from django.db.models.signals import pre_save
from django.core.urlresolvers import reverse
# Create your models here.
User = settings.AUTH_USER_MODEL
class Restaurants(models.Model):
    # A restaurant record owned by a site user. `slug` is auto-filled from
    # `name` on first save by the res_pre_save signal handler below.
    owner = models.ForeignKey(User)
    name = models.CharField(max_length=120, unique=True)
    location = models.CharField(max_length=120, null=True, blank=True)
    manager = models.CharField(max_length=30)
    # NOTE(review): `type` shadows the builtin, but renaming the field
    # would change the DB column — left as is.
    type = models.CharField(max_length=20)
    slug = models.CharField(max_length=60, null=True, blank=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # 'mark' is the URL kwarg name expected by the ResHome:detail route.
        return reverse('ResHome:detail', kwargs={'mark': self.slug})
def res_pre_save(sender, instance, *args, **kwargs):
    # pre_save signal handler: populate the slug from the name the first
    # time the object is saved; an existing slug is never overwritten.
    if not instance.slug:
        instance.slug = slugify(instance.name)

pre_save.connect(res_pre_save, sender=Restaurants)
| 30.193548 | 70 | 0.742521 | from django.conf import settings
from django.db import models
from django.utils.text import slugify
from django.db.models.signals import pre_save
from django.core.urlresolvers import reverse
User = settings.AUTH_USER_MODEL
class Restaurants(models.Model):
    # A restaurant record owned by a site user. `slug` is auto-filled from
    # `name` on first save by the res_pre_save signal handler below.
    owner = models.ForeignKey(User)
    name = models.CharField(max_length=120, unique=True)
    location = models.CharField(max_length=120, null=True, blank=True)
    manager = models.CharField(max_length=30)
    # NOTE(review): `type` shadows the builtin, but renaming the field
    # would change the DB column — left as is.
    type = models.CharField(max_length=20)
    slug = models.CharField(max_length=60, null=True, blank=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # 'mark' is the URL kwarg name expected by the ResHome:detail route.
        return reverse('ResHome:detail', kwargs={'mark': self.slug})
def res_pre_save(sender, instance, *args, **kwargs):
    # pre_save signal handler: populate the slug from the name the first
    # time the object is saved; an existing slug is never overwritten.
    if not instance.slug:
        instance.slug = slugify(instance.name)

pre_save.connect(res_pre_save, sender=Restaurants)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.