id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6588271 | from local_server import *
class LocalServerMock(LocalServer):
def __init__(self):
"""
Mock Local Server
"""
self._is_from_test = True
super().__init__()
| StarcoderdataPython |
354340 | <filename>src/crypto.py
from Crypto.Cipher import AES
from hashlib import sha256
from base64 import b64encode
def create_key(string):
"""
Creates sha256 hash key from given string. Key is of the right length for AES encryption.
:param string: String encoded with UTF-8.
:returns: String key, a hash of ``string``.
"""
hashed = sha256(bytes(string, 'utf8')).digest()
return hashed
def encrypt(message, key):
"""
Encrypts message with key, using AES encryption.
:param message: Bytes object. Length must be multiple of 16.
:param key: Key of length 16. Please use :func:`crypto.create_key` on your string.
:returns: Encrypted message.
"""
AESchiper = AES.new(key)
# length of a message must be multiple of 16
return AESchiper.encrypt(message)
def decrypt(message, key):
"""
Decypts message with key, using AES encryption.
:param message: Bytes object. Length must be multiple of 16.
:param key: Key of length 16. Please use :func:`crypto.create_key` on your string.
:returns: Decypted message.
"""
AESchiper = AES.new(key)
return AESchiper.decrypt(message)
| StarcoderdataPython |
6574585 | <reponame>yongkangzzz/mmfgroup
"""
===============================================================================
shap_explainer.py
explainer class for singlemodal or multimodal inputs
===============================================================================
"""
import matplotlib.pyplot as plt
import shap
import numpy as np
import copy
from . import _utils as utils
from mmf.models.mmbt import MMBT
from transformers import AutoTokenizer
from PIL import Image
from shap4mmf._plot import image
from typing import List
class Explainer(object):
""" Use shap as explainer for classification models for image, text or both.
"""
def __init__(self, model: "MMF Model", algorithm="partition",
max_evals=None, batch_size=None,
tokenizer=None
):
""" Initialise the explianer object
Args:
model: mmf model, or any model that has a .classify method as the prediction
method. It should take a PIL image object and a string to give the classification output
algorithm: currently support ("partition", )
max_evals: maximum evaluation time, default 200 if not given
batch_size: default 50
tokenizer: used for text input, default pretrianed
distilbert-base-uncased-finetuned-sst-2-english from AutoTokenizer
"""
self._supported_algos = ("partition", )
self._supported_modes = ("multimodal", "text_only", "image_only")
# input validations
if algorithm not in self._supported_algos:
raise ValueError(f"This algotithm {algorithm} is not supported!")
# model should have a .classify method
if not hasattr(model, 'classify'):
raise ValueError(f"Model object must have a .classify attribute.")
# public features
self.model = model
self.algorithm = algorithm
# some methods may allow speeding up
self.max_evals = max_evals if max_evals is not None else 200
self.batch_size = batch_size if batch_size is not None else 50
# internal features
self._tokenizer = AutoTokenizer.from_pretrained(
'distilbert-base-uncased-finetuned-sst-2-english', use_fast=True)
self._fixed_image = None
self._fixed_text = None
self._tokens = None
def _f_multimodal(self, img_txt: np.ndarray):
""" Multimodal funtion for shap to explain
Args:
img_txt: np.ndarray of shape (N, ...)
N = number of samples
... = shape of images with extra row for text
Returns:
model outputs for those samples
"""
out = np.zeros((len(img_txt), 2)) # output same shape
# seperate image array with text
images, texts = self._images_texts_split(
img_txt, self._tokens, self._tokenizer)
# inputs neeeds to be [PIL.Image.Image]; if not try to transform
if not isinstance(images[0], Image.Image):
images = utils.arr_to_img(images)
# DEBUG USE
# print(f"f_mm(), {texts=}")
return self._fill_predictions(out, images, texts)
def _f_image(self, images: np.ndarray):
""" Image-only function for shap to explain
Args:
images: np.ndarray of shape (N, D1, D2, C);
N = number of samples
D1, D2, C = three channel image
Returns:
numpy array of predictions with shape = (N, 2);
- N[i] = score of the image being i
"""
out = np.zeros((len(images), 2)) # output same shape
# inputs neeeds to be [PIL.Image.Image]; if not try to transform
if not isinstance(images[0], Image.Image):
images = utils.arr_to_img(images)
texts = [self._fixed_text for _ in range(len(images))]
return self._fill_predictions(out, images, texts)
def _f_text(self, texts: np.ndarray):
""" Text-only function for shap to explain
testing only texts
Args:
texts: np.ndarray[str] of shape (N,)
N = number of samples
Returns:
numpy array of predictions with shape = (N, 2);
- N[i] = score of the image being i
"""
out = np.zeros((len(texts), 2)) # output same shape
images = [self._fixed_image for _ in range(len(texts))]
return self._fill_predictions(out, images, texts)
def explain(self, images: np.ndarray, texts: np.ndarray, mode: str):
""" Main API to calculate shap values
Args:
images: np.ndarray of shape (N, D1, D2, C);
N = number of samples
D1, D2, C = three channel image
texts: np.ndarray of shape (N,)
mode: ("text_only", "image_only", "multimodal")
Returns:
a list of shap values calculated
a tuple of (text_shap_values, image_shap_values) is returned if mode
is "multimodal"
"""
# input validations
if mode not in self._supported_modes:
raise ValueError(f"This mode {mode} is not supported!")
if images.shape[0] != texts.shape[0]:
raise ValueError(
f"Shape mismatch, inputs' first dimensions should be equal!")
# output list
shap_values = []
if mode == "text_only":
if not isinstance(images[0], Image.Image):
images = utils.arr_to_img(images)
# initialise masker and explainer
text_masker = shap.maskers.Text(self._tokenizer)
# NOTE: if using text heatmap need to have output_names arg in .Explainer()
explainer = shap.Explainer(
self._f_text, text_masker, algorithm=self.algorithm)
# loop through samples
for i in range(len(images)):
self._fixed_image = images[i]
values = explainer(
texts[i:i + 1], max_evals=self.max_evals, batch_size=self.batch_size)
shap_values.append(values)
elif mode == "image_only":
# initialise masker and explainer
image_masker = shap.maskers.Image("inpaint_telea", images[0].shape)
image_explainer = shap.Explainer(
self._f_image, image_masker, algorithm=self.algorithm)
# loop through samples
for i in range(len(texts)):
self._fixed_text = texts[i]
values = image_explainer(
images[i:i + 1], max_evals=self.max_evals, batch_size=self.batch_size)
shap_values.append(values)
elif mode == "multimodal":
# img_txt and tokens for all N samples given
all_img_txt, all_tokens = self._combine_images_texts(
images, texts, self._tokenizer)
image_masker = shap.maskers.Image(
"inpaint_telea", all_img_txt[0].shape)
explainer = shap.Explainer(
self._f_multimodal, image_masker, algorithm=self.algorithm)
# loop through samples
image_shap_values = []
text_shap_values = []
for i in range(len(all_img_txt)):
self._tokens = all_tokens[i]
shap_values = explainer(
all_img_txt[i:i + 1], max_evals=self.max_evals, batch_size=self.batch_size)
img_values, txt_values = self._process_mm_shap_values(
shap_values, self._tokens)
image_shap_values.append(img_values)
text_shap_values.append(txt_values)
# build explanation objects - NOTE: deprecated in new api as we are returning lists
# image_shap_values = self._concat_shap_values(image_shap_values)
# text_shap_values = self._concat_shap_values(text_shap_values)
# return tuple if multimodal
return image_shap_values, text_shap_values
# return single-modal outputs
return shap_values
@staticmethod
def image_plot(shap_values: List, label_index: int = 1):
""" plot the image chart given shap values
Args:
shap_values: list of shap values
Returns:
a list of figures
"""
shap_values = copy.deepcopy(shap_values)
figs = []
for value in shap_values:
value.data = value.data.astype(np.float64)
figs.append(image(value[..., label_index:label_index + 1]))
return figs
@staticmethod
def parse_text_values(shap_values: List, label_index: int = 0):
""" plot the image chart given shap values
Args:
shap_values: list of shap values
label_index: which label the values correspond to, default 0
Returns:
a list of dictionarys, each containing (word: shap_value) pairs
the shap values are relative to "base_value" which is also in the dictionary
"""
out = []
for value in shap_values:
value = value[0, ..., label_index]
dic = {k: v for k, v in zip(value.data, value.values)}
dic['base_value'] = value.base_values
out.append(dic)
return out
# ===== helper functions =====
def _fill_predictions(self, out: np.ndarray, images: List, texts: List):
""" utility function used in the _f_xxx functions
Args:
out: np.ndarray of zeros
images: [PIL.Image] ready to be used in model.classify
texts: [str] ready to be used in model.classify
NOTE: len(out) == len(images) == len(texts)
Returns:
filled output with predictions
"""
for i, (text, image) in enumerate(zip(texts, images)):
# classify, output is a tupe (index, score)
ind, score = self.model.classify(image, text).values()
out[i][ind] = score
out[i][1 - ind] = 1 - score
return out
@staticmethod
def _combine_images_texts(images, texts, tokenizer):
""" Combines images and texts into an array
Args:
images: np.ndarray of shape = (N, ...)
texts: np.ndarray[str] of shape = (N, )
N = number of samples
... = dimensions of the images
Returns:
a tuple of np.ndarrays (img_txt, tokens)
img_txt = array where each image is concatenated with text
tokens = list of tokens, each element in the list is a token list for a string
"""
assert len(images.shape) == 4, "Shape of images should be (N, D1, D2, C)"
assert texts.shape[0] == images.shape[0], "Shape mismatch between images and texts"
# calculate row dimension to append
y_len = images[0].shape[1]
z_len = images[0].shape[2]
img_txt = []
tokens = []
for image, text in zip(images, texts):
txt_ids = tokenizer.encode(text, add_special_tokens=False)
txt_tokens = tokenizer.tokenize(text, add_special_tokens=False)
new_row = np.zeros((1, y_len, z_len))
new_row[0, :len(txt_tokens), 0] = txt_ids
img_txt.append(np.concatenate([image, new_row], axis=0))
tokens.append(txt_tokens)
return np.array(img_txt), tokens
@staticmethod
def _images_texts_split(img_txt: np.ndarray, tokens: List, tokenizer):
""" split concatenated image_text arrays up into images and texts arrays
This function will be used in the _f_multimodal function which takes in
a img_txt of shape (N, ...) where the N here is the generated masked samples
that shap uses to compute average for 1 image and 1 text given from the user.
Therefore the tokens here only should correspond to 1 text input. This is also
reflected on why in the multimodal case we have to explain 1 example at a time
Args:
img_txt: np.ndarray where each element is an image array with corresponding text appended
shape[0] = number of samples
tokens: list of tokens
tokenizer: used to encode and decode the tokens
Returns:
tuple of (list[image_array], list[texts])
"""
images_shape = list(img_txt.shape)
images_shape[1] -= 1 # deleting a row which is for text
images = []
texts = []
# for all samples
for i in range(img_txt.shape[0]):
images.append(img_txt[i, :-1, ...].copy())
text_arr = img_txt[i, -1, :len(tokens), 0].astype(int)
texts.append(tokenizer.decode(text_arr, skip_special_tokens=True))
return images, texts
@staticmethod
def _process_mm_shap_values(shap_values: shap.Explanation, tokens: List):
""" Split multimodal shapley values
"""
image_values = shap_values[:, :-1]
text_values = shap_values[:, -1, :len(tokens), 0]
text_values.data = np.array(tokens)[np.newaxis, :]
return image_values, text_values
@staticmethod
def _concat_shap_values(shap_values: List):
""" Build an explanation object with all shapley values concatenated
"""
values = np.concatenate([s.values for s in shap_values], axis=0)
data = np.concatenate([s.data for s in shap_values], axis=0)
base_values = np.concatenate(
[s.base_values for s in shap_values], axis=0)
clustering = np.concatenate([s.clustering for s in shap_values],
axis=0) if shap_values[0].clustering is not None else None
hierarchical = np.concatenate([s.hierarchical_values for s in shap_values],
axis=0) if shap_values[0].hierarchical_values is not None else None
return shap.Explanation(
values=values,
base_values=base_values,
data=data,
clustering=clustering,
hierarchical_values=hierarchical
)
def _examples():
""" Example for how to use this explainer
"""
# read data to try
data_path = r"hm-data/"
labels = utils.read_labels(data_path + "train.jsonl", True)
ids = [5643]
target_labels = [l for l in labels if l['id'] in ids]
# print(f"{target_labels = }")
target_images, target_texts = utils.parse_labels(
target_labels, img_to_array=True, separate_outputs=True)
# model to explain
model = MMBT.from_pretrained("mmbt.hateful_memes.images")
# Explainer hyper params
max_evals = 100
batch_size = 50
# test default partition algo
explainer = Explainer(model, max_evals=max_evals, batch_size=batch_size)
text_shap_values = explainer.explain(
target_images, target_texts, "text_only")
image_shap_values = explainer.explain(
target_images, target_texts, "image_only")
img_values, txt_values = explainer.explain(
target_images, target_texts, mode="multimodal")
# plots
# explainer.text_plot(text_shap_values)
# explainer.image_plot(image_shap_values)
if __name__ == "__main__":
_examples()
| StarcoderdataPython |
5037006 | import datetime
date_tomorrow = datetime.date.today() + datetime.timedelta(days=1)
date_tomorrow_iso = date_tomorrow.isoformat()
def filter_today(docs):
returned_array = []
for doc in docs:
d = doc["doc"]
# TODO: If due less than tomorrow
if d["due"] and d["due"] < date_tomorrow_iso:
if d["status"] not in ["done", "cancelled"]:
returned_array.append(doc)
return returned_array
def filter_priority(docs, priority):
returned_array = []
for doc in docs:
d = doc["doc"]
# TODO: If due less than tomorrow
if d["priority"] == priority:
returned_array.append(doc)
return returned_array
def save_task(entry, db_object):
"""
In Tasks GUI - Passes self.detailed
Prepares document for saving to database
If _id == 'new', a new document will be created, with a generated UUID
:param entry:
:param db_object:
:return:
"""
pri = entry["priority"]
if pri == 0:
pri = None
entry_id = entry["_id"]
if entry_id == "new":
"""If _id is 'new', a new UUID will be generated, and a new file created"""
new_task_template = {
"_id": entry_id,
"productivity": True,
"type": "task",
"due": entry.get("due", None),
"start": datetime.datetime.now().isoformat(),
"end": None,
"title": entry["title"],
"description": None,
"created": datetime.datetime.now().isoformat(),
"status": "plan",
"project": None, # Must be projectID _id
"context": None,
"priority": pri,
"tags": []
}
docitem = {
"doc": new_task_template
}
else:
fp = entry["filepath"]
docitem = db_object.fetchone(filepath=fp)
doc = docitem["doc"]
doc["title"] = entry["title"]
doc["priority"] = pri
db_object.insert(docitem)
return docitem
| StarcoderdataPython |
6697840 | import torch
from torch.nn.parameter import Parameter
A = torch.tensor([[[[1, 2, 0, 1],
[-1, 0, 3, 2],
[1, 3, 0, 1],
[2, -2, 1, 0]]]]).float()
conv2d = torch.nn.Conv2d(1, 1, kernel_size = 2, bias = False)
conv2d.weight = Parameter(torch.tensor([[[[1, -1], [-1, 1]]]]).float())
output = conv2d(A)
print(output)
| StarcoderdataPython |
3360947 | from time import time
import requests
urls = ["https://www.naver.com", "http://www.google.com", "https://www.nytimes.com", "https://www.mlb.com", "https://www.kakaocorp.com"]
begin = time()
result = []
for url in urls:
response = requests.get(url)
page = response.text
result.append("{0} Bytes".format(len(page)))
print(result)
end = time()
print('실행 시간: {0:.3f}초'.format(end - begin)) | StarcoderdataPython |
166826 | '''Defines data and parameters in an easily resuable format.'''
# Common sequence alphabets.
ALPHABETS = {
'dna': 'ATGCNatgcn-',
'rna': 'AUGCNaugcn',
'peptide': 'ACDEFGHIKLMNPQRSTVWYXacdefghiklmnpqrstvwyx'}
COMPLEMENTS = {
'dna': {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N', 'a': 't',
't': 'a', 'g': 'c', 'c': 'g', 'n': 'n', '-': '-'},
'rna': {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'N': 'N', 'a': 'u',
'u': 'a', 'g': 'c', 'c': 'g', 'n': 'n'}}
# The standard codon table.
CODON_TABLE = {
'A': ['GCG', 'GCA', 'GCU', 'GCC'],
'R': ['AGG', 'AGA', 'CGG', 'CGA', 'CGU', 'CGC'],
'N': ['AAU', 'AAC'],
'D': ['GAU', 'GAC'],
'C': ['UGU', 'UGC'],
'*': ['UGA', 'UAG', 'UAA'],
'Q': ['CAG', 'CAA'],
'E': ['GAG', 'GAA'],
'G': ['GGG', 'GGA', 'GGU', 'GGC'],
'H': ['CAU', 'CAC'],
'I': ['AUA', 'AUU', 'AUC'],
'L': ['UUG', 'UUA', 'CUG', 'CUA', 'CUU', 'CUC'],
'K': ['AAG', 'AAA'],
'M': ['AUG'],
'F': ['UUU', 'UUC'],
'P': ['CCG', 'CCA', 'CCU', 'CCC'],
'S': ['AGU', 'AGC', 'UCG', 'UCA', 'UCU', 'UCC'],
'T': ['ACG', 'ACA', 'ACU', 'ACC'],
'W': ['UGG'],
'Y': ['UAU', 'UAC'],
'V': ['GUG', 'GUA', 'GUU', 'GUC']}
# Saccharomyces cerevisiae
# source: http://www.kazusa.or.jp/codon/
# (which cites GenBank, i.e. yeast genome project CDS database)
CODON_FREQ = {
'sc': {
'GCG': 0.109972396541529,
'GCA': 0.288596474496094,
'GCU': 0.377014739102356,
'GCC': 0.224416389860021,
'AGG': 0.208564104515562,
'AGA': 0.481137590939125,
'CGG': 0.0392677130215486,
'CGA': 0.0676728924436203,
'CGU': 0.144572019635586,
'CGC': 0.0587856794445578,
'AAU': 0.589705127199784,
'AAC': 0.410294872800217,
'GAU': 0.65037901553924,
'GAC': 0.34962098446076,
'UGU': 0.629812614586062,
'UGC': 0.370187385413938,
'UGA': 0.303094329334787,
'UAG': 0.225736095965104,
'UAA': 0.471169574700109,
'CAG': 0.307418833439535,
'CAA': 0.692581166560465,
'GAG': 0.296739610207218,
'GAA': 0.703260389792782,
'GGG': 0.119057918187951,
'GGA': 0.215422869017838,
'GGU': 0.472217600813099,
'GGC': 0.193301611981112,
'CAU': 0.636710255236351,
'CAC': 0.363289744763649,
'AUA': 0.273331091899568,
'AUU': 0.462925823433014,
'AUC': 0.263743084667417,
'UUG': 0.286319859527146,
'UUA': 0.275534472444779,
'CUG': 0.110440170850593,
'CUA': 0.141277445174148,
'CUU': 0.129115062940288,
'CUC': 0.0573129890630467,
'AAG': 0.423936637198697,
'AAA': 0.576063362801303,
'AUG': 1,
'UUU': 0.586126603840976,
'UUC': 0.413873396159024,
'CCG': 0.120626895854398,
'CCA': 0.417143753704543,
'CCU': 0.307740315888567,
'CCC': 0.154489034552491,
'AGU': 0.159245398699046,
'AGC': 0.109749229743856,
'UCG': 0.0963590866114069,
'UCA': 0.210157220085731,
'UCU': 0.264456618519558,
'UCC': 0.160032446340401,
'ACG': 0.135583991997041,
'ACA': 0.302413913478422,
'ACU': 0.345237040780705,
'ACC': 0.216765053743832,
'UGG': 1,
'UAU': 0.559573963633711,
'UAC': 0.440426036366289,
'GUG': 0.190897642582249,
'GUA': 0.208783185960798,
'GUU': 0.391481704636128,
'GUC': 0.208837466820824}}
# Codon usage organized by organism, then amino acid
CODON_FREQ_BY_AA = {
'sc': {
'A': {'GCG': 0.109972396541529,
'GCA': 0.288596474496094,
'GCU': 0.377014739102356,
'GCC': 0.224416389860021},
'R': {'AGG': 0.208564104515562,
'AGA': 0.481137590939125,
'CGG': 0.0392677130215486,
'CGA': 0.0676728924436203,
'CGU': 0.144572019635586,
'CGC': 0.0587856794445578},
'N': {'AAU': 0.589705127199784,
'AAC': 0.410294872800217},
'D': {'GAU': 0.65037901553924,
'GAC': 0.34962098446076},
'C': {'UGU': 0.629812614586062,
'UGC': 0.370187385413938},
'*': {'UGA': 0.303094329334787,
'UAG': 0.225736095965104,
'UAA': 0.471169574700109},
'Q': {'CAG': 0.307418833439535,
'CAA': 0.692581166560465},
'E': {'GAG': 0.296739610207218,
'GAA': 0.703260389792782},
'G': {'GGG': 0.119057918187951,
'GGA': 0.215422869017838,
'GGU': 0.472217600813099,
'GGC': 0.193301611981112},
'H': {'CAU': 0.636710255236351,
'CAC': 0.363289744763649},
'I': {'AUA': 0.273331091899568,
'AUU': 0.462925823433014,
'AUC': 0.263743084667417},
'L': {'UUG': 0.286319859527146,
'UUA': 0.275534472444779,
'CUG': 0.110440170850593,
'CUA': 0.141277445174148,
'CUU': 0.129115062940288,
'CUC': 0.0573129890630467},
'K': {'AAG': 0.423936637198697,
'AAA': 0.576063362801303},
'M': {'AUG': 1},
'F': {'UUU': 0.586126603840976,
'UUC': 0.413873396159024},
'P': {'CCG': 0.120626895854398,
'CCA': 0.417143753704543,
'CCU': 0.307740315888567,
'CCC': 0.154489034552491},
'S': {'AGU': 0.159245398699046,
'AGC': 0.109749229743856,
'UCG': 0.0963590866114069,
'UCA': 0.210157220085731,
'UCU': 0.264456618519558,
'UCC': 0.160032446340401},
'T': {'ACG': 0.135583991997041,
'ACA': 0.302413913478422,
'ACU': 0.345237040780705,
'ACC': 0.216765053743832},
'W': {'UGG': 1},
'Y': {'UAU': 0.559573963633711,
'UAC': 0.440426036366289},
'V': {'GUG': 0.190897642582249,
'GUA': 0.208783185960798,
'GUU': 0.391481704636128,
'GUC': 0.208837466820824}}}
# Complete list of codons.
CODONS = {'AAA': 'K',
'AAC': 'N',
'AAG': 'K',
'AAU': 'N',
'ACA': 'T',
'ACC': 'T',
'ACG': 'T',
'ACU': 'T',
'AGA': 'R',
'AGC': 'S',
'AGG': 'R',
'AGU': 'S',
'AUA': 'I',
'AUC': 'I',
'AUG': 'M',
'AUU': 'I',
'CAA': 'Q',
'CAC': 'H',
'CAG': 'Q',
'CAU': 'H',
'CCA': 'P',
'CCC': 'P',
'CCG': 'P',
'CCU': 'P',
'CGA': 'R',
'CGC': 'R',
'CGG': 'R',
'CGU': 'R',
'CUA': 'L',
'CUC': 'L',
'CUG': 'L',
'CUU': 'L',
'GAA': 'E',
'GAC': 'D',
'GAG': 'E',
'GAU': 'D',
'GCA': 'A',
'GCC': 'A',
'GCG': 'A',
'GCU': 'A',
'GGA': 'G',
'GGC': 'G',
'GGG': 'G',
'GGU': 'G',
'GUA': 'V',
'GUC': 'V',
'GUG': 'V',
'GUU': 'V',
'UAA': '*',
'UAC': 'Y',
'UAG': '*',
'UAU': 'Y',
'UCA': 'S',
'UCC': 'S',
'UCG': 'S',
'UCU': 'S',
'UGA': '*',
'UGC': 'C',
'UGG': 'W',
'UGU': 'C',
'UUA': 'L',
'UUC': 'F',
'UUG': 'L',
'UUU': 'F'}
| StarcoderdataPython |
12852654 | <gh_stars>10-100
"""CLI handling for `routemaster`."""
import logging
import yaml
import click
import layer_loader
from routemaster.app import App
from routemaster.cron import CronThread
from routemaster.config import ConfigError, load_config
from routemaster.server import server
from routemaster.middleware import wrap_application
from routemaster.validation import ValidationError, validate_config
from routemaster.gunicorn_application import GunicornWSGIApplication
logger = logging.getLogger(__name__)
@click.group()
@click.option(
'-c',
'--config-file',
'config_files',
help="Path to the service config file.",
type=click.File(encoding='utf-8'),
required=True,
multiple=True,
)
@click.pass_context
def main(ctx, config_files):
"""Shared entrypoint configuration."""
logging.getLogger('schedule').setLevel(logging.CRITICAL)
config_data = layer_loader.load_files(
config_files,
loader=yaml.load,
)
try:
config = load_config(config_data)
except ConfigError:
logger.exception("Configuration Error")
click.get_current_context().exit(1)
ctx.obj = App(config)
_validate_config(ctx.obj)
@main.command()
@click.pass_context
def validate(ctx):
"""
Entrypoint for validation of configuration files.
Validation is done by the main handler in order to cover all code paths,
so this function is a stub so that `serve` does not have to be called.
"""
pass
@main.command()
@click.option(
'-b',
'--bind',
help="Bind address and port.",
type=str,
default='[::]:2017',
)
@click.option(
'--debug/--no-debug',
help="Enable debugging mode.",
default=False,
)
@click.option(
'--workers',
help="Number of gunicorn workers to run.",
type=int,
default=1,
)
@click.pass_context
def serve(ctx, bind, debug, workers): # pragma: no cover
"""Entrypoint for serving the Routemaster HTTP service."""
app = ctx.obj
server.config.app = app
if debug:
server.config['DEBUG'] = True
cron_thread = CronThread(app)
cron_thread.start()
wrapped_server = wrap_application(app, server)
def post_fork():
app.initialise()
app.logger.init_flask(server)
try:
instance = GunicornWSGIApplication(
wrapped_server,
bind=bind,
debug=debug,
workers=workers,
post_fork=post_fork,
)
instance.run()
finally:
cron_thread.stop()
def _validate_config(app: App):
try:
validate_config(app, app.config)
except ValidationError as e:
msg = f"Validation Error: {e}"
logger.exception(msg)
click.get_current_context().exit(1)
| StarcoderdataPython |
3421953 | <filename>doc/adjusted_MLE/tests/test_risk.py
import numpy as np, os, itertools
import pandas as pd
from rpy2 import robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects.pandas2ri
from rpy2.robjects.packages import importr
from .comparison_metrics import (sim_xy,
glmnet_lasso,
relative_risk)
from .risk_comparisons import risk_comparison
def output_file(n=200,
p=500,
rho=0.35,
s=5,
beta_type=1,
snr_values=np.array([0.10, 0.15, 0.20, 0.25, 0.30,
0.35, 0.42, 0.71, 1.22, 2.07]),
tuning_nonrand="lambda.1se",
tuning_rand="lambda.1se",
randomizing_scale=np.sqrt(0.50),
ndraw=50,
outpath = None):
df_risk = pd.DataFrame()
if n > p:
full_dispersion = True
else:
full_dispersion = False
snr_list = []
for snr in snr_values:
snr_list.append(snr)
relative_risk = np.squeeze(risk_comparison(n=n,
p=p,
nval=n,
rho=rho,
s=s,
beta_type=beta_type,
snr=snr,
randomizer_scale=randomizing_scale,
full_dispersion=full_dispersion,
tuning_nonrand =tuning_nonrand,
tuning_rand=tuning_rand, ndraw = ndraw))
df_risk = df_risk.append(pd.DataFrame(data=relative_risk.reshape((1, 6)), columns=['sel-MLE', 'ind-est', 'rand-LASSO',
'rel-rand-LASSO', 'rel-LASSO','LASSO']), ignore_index=True)
df_risk['n'] = n
df_risk['p'] = p
df_risk['s'] = s
df_risk['rho'] = rho
df_risk['beta-type'] = beta_type
df_risk['snr'] = pd.Series(np.asarray(snr_list))
df_risk['target'] = "selected"
if outpath is None:
outpath = os.path.dirname(__file__)
outfile_risk_csv = os.path.join(outpath, "dims_" + str(n) + "_" + str(p) + "_risk_betatype" + str(beta_type) + "_rho_" + str(rho) + ".csv")
outfile_risk_html = os.path.join(outpath, "dims_" + str(n) + "_" + str(p) + "_risk_betatype" + str(beta_type) + "_rho_" + str(rho) + ".html")
df_risk.to_csv(outfile_risk_csv, index=False)
df_risk.to_html(outfile_risk_html)
| StarcoderdataPython |
6683573 | #!/usr/bin/env python
import sys
import psycopg2
def execute(clause):
db = psycopg2.connect("dbname=news")
cursor = db.cursor()
cursor.execute(clause)
res = cursor.fetchall()
db.close()
return res
def get_top3_articles():
res = execute("""
SELECT * FROM article_log_view
ORDER BY views DESC
LIMIT 3;
""")
print "\n\nTop 3 articles with most views:"
print '%-35s %9s' % ('article', 'views')
print '\n'.join(['%-35s %9s' % x for x in res])
def get_sorted_authors():
res = execute("""
SELECT name, views FROM author_log_view, authors
WHERE author_log_view.author = authors.id
ORDER BY views desc;
""")
print "\n\nAuthors list sorted by views:"
print '%-35s %9s' % ('author', 'views')
print '\n'.join(['%-35s %9s' % x for x in res])
def get_error_days():
res = execute("""
SELECT day_total_view.day,
ROUND(error::numeric/total::numeric, 2) AS error_rate
FROM day_total_view, day_error_view
WHERE (day_total_view.day = day_error_view.day)
AND (error > total * 0.01);
""")
print "\n\nDays when more than 1% of requests led to errors:"
print '%-35s %9s' % ('day', 'error rate')
print '\n'.join(['%-35s %9s' % x for x in res])
if __name__ == '__main__':
for type in sys.argv:
if type == 'article':
get_top3_articles()
elif type == 'author':
get_sorted_authors()
elif type == 'error':
get_error_days()
| StarcoderdataPython |
4811921 | # 1. Reebok is designing a new type of Crossfit shoe, the Nano X. The fixed cost for the
# production will be $24,000. The variable cost will be $36 per pair of shoes. The shoes will
# sell for $107 for each pair. Using Python, graph the cost and revenue functions and
# determine how many pairs of sneakers will have to be sold for the company to break even on
# this new line of shoes.
import matplotlib.pyplot as plt
import math
# Expenses
# y = 36x + 24000
exp_slope = 36
exp_int = 24000
exp_x0 = 0
exp_y0 = exp_slope * exp_x0 + exp_int
exp_x1 = 1000
exp_y1 = exp_slope * exp_x1 + exp_int
# Revenue
# y = 107x
rev_slope = 107
rev_int = 0
rev_x0 = 0
rev_y0 = rev_slope * rev_x0 + rev_int
rev_x1 = 1000
rev_y1 = rev_slope * rev_x1 + rev_int
# Breakeven
# 107x = 36x + 24000
# 71x = 24000
# x = 338.028
# y = 107(338.028) = 36169.014
be_x = 24000 / 71
be_y = 107 * be_x
# Plot the lines
fig, shoe = plt.subplots()
shoe.scatter([exp_x0, exp_x1],
[exp_y0, exp_y1],
c = 'r')
shoe.plot([exp_x0, exp_x1],
[exp_y0, exp_y1],
c = 'r', alpha = 0.3)
shoe.scatter([rev_x0, rev_x1],
[rev_y0, rev_y1],
c = 'g')
shoe.plot([rev_x0, rev_x1],
[rev_y0, rev_y1],
c = 'g', alpha = 0.3)
shoe.scatter([be_x],
[be_y],
c = 'b', s = 100)
plt.xlim(0, 750)
plt.ylim(0, 75000)
plt.show()
print("To break even, Reebok must sell",
math.ceil(be_x), "shoes.")
# 2. Nicole invests a total of $17,500 in three products. She invests one part in a mutual fund
# which has an annual return of 11%. She invests the second part in government bonds at 7%
# per year. The third part she puts in CDs at 5% per year. She invests twice as much in the
# mutual fund as in the CDs. In the first year Nicole's investments bring a total return of $1495.
# How much did she invest in each product?
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
# x + y + z = 17500
# 0.11x + 0.07y + 0.05z = 1495
# x - 2z = 0
a = inv(np.matrix('1 1 1; 11 7 5; 1 0 -2'))
b = np.array([17500, 149500, 0])
res = a.dot(b)
print("mutual funds=", res[0, 0],
"gov't bonds =", res[0, 1],
"CDs =", res[0, 2])
labels = 'Mut Funds', "Gov't Bonds", 'CDs'
sizes = [res[0, 0], res[0, 1], res[0, 2]]
colors = ['lightskyblue', 'pink', 'yellowgreen']
plt.pie(sizes, labels = labels, colors = colors,
autopct = '%1.1f%%', startangle = 140)
plt.axis('equal')
plt.show()
# 3. A company has 252 sales reps, each to be assigned to one of four marketing teams. If the first
# team is to have three times as many members as the second team and the third team is to
# have twice as many members as the fourth team, how can the members be distributed among
# the teams?
import pandas as pd
# w + x + y + z = 252
# w = 3x
# y = 2z
# 3x + x + 2z + z = 252
# 4x + 3z = 252
# 4x = 252 - 3z
# x = 63 - 3/4z
# w = 3 * (63 - 3/4z)
# w = 189 - 9/4z
res = []
for z in range(253):
z = float(z)
x = float(63 - 3 * z / 4)
y = float(2 * z)
w = float(189 - 9 * z / 4)
a,b = False,False
if (w > 0) & (x > 0) & (y > 0) & (z > 0):
a = True
if (w.is_integer()) & (x.is_integer()) & (y.is_integer()) & (z.is_integer()):
b = True
if a & b: res.append([w, x, y, z])
teams = ['team1', 'team2', 'team3', 'team4']
print(pd.DataFrame(res, columns = teams))
pd.DataFrame(res, columns = teams).plot(kind = 'bar', stacked = True)
# 4. A company makes three types of artisanal chocolate bars: cherry, almond, and raisin. Matrix
# A gives the amount of ingredients in one batch. Matrix B gives the costs of ingredients from
# suppliers J and K. Using Python, calculate the cost of 100 batches of each candy using
# ingredients from supplier K.
import numpy as np
a = np.matrix('6 8 1; 6 4 1; 5 7 1')
b = np.matrix('4 3; 4 5; 2 2')
batch = a.dot(b)
print("100 cherry =", batch[0, 1] * 100,
"100 almond =", batch[1, 1] * 100,
"100 raisin =", batch[2, 1] * 100)
# 5. Welsh-Ryan Arena seats 15,000 people. Courtside seats cost $8, first level seats cost $6, and
# upper deck seats cost $4. The total revenue for a sellout is $76,000. If half the courtside seats,
# half the upper deck seats, and all the first level seats are sold, then the total revenue is
# $44,000. How many of each type of seat are there?
import numpy as np
from numpy.linalg import inv
# x + y + z = 15000
# 8x + 6y + 4z = 76000
# 0.5(8x + 4z) + 6y = 44000
# 4x + 6y + 2z = 44000
a = inv(np.matrix('1 1 1; 8 6 4; 4 6 2'))
b = np.array([15000, 76000, 44000])
res = a.dot(b)
print("courtside =", res[0, 0],
"first level=", res[0, 1],
"upper deck =", res[0, 2])
# 6. Due to new environmental restrictions, a chemical company must use a new process to
# reduce pollution. The old process emits 6 g of Sulphur and 3 g of lead per liter of chemical
# made. The new process emits 2 g of Sulphur and 4 g of lead per liter of chemical made. The
# company makes a profit of 25¢ per liter under the old process and 16¢ per liter under the new
# process. No more than 18,000 g of Sulphur and no more than 12,000 g of lead can be emitted
# daily. How many liters of chemicals should be made daily under each process to maximize
# profits? What is the maximum profit?
from scipy.optimize import linprog as lp
import numpy as np
# Decision variables: x = liters/day via the old process, y = via the new one.
# linprog minimizes, so the profit coefficients 0.25 and 0.16 are negated
# to maximize, subject to the daily emission caps:
#   6x + 2y <= 18000   (Sulphur)
#   3x + 4y <= 12000   (lead)
#   x, y >= 0          (linprog's default bounds)
profit = np.array([-0.25, -0.16])
A = np.array([[6.0, 2.0],
              [3.0, 4.0]])
b = np.array([18000.0, 12000.0])
liters = lp(profit, A, b)
print("old method=", round(liters.x[0], 2), "liters.",
      "new method=", round(liters.x[1], 2), "liters.")
print("Max daily profit=",
      round(0.25 * liters.x[0] + 0.16 * liters.x[1], 2))
# 7. Northwestern is looking to hire teachers and TA’s to fill its staffing needs for its summer
# program at minimum cost. The average monthly salary of a teacher is $2400 and the average
# monthly salary of a TA is $1100. The program can accommodate up to 45 staff members and
# needs at least 30 to run properly. They must have at least 10 TA’s and may have up to 3 TA’s
# for every 2 teachers. Using Python, find how many teachers and TA’s the program should
# hire to minimize costs. What is the minimum cost?
from scipy.optimize import linprog as lp
import numpy as np
# x = teachers, y = TA's.  Minimize the payroll 2400x + 1100y subject to
#   x + y >= 30          (minimum staff size; negated below for <= form)
#   2y - 3x <= 0         (at most 3 TA's per 2 teachers)
# with the individual limits 0 <= x <= 45 and 10 <= y <= 45 as bounds.
# NOTE(review): the joint cap x + y <= 45 is not in A; since we minimize a
# cost with positive coefficients, the solution sits on x + y = 30 anyway.
A = np.array([[-1, -1],
              [-3, 2]])
b = np.array([-30, 0])
x_bounds = (0, 45)
y_bounds = (10, 45)
costs = np.array([2400, 1100])
hire = lp(costs, A, b,
          bounds = (x_bounds, y_bounds))
print("Hire", hire.x[0], "teachers.",
      "Hire", hire.x[1], "TAs.")
print("Minimum cost=",
      2400 * hire.x[0] + 1100 * hire.x[1])
# 8. To be at his best as a teacher, Roger needs at least 10 units of vitamin A, 12 units of vitamin
# B, and 20 units of vitamin C per day. Pill #1 contains 4 units of A and 3 of B. Pill #2 contains
# 1 unit of A, 2 of B, and 4 of C. Pill #3 contains 10 units of A, 1 of B, and 5 of C. Pill #1 costs
# 6 cents, pill #2 costs 8 cents, and pill #3 costs 1 cent. How many of each pill must Roger take
# to minimize his cost, and what is that cost?
from scipy.optimize import linprog as lp
import numpy as np
# Minimize 0.06x + 0.08y + 0.01z subject to the daily vitamin minimums
#   4x +  y + 10z >= 10   (vitamin A)
#   3x + 2y +   z >= 12   (vitamin B)
#        4y +  5z >= 20   (vitamin C)
# req holds the >=-form coefficients; linprog wants <=, so both sides are
# negated when building A and b.
req = np.array([[4, 1, 10],
                [3, 2, 1],
                [0, 4, 5]])
A = -req
b = -np.array([10, 12, 20])
pills = lp(np.array([0.06, 0.08, 0.01]), A, b)
print("Pill #1=", pills.x[0],
      "Pill #2=", pills.x[1],
      "Pill #3=", pills.x[2],)
print("Minimum cost=",
      0.06 * pills.x[0] + 0.08 * pills.x[1] + 0.01 * pills.x[2])
# 9. An electronics store stocks high-end DVD players, surround sound systems, and televisions.
# They have limited storage space and can stock a maximum of 210 of these three machines.
# They know from past experience that they should stock twice as many DVD players as stereo
# systems and at least 30 television sets. If each DVD player sells for $450, each surround
# sound system sells for $2000, and each television sells for $750, how many of each should be
# stocked and sold for maximum revenues? What is the maximum revenue?
from scipy.optimize import linprog as lp
import numpy as np
# x = DVD players, y = surround sound systems, z = televisions.
# Maximize 450x + 2000y + 750z (prices negated because linprog minimizes)
# subject to
#   x + y + z <= 210    (storage capacity)
#   -x + 2y   <= 0      (x >= 2y: twice as many DVDs as stereos)
#   -z        <= -30    (at least 30 televisions)
prices = np.array([450, 2000, 750])
A = np.array([[1, 1, 1],
              [-1, 2, 0],
              [0, 0, -1]])
b = np.array([210, 0, -30])
units = lp(-prices, A, b)
print("DVDs =", units.x[0],
      "SS Systems=", units.x[1],
      "TVs =", units.x[2],)
print("Maximum revenue=",
      450 * units.x[0] + 2000 * units.x[1] + 750 * units.x[2])
# 10. A fast-food company is conducting a sweepstakes, and ships two boxes of game pieces to a
# particular franchise. Box A has 4% of its contents being winners, while 5% of the contents of
# box B are winners. Box A contains 27% of the total tickets. The contents of both boxes are
# mixed in a drawer and a ticket is chosen at random. Using Python, find the probability it
# came from box A if it is a winner.
# Bayes' theorem: P(A | winner) = P(A and winner) / P(winner).
is_box_a = 0.27        # P(ticket came from box A)
box_a_win = 0.04       # P(winner | box A)
box_b_win = 0.05       # P(winner | box B)
a = is_box_a * box_a_win          # joint probability: box A and winner
b = (1 - is_box_a) * box_b_win    # joint probability: box B and winner
p_winner = a + b                  # total probability of drawing a winner
prob = a / p_winner
print("Probability that winner came from Box A is",
      round(prob * 100, 3), "%.")
# Heatmap of probabilities for drawing random card
import pandas as pd
import seaborn as sns
# Complementary joint probabilities: P(box and loser) for each box.
a2 = is_box_a * (1 - box_a_win)
b2 = (1 - is_box_a) * (1 - box_b_win)
# The four joint probabilities a, b, a2, b2 already sum to 1, so the
# denominator below equals 1 and each probX is just the joint probability.
prob2 = a / (a + b + a2 + b2)    # P(box A and winner)
prob3 = b / (a + b + a2 + b2)    # P(box B and winner)
prob4 = a2 / (a + b + a2 + b2)   # P(box A and loser)
prob5 = b2 / (a + b + a2 + b2)   # P(box B and loser)
# 2x2 table: rows = box of origin, columns = winner/loser outcome.
df = pd.DataFrame([[prob2, prob4], [prob3, prob5]],
                  index = ['Box A', 'Box B'],
                  columns = ['Winner', 'Loser'])
sns.heatmap(df)
| StarcoderdataPython |
157465 |
from server.worker import celery
from server.main.models import VirtualMachineModel
@celery.task()
def task_vmspawning(id):
    """Celery task: spawn the virtual machine whose model id is *id*.

    Progress, success and failure are reported through the vmcreate
    callbacks defined below.  Returns the VM's name.
    """
    vmachine = VirtualMachineModel.get(id)
    vmachine.spawn(
        dhcp=False,
        on_running=running_vmcreate_callback,
        on_success=success_vmcreate_callback,
        on_error=error_vmcreate_callback
    )
    return vmachine.name
@celery.task()
def task_vmdestroying(id):
    """Celery task: destroy the virtual machine whose model id is *id*.

    Completion is reported through the vmdestroy callbacks below.
    Unlike task_vmspawning, this task returns nothing.
    """
    vmachine = VirtualMachineModel.get(id)
    vmachine.destroy(
        on_success=success_vmdestroy_callback,
        on_running=running_vmdestroy_callback,
        on_error=error_vmdestroy_callback
    )
# # # # # #
def running_vmcreate_callback(task, *args, **kwargs):
    """Print creation progress for the VM named in kwargs['vmname']."""
    raw = task.info.progress
    # A missing progress value is treated as complete (100%).
    progress = 100 if raw is None else raw
    print(f"VM <{kwargs.get('vmname')}> Creation Progress: {progress}")
def success_vmcreate_callback(task, *args, **kwargs):
    """Persist the freshly created VM's identity and state into the DB model.

    task.info.result is expected to carry the backend VM object
    (pyVmomi-style summary attributes -- confirm against the spawner).
    """
    print(f"VM <{kwargs.get('vmname')}> Creation Success: {task.info.result}")
    vm = task.info.result
    vmachine = VirtualMachineModel.query.filter_by(name=vm.name).first()
    vmachine.uuid = vm.summary.config.uuid
    vmachine.state = vm.summary.runtime.powerState
    # NOTE(review): capacity is reassigned on every iteration, so only the
    # LAST disk's capacity (converted bytes -> GiB) is kept.  If the total
    # across disks is intended, this should accumulate instead.
    for each in vm.summary.vm.guest.disk:
        vmachine.capacity = each.capacity/1024/1024/1024
    vmachine.save()
def error_vmcreate_callback(task, *args, **kwargs):
    """Print the failure reason when VM creation errors out."""
    cause = task.info.result
    print(f"VM <{kwargs.get('vmname')}> Error Cause: {cause}")
# # # # # #
def running_vmdestroy_callback(task, *args, **kwargs):
    """Print deletion progress for the VM named in kwargs['vmname']."""
    progress = task.info.progress
    # None means the backend stopped reporting -> assume the job finished.
    if progress is None:
        progress = 100
    print(f"VM <{kwargs.get('vmname')}> Deletion Progress: {progress}")
def success_vmdestroy_callback(task, *args, **kwargs):
    """Remove the DB record for the VM that was just destroyed.

    Looks the model up by the 'vmname' keyword argument and deletes it.
    """
    print(f"Success Callback (VM Deletion): {kwargs.get('vmname')}")
    vmachine = VirtualMachineModel.query.filter_by(name=kwargs.get("vmname")).first()
    vmachine.delete()
def error_vmdestroy_callback(task, *args, **kwargs):
    """Print the failure reason when VM deletion errors out."""
    print("Error Callback (VM Deletion): {}".format(task.info.error))
# # # # # #
def make_task(action, vmid):
    """Dispatch the Celery task for *action* ('create'/'delete') on VM *vmid*.

    Returns the AsyncResult handle of the queued task.  Raises
    AssertionError for any other action string.
    """
    assert action.lower() in ["create", "delete"], f"Action not accepted ({action.upper()})"
    if action.lower() == "create":
        task = task_vmspawning.delay(id=vmid)
    elif action.lower() == "delete":
        task = task_vmdestroying.delay(id=vmid)
    return task
def get_task(action, task_id):
    """Return the AsyncResult for a previously queued create/delete task.

    *action* selects which task type's result backend to query; raises
    AssertionError for any other action string.
    """
    assert action.lower() in ["create", "delete"], f"Action not accepted ({action.upper()})"
    if action.lower() == "create":
        task = task_vmspawning.AsyncResult(task_id)
    elif action.lower() == "delete":
        task = task_vmdestroying.AsyncResult(task_id)
    return task
1809544 | <gh_stars>0
import json
from project_manager_pro._meta import cache_commands
from colorama import init, Style, Fore, Back
init(autoreset=True)
def _acmd(alias, body):
    """Register *body* under *alias* in the JSON command cache.

    Loads the cache file, adds/overwrites the alias, reports the addition
    in green, and writes the cache back out.
    """
    # Use a context manager for the read as well: the original opened the
    # file for reading and never closed it.
    with open(cache_commands, 'r', encoding='utf-8') as file:
        commands = json.load(file)
    commands[alias] = body
    print(Fore.GREEN + '' + alias + ' \"' + commands[alias] + '\"' + ' command added')
    with open(cache_commands, 'w', encoding='utf-8') as f:
        f.write(json.dumps(commands, ensure_ascii=False))
| StarcoderdataPython |
8078283 | import xml.etree.ElementTree as xml_tree
import pandas as pd
import numpy as np
import os
import shutil
def check_box(path):
    """Scan Pascal-VOC style annotation XMLs under *path* for degenerate boxes.

    A bounding box is degenerate when xmin >= xmax or ymin >= ymax.  For
    every degenerate box found, the annotation file and its matching .jpg
    are moved into ./error2 (best effort) and a counter is incremented.
    Prints each offending filename and the final count.

    NOTE(review): the counter increments once per bad *box*, not per bad
    file, and the image path ./myData/JPEGImages is hard-coded.
    """
    files = os.listdir(path)
    i = 0  # number of degenerate boxes encountered
    for anna_file in files:
        tree = xml_tree.parse(path + "/" + anna_file)
        root = tree.getroot()
        for obj in root.findall('object'):
            bbox = obj.find('bndbox')
            if (float(bbox.find('ymin').text) >= float(bbox.find('ymax').text)) or (float(bbox.find('xmin').text) >= float(bbox.find('xmax').text)):
                print(anna_file)
                i += 1
                # Quarantine the bad annotation and its image.  The narrow
                # except replaces the original bare `except: pass`, which
                # also swallowed KeyboardInterrupt/SystemExit; a move can
                # legitimately fail here when the file was already moved
                # by an earlier degenerate box in the same annotation.
                try:
                    shutil.move(path + "/" + anna_file, "./error2/" + anna_file)
                    shutil.move("./myData/JPEGImages/" + anna_file.split(".")[0] + ".jpg",
                                "./error2/" + anna_file.split(".")[0] + ".jpg")
                except (OSError, shutil.Error):
                    pass
    print(i)

if __name__ == "__main__":
    check_box("/home/ambavm/make/yolov5/datasets/score/images/Annotations")
from argparse import Namespace
from maskrcnn.train import run
# Build the argument namespace programmatically instead of parsing a CLI,
# then launch an inference-only run that resumes from a previous checkpoint.
args = Namespace()
args.comment = 'MXInfer'                   # label for this run
args.config = ['main', 'mexico', 'infer']  # config fragments to apply
args.mode = ['infer']                      # inference only, no training
args.resume_run = 'run_01_PretrainPool'    # checkpointed run to resume from
args.no_cuda = False
args.cuda_max_devices = 1                  # cap at a single CUDA device
run(args)
| StarcoderdataPython |
299799 | <filename>dqn/policies/categorical_mlp_q_policy.py
from rllab.core.lasagne_powered import LasagnePowered
import lasagne.layers as L
from rllab.core.network import MLP
from rllab.distributions.categorical import Categorical
from rllab.policies.base import Policy
from rllab.misc import tensor_utils
from rllab.spaces.discrete import Discrete
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc.overrides import overrides
import numpy as np
import lasagne.nonlinearities as NL
class CategoricalMlpQPolicy(Policy, LasagnePowered, Serializable):
    """Greedy policy over a discrete action space backed by an MLP Q-network.

    The MLP maps flattened observations to one Q-value per action; actions
    are chosen by argmax over those Q-values.
    """

    def __init__(
            self,
            name_or_scope,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.tanh,
            num_seq_inputs=1,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Discrete)
        self._env_spec = env_spec
        # print( env_spec.observation_space.shape )
        q_network = MLP(
            input_shape=(env_spec.observation_space.flat_dim * num_seq_inputs,),
            output_dim=env_spec.action_space.n,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=NL.linear,
            # NOTE(review): `name` is not defined in this scope -- this
            # looks like it should be `name_or_scope`; calling __init__
            # as written would raise NameError here.
            name=name
        )
        self._l_q = q_network.output_layer
        self._l_obs = q_network.input_layer
        # Compiled function: batch of flat observations -> batch of Q-vectors.
        self._f_q = ext.compile_function(
            [q_network.input_layer.input_var],
            L.get_output(q_network.output_layer)
        )
        self._dist = Categorical(env_spec.action_space.n)
        super(CategoricalMlpQPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [q_network.output_layer])

    @property
    def vectorized(self):
        # Supports batched get_actions.
        return True

    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Return the greedy (argmax-Q) action for a single observation."""
        flat_obs = self.observation_space.flatten(observation)
        q = self._f_q([flat_obs])[0]
        action = np.argmax(q)
        return action, dict(q=q)

    def get_actions(self, observations):
        """Return greedy actions for a batch of observations."""
        flat_obs = self.observation_space.flatten_n(observations)
        q = self._f_q(flat_obs)
        actions = np.argmax(q, axis=1)
        return actions, dict(q=q)

    def get_qval_sym(self, obs_var, a_var):
        """Symbolic Q(s, a): Q-vector dotted with the one-hot action a_var."""
        q_vals_sym = L.get_output(self._l_q,
                                  {self._l_obs: obs_var})
        return (q_vals_sym * a_var).sum(axis=1)

    def get_qval_sym_test(self, obs_var, a_var):
        """Debug variant of get_qval_sym: returns the intermediate tensors too."""
        q_vals_sym = L.get_output(self._l_q,
                                  {self._l_obs: obs_var})
        return [q_vals_sym, a_var, q_vals_sym * a_var, (q_vals_sym * a_var).sum(axis=1)]
| StarcoderdataPython |
9674700 | <reponame>LabRobPL/robotics-toolbox-python
import numpy as np
from spatialmath import base
def jacobian_numerical(f, x, dx=1e-8, N=0):
    r"""
    Numerically compute Jacobian of function

    :param f: the function, returns an m-vector
    :type f: callable
    :param x: function argument
    :type x: ndarray(n)
    :param dx: the numerical perturbation, defaults to 1e-8
    :type dx: float, optional
    :param N: function returns SE(N) matrix, defaults to 0
    :type N: int, optional
    :return: Jacobian matrix
    :rtype: ndarray(m,n)

    Computes a numerical approximation to the Jacobian for ``f(x)`` where
    :math:`f: \mathbb{R}^n \mapsto \mathbb{R}^m`.

    Uses first-order difference :math:`J[:,i] = (f(x + dx) - f(x)) / dx`.

    If ``N`` is 2 or 3, then it is assumed that the function returns
    an SE(N) matrix which is converted into a Jacobian column comprising the
    translational Jacobian followed by the rotational Jacobian.
    """
    Jcol = []
    I = np.eye(len(x))
    # Evaluate the function at the base point once; the original evaluated
    # f(x) twice (as both J0 and f0) for no benefit.
    f0 = f(x)
    for i in range(len(x)):
        fi = f(x + I[:, i] * dx)
        Ji = (fi - f0) / dx  # first-order forward difference
        if N > 0:
            # f returns an SE(N) matrix: split the column into its
            # translational part followed by its rotational part.
            t = Ji[:N, N]
            r = base.vex(Ji[:N, :N] @ f0[:N, :N].T)
            Ji = np.r_[t, r]
        Jcol.append(Ji)
    return np.c_[Jcol].T
def hessian_numerical(J, x, dx=1e-8):
    r"""
    Numerically compute Hessian of Jacobian function

    :param J: the Jacobian function, returns an ndarray(m,n)
    :type J: callable
    :param x: function argument
    :type x: ndarray(n)
    :param dx: the numerical perturbation, defaults to 1e-8
    :type dx: float, optional
    :return: Hessian matrix
    :rtype: ndarray(m,n,n)

    Computes a numerical approximation to the Hessian for ``J(x)`` where
    :math:`f: \mathbb{R}^n \mapsto \mathbb{R}^{m \times n}`

    Uses first-order difference :math:`H[:,:,i] = (J(x + dx) - J(x)) / dx`.
    """
    n = len(x)
    J0 = J(x)
    Hcol = []
    # Forward difference of the Jacobian along each coordinate axis.
    for i in range(n):
        step = np.zeros(n)
        step[i] = dx
        Hcol.append((J(x + step) - J0) / dx)
    return np.stack(Hcol, axis=2)
if __name__ == "__main__":

    import roboticstoolbox as rtb
    # Sanity-check demo: compare the numerical Jacobian/Hessian of a Puma560
    # forward kinematics against the toolbox's analytic versions.
    np.set_printoptions(linewidth=120, formatter={'float': lambda x: f"{x:8.4g}" if abs(x) > 1e-10 else f"{0:8.4g}"})

    robot = rtb.models.DH.Puma560()
    q = robot.qn

    # N=3: fkine returns an SE(3) matrix, converted to a geometric Jacobian.
    J = jacobian_numerical(lambda q: robot.fkine(q).A, q, N=3)
    print(J)
    print(robot.jacob0(q))

    H = hessian_numerical(robot.jacob0, q)
    print(H)
    print(robot.ets().hessian0(q))
4883902 | <reponame>ayroti-18/Competitive-Programming<gh_stars>1-10
#link https://practice.geeksforgeeks.org/problems/union-of-two-arrays/0#
from collections import defaultdict
# For each test case, read the two arrays and print the size of their union.
for _ in range(int(input())):
    n, m = map(int, input().split())
    narr = list(map(int, input().split()))
    marr = list(map(int, input().split()))
    # The union size is simply the number of distinct values across both
    # arrays; a set union replaces the original defaultdict-based manual
    # membership bookkeeping with the idiomatic (and faster) form.
    print(len(set(narr) | set(marr)))
| StarcoderdataPython |
398498 | import logging
def monitor(app, store_event):
    """Consume Celery events from *app* and forward them to *store_event*.

    Blocks indefinitely inside ``recv.capture``; intended to run in its own
    process or thread.

    NOTE(review): store_event is invoked with several different signatures
    below (a dict, positional args with an ``event=`` keyword, and a bare
    params object) -- confirm the callable accepts all of them.
    """
    state = app.events.State()

    def announce_failed_tasks(event):
        state.event(event)
        # task name is sent only with -received event, and state
        # will keep track of this for us.
        task = state.tasks.get(event['uuid'])  # NOTE(review): looked up but unused
        store_event({'type':'task_failed', 'data': event})

    def worker_online(*args):
        logging.info('Worker Online %s', args)
        store_event(args, event="worker_online")

    def worker_offline(*args):
        logging.warn('Worker Offline %s', args)
        store_event(args, event="worker_offline")

    def worker_heartbeat(*args):
        logging.debug("Worker HeartBeat %s", args)
        store_event(args, event="worker_hb")

    def task_received(params):
        """uuid, name, args, kwargs, retries, eta, hostname, timestamp, root_id, parent_id"""
        #print("Task Received", params)
        store_event(params)

    def task_succeeded(params):
        """uuid, result, runtime, hostname, timestamp"""
        store_event(params)
        #print("Task Success", params)

    def task_retried(params):
        """(uuid, exception, traceback, hostname, timestamp)"""
        store_event(params)
        #print("Task RETRIED!!!!", params)

    # Wire each event type to its handler; '*' keeps the State object
    # up to date for every other event.
    with app.connection() as connection:
        recv = app.events.Receiver(connection, handlers={
                'task-failed': announce_failed_tasks,
                'worker-online': worker_online,
                'worker-offline': worker_offline,
                'worker-heartbeat': worker_heartbeat,
                'task-received': task_received,
                'task-sent': task_received,
                'task-succeeded': task_succeeded,
                'task-retried': task_retried,
                '*': state.event,
        })
        recv.capture(limit=None, timeout=None, wakeup=True)
| StarcoderdataPython |
213334 | <reponame>adrianurdar/Automate-The-Boring-Stuff
# madLibs.py - reads in text files and lets the user add their own text anywhere the word
# ADJECTIVE, NOUN, ADVERB, or VERB appears in the text file
# Usage: madLibs.py
import re
from pathlib import Path
import sys
def madLib():
    """Interactively fill in a Mad Libs template file.

    Prompts for a template text file, substitutes the placeholders
    ADJECTIVE, NOUN, VERB and ADVERB with user-supplied words, saves the
    result under a new name, and prints it to the terminal.
    """
    # Ask user to name the file he wants to read
    fileName = input("Name the file you want to read: ")
    # Check if the filename exists
    try:
        # Context manager guarantees the handle is closed (the original
        # never closed the input file).
        with open(fileName + ".txt") as fileInput:
            fileContent = fileInput.read()
    except FileNotFoundError:
        print(fileName + ".txt does not exist.")
        sys.exit()
    # NOTE(review): re.sub replaces *all* occurrences of a placeholder with
    # the same word; prompting once per occurrence would require a loop.
    # Search for ADJECTIVE in the text and replace them
    fileContent = re.sub(r'ADJECTIVE', input('Enter an adjective:\n'), fileContent)
    # Search for NOUN in the text and replace them
    fileContent = re.sub(r'NOUN', input('Enter a noun:\n'), fileContent)
    # Search for VERB in the text and replace them
    fileContent = re.sub(r'VERB', input('Enter a verb:\n'), fileContent)
    # Search for ADVERB in the text and replace them
    fileContent = re.sub(r'ADVERB', input('Enter an adverb:\n'), fileContent)
    # Save the corrected text in a file; `with` flushes and closes the
    # output (the original left fileOutput open, risking unflushed data).
    fileName = input("Save file as: ")
    with open(fileName + '.txt', 'w') as fileOutput:
        fileOutput.write(fileContent)
    # Print that file to the terminal
    print(fileContent)

if __name__ == "__main__":
    madLib()
| StarcoderdataPython |
1624557 | # Copyright (c) 2014 University of California, Davis
#
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# read from inconsistent_articulation.txt
# NOTE(review): this module uses Python 2 print statements and will not run
# under Python 3 without conversion.
f_incon = open("inconsist_articulation.txt","r")
# get the inconsistent articulation
incon_list = []
incon_art = f_incon.readline()
# Drop the first character and the last three before splitting -- presumably
# stripping surrounding delimiters and the newline; confirm against the
# actual file format.
temp_list = incon_art[1:-3].split(" ")
art_type = temp_list[0]
incon_tuple = (temp_list[1],temp_list[2])
incon_list.append(incon_tuple)
print "art_type = ", art_type
print "incon_list = ", incon_list
# get the explanation
explan_list_equals = []
explan_list_overlaps = []
explan_list_includes = []
for line in f_incon:
    explan_art = line[1:-2].split(" ")
    if explan_art[0] == "equals":
        # NOTE(review): "equals" entries store concatenated strings while
        # "overlaps"/"includes" store tuples -- confirm this asymmetry is
        # intentional.
        explan_list_equals.append(explan_art[1]+explan_art[2])
        explan_list_equals.append(explan_art[2]+explan_art[1])
    elif explan_art[0] == "overlaps":
        # Store both orderings since the relation is symmetric.
        explan_list_overlaps.append((explan_art[1],explan_art[2]))
        explan_list_overlaps.append((explan_art[2],explan_art[1]))
    elif explan_art[0] == "includes":
        explan_list_includes.append((explan_art[1],explan_art[2]))
        explan_list_includes.append((explan_art[2],explan_art[1]))
print "explan_list_equals = ", explan_list_equals
print "explan_list_overlaps = ",explan_list_overlaps
print "explan_list_includes = ",explan_list_includes
f_incon.close()
| StarcoderdataPython |
6605164 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from openstackclient.identity.v3 import credential
from openstackclient.tests.identity.v3 import fakes as identity_fakes
from openstackclient.tests import utils
class TestCredential(identity_fakes.TestIdentityv3):
    """Base test case for credential commands.

    Provides a sample EC2-style credential blob and shortcuts to the
    mocked identity managers.
    """

    # Sample credential payload; json_data (built in __init__) holds its
    # serialized form.
    data = {
        "access": "abc123",
        "secret": "hidden-message",
        "trust_id": None
    }

    def __init__(self, *args):
        super(TestCredential, self).__init__(*args)

        self.json_data = json.dumps(self.data)

    def setUp(self):
        super(TestCredential, self).setUp()

        # Get a shortcut to the CredentialManager Mock
        self.credentials_mock = self.app.client_manager.identity.credentials
        self.credentials_mock.reset_mock()

        # Get a shortcut to the UserManager Mock
        self.users_mock = self.app.client_manager.identity.users
        self.users_mock.reset_mock()

        # Get a shortcut to the ProjectManager Mock
        self.projects_mock = self.app.client_manager.identity.projects
        self.projects_mock.reset_mock()
class TestCredentialSet(TestCredential):
    """Tests for the `credential set` command's argument parsing and action."""

    def setUp(self):
        super(TestCredentialSet, self).setUp()
        self.cmd = credential.SetCredential(self.app, None)

    def test_credential_set_no_options(self):
        """Only a credential id, no options: parser must reject."""
        arglist = [
            identity_fakes.credential_id,
        ]

        self.assertRaises(utils.ParserException,
                          self.check_parser, self.cmd, arglist, [])

    def test_credential_set_missing_user(self):
        """--user is required; omitting it must raise a parser error."""
        arglist = [
            '--type', 'ec2',
            '--data', self.json_data,
            identity_fakes.credential_id,
        ]

        self.assertRaises(utils.ParserException,
                          self.check_parser, self.cmd, arglist, [])

    def test_credential_set_missing_type(self):
        """--type is required; omitting it must raise a parser error."""
        arglist = [
            '--user', identity_fakes.user_name,
            '--data', self.json_data,
            identity_fakes.credential_id,
        ]

        self.assertRaises(utils.ParserException,
                          self.check_parser, self.cmd, arglist, [])

    def test_credential_set_missing_data(self):
        """--data is required; omitting it must raise a parser error."""
        arglist = [
            '--user', identity_fakes.user_name,
            '--type', 'ec2',
            identity_fakes.credential_id,
        ]

        self.assertRaises(utils.ParserException,
                          self.check_parser, self.cmd, arglist, [])

    def test_credential_set_valid(self):
        """All required options present: take_action succeeds, returns None."""
        arglist = [
            '--user', identity_fakes.user_name,
            '--type', 'ec2',
            '--data', self.json_data,
            identity_fakes.credential_id,
        ]
        parsed_args = self.check_parser(self.cmd, arglist, [])
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)

    def test_credential_set_valid_with_project(self):
        """The optional --project argument is accepted as well."""
        arglist = [
            '--user', identity_fakes.user_name,
            '--type', 'ec2',
            '--data', self.json_data,
            '--project', identity_fakes.project_name,
            identity_fakes.credential_id,
        ]
        parsed_args = self.check_parser(self.cmd, arglist, [])
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
1670590 | <filename>src/Latis.py
import io # Nécessaire car le fichier est encodé en 'utf-16-le'
import xmltodict # Pour lire facilement le fichier xml
import struct
import binascii
def nettoyage(texte: str):
    """Return the leading, well-formed XML portion of a Latis-Pro file.

    The file does not fully respect the XML standard; only the first part,
    up to and including the closing ``</BLOC-COURBES>`` tag, is valid, so
    exactly that part is extracted.

    Raises ValueError when the closing tag is absent (the original code
    silently returned a meaningless 14-character prefix in that case).
    """
    marker = "</BLOC-COURBES>"
    pos = texte.find(marker)
    if pos == -1:
        raise ValueError("</BLOC-COURBES> tag not found")
    return texte[0:pos + len(marker)]
def decoupe(table):
    # Split the strings into 16-character words (not yet implemented).
    pass
def conversion(table):
    # Return a new table with floats instead of str (not yet implemented).
    # Example decode (note: the function is spelled binascii.unhexlify):
    # struct.unpack('d', binascii.unhexify("3F5205BC01A36E2F"))
    pass
def extract(path: str):
    # In theory returns a list of lists of lists of values:
    # each Latis-Pro curve holds two lists of values, one for x and one for y,
    # and the file may contain several curves.
    # NOTE(review): struct.unpack returns a 1-tuple, so the innermost entries
    # are tuples like (value,) rather than bare floats.
    with io.open(path, 'r', encoding='utf-16-le') as file:  # load the file
        content = nettoyage(file.read())
        doc = xmltodict.parse(content)
        nb_courbes = int(doc["BLOC-COURBES"]["LESCOURBES"]["C"]["@Nb"])
        table_string = []
        # Collect the hexadecimal data strings for each curve (x if present, then y).
        for i in range(nb_courbes):
            table_string.append([])
            if doc["BLOC-COURBES"]["LESCOURBES"]["C"]["C" + str(i)]["DATAX"]["DonneesX"]["DONNEES"]:
                table_string[i].append(
                    doc["BLOC-COURBES"]["LESCOURBES"]["C"]["C" + str(i)]["DATAX"]["DonneesX"]["DONNEES"])
            table_string[i].append(doc["BLOC-COURBES"]["LESCOURBES"]["C"]["C" + str(i)]["DATAY"]["DonneesY"]["DONNEES"])
        table_float = []
        # Decode each 16-hex-digit chunk into an IEEE-754 double.
        for i in range(len(table_string)):
            table_float.append([])
            for j in range(len(table_string[i])):
                table_float[i].append([])
                for k in range(int(len(table_string[i][j]) / 16)):
                    table_float[i][j].append(
                        struct.unpack('d', binascii.unhexlify(table_string[i][j][k * 16:(k + 1) * 16])))
        return table_float
| StarcoderdataPython |
8179937 | import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import scipy.interpolate
import scipy.signal
import scipy.spatial
import scipy.stats
import sys
# %% Load data
# Case number is taken from the command line and selects the input files.
case = int(sys.argv[1])
print("Case " + str(case))
suffix = '_uphavg'
basedata = np.load('/home/nc1472/git/qg-edgeofchaos/poincare_input/case{}_poincare_config_fd_smooth_uphavg.npz'.format(case))
qbar = basedata['qbar']
uy = basedata['uy']
# Periodic grid on [-pi, pi) with nx samples.
nx = 2048
x = np.linspace(-np.pi, np.pi, num=nx, endpoint=False)
# Set up interpolation functions
# xp extends x periodically by `pad` samples on each side so quadratic
# interpolation is well-defined near the domain edges.
pad = 4
xp = np.zeros(nx+2*pad)
xp[pad:-pad] = x
xp[:pad] = x[-pad:] - 2*np.pi
xp[-pad:] = x[:pad] + 2*np.pi
def circularInterpolant(vec):
    """Quadratic interpolant of *vec* on the periodically padded grid xp.

    Pads *vec* circularly with `pad` samples on each side, matching the
    construction of the module-level xp grid.
    """
    vecp = np.concatenate((vec[-pad:], vec, vec[:pad]))
    return scipy.interpolate.interp1d(xp, vecp, kind='quadratic')
uyfft = np.fft.rfft(uy)
# Multiplying the spectrum by 1j shifts every mode by 90 degrees -- a
# Hilbert-transform-like quadrature signal of uy (presumably; confirm the
# sign convention against downstream use).
hilbuy = np.fft.irfft(1j*uyfft)
hilbuyf = circularInterpolant(hilbuy)
uyf = circularInterpolant(uy)
# Compute regions of zonal flow minima and maxima
uyminxs = x[scipy.signal.argrelextrema(uy, np.less)]
uymaxxs = x[scipy.signal.argrelextrema(uy, np.greater)]
# Set up function for computing correlation dimension
def fit_slope(lind, rind, psorted, bounds):
    # Linear fit of log(N(r)) vs log(r) between bounds[lind] and
    # bounds[rind]; the slope estimates the correlation dimension.
    # NOTE(review): reads the module-level `ncorr` array set up inside the
    # analysis loop below -- this function is only valid while that loop
    # iteration's ncorr is current.
    lbound = bounds[lind]
    ubound = bounds[rind]
    sampinds = np.array(list(map(lambda x: int(np.round(x)), np.geomspace(lbound, ubound, num=256))), dtype=int)
    result = scipy.stats.linregress(np.log(psorted[sampinds-1]), np.log(ncorr[sampinds-1]))
    return result
# Set up result arrays
# One column per section index (257 sections), one row per particle.
nparticles = 127
allstdresids = np.zeros((nparticles, 257))
allranresids = np.zeros((nparticles, 257))
allcorrdims = np.zeros((nparticles, 257))
allxavgs = np.zeros((nparticles, 257))
allxstds = np.zeros((nparticles, 257))
allrotnums = np.zeros((nparticles, 257))
# Main analysis loop: for each Poincare section file, compute per-particle
# AR-model residual statistics ("mixing lengths") and correlation dimensions,
# then save everything at the end.
for ind in range(257):
    print(ind)
    data = np.load('/data/nc1472/qg-edgeofchaos/extra_poincare_sections/case{}_section_ind{:03d}{}.npz'.format(case, ind, suffix), 'r')
    z0 = data['y'][:,0]
    yclip = data['yclip']
    yorig = data['y']
    nparticles = len(z0)//2
    colors = np.zeros((nparticles, yclip.shape[1]))  # NOTE(review): unused
    # Average winding rate of the angular coordinates (rows nparticles:).
    rotation_number = (data['y'][nparticles:,-1] - data['y'][nparticles:,0]) / data['y'].shape[1] / 2 / np.pi
    xavg = np.average(data['y'][:nparticles,:], axis=1)
    xstd = np.std(data['y'][:nparticles,:], axis=1)
    # Compute "mixing lengths"
    stdresid = np.zeros(nparticles)
    rangeresid = np.zeros(nparticles)
    for i in range(nparticles):
        xall = data['y'][i,:] - xavg[i]
        # Fit an order-nvar linear autoregressive model via least squares
        # (pinv) and measure the one-step prediction residuals.
        nvar = 16
        ymat = np.zeros((nvar, len(xall)-nvar))
        xmat = np.zeros((nvar, len(xall)-nvar))
        for j in range(nvar):
            if j == 0:
                ymat[j,:] = xall[nvar-j:]
            else:
                ymat[j,:] = xall[nvar-j:-j]
            xmat[j,:] = xall[nvar-j-1:-(j+1)]
        amat = ymat @ np.linalg.pinv(xmat)
        residuals = ymat - (amat @ xmat)
        stdresid[i] = np.sqrt(np.average(np.abs(residuals[0,:])**2))
        rangeresid[i] = np.max(residuals[0,:]) - np.min(residuals[0,:])
    allstdresids[:,ind] = stdresid
    allranresids[:,ind] = rangeresid
    # Compute correlation dimensions
    corrdim = np.zeros(nparticles)
    for i in range(nparticles):
        # Pairwise distances on the cylinder: angular separation wrapped to
        # (-pi, pi], radial separation plain.
        sx = np.mod(yorig[i+nparticles,:], 2*np.pi)
        sy = yorig[i,:]
        sxd = np.mod(scipy.spatial.distance.pdist(np.array([sx]).T)+np.pi, 2*np.pi)-np.pi
        syd = scipy.spatial.distance.pdist(np.array([sy]).T)
        pdists = np.sqrt(sxd**2 + syd**2)
        psorted = np.sort(pdists)
        ncorr = np.arange(1, len(psorted)+1)
        # NOTE(review): `len(psorted+1)` adds 1 to every element before
        # taking len (== len(psorted)); `len(psorted)+1` may be what was
        # intended.
        bounds = list(map(lambda x: int(np.round(x)), np.geomspace(16, len(psorted+1), 32)))
        rsq = []
        slope = []
        lind = 0
        rind = len(bounds)-1
        result = fit_slope(lind, rind, psorted, bounds)
        rsq.append(result.rvalue**2)
        slope.append(result.slope)
        # Greedily shrink the fit window from whichever end improves R^2
        # until the fit is good (R^2 >= 0.999) or the window is minimal.
        while rsq[-1] < 0.999 and (rind-lind)>16:
            resultl = fit_slope(lind+1, rind, psorted, bounds)
            resultr = fit_slope(lind, rind-1, psorted, bounds)
            if resultl.rvalue**2 > resultr.rvalue**2:
                lind = lind+1
                result = resultl
            else:
                rind = rind-1
                result = resultr
            rsq.append(result.rvalue**2)
            slope.append(result.slope)
        corrdim[i] = slope[-1]
    allcorrdims[:,ind] = corrdim
    allxavgs[:,ind] = xavg
    allxstds[:,ind] = xstd
    allrotnums[:,ind] = rotation_number
np.savez('case{}_mixing_lengths.npz'.format(case), allcorrdims=allcorrdims, allxavgs=allxavgs, allxstds=allxstds, allrotnums=allrotnums, allstdresids=allstdresids, allranresids=allranresids)
| StarcoderdataPython |
8157836 | <filename>app/objects/c_data_encoder.py
from abc import abstractmethod
import marshmallow as ma
from app.objects.interfaces.i_object import FirstClassObjectInterface
from app.utility.base_object import BaseObject
class DataEncoderSchema(ma.Schema):
    """Marshmallow schema describing a DataEncoder's serializable fields."""
    name = ma.fields.String()
    description = ma.fields.String()
    module = ma.fields.String()
class DataEncoder(FirstClassObjectInterface, BaseObject):
    """Abstract base for pluggable data encoders.

    Subclasses implement encode()/decode(); instances are stored and
    deduplicated by name in the in-memory object store ('data_encoders').
    """

    schema = DataEncoderSchema()
    # Display form hides the implementation module from API consumers.
    display_schema = DataEncoderSchema(exclude=['module'])

    @property
    def unique(self):
        # Identity is a hash of the encoder name only.
        return self.hash('%s' % self.name)

    def __init__(self, name, description):
        super().__init__()
        self.name = name
        self.description = description

    def store(self, ram):
        """Insert this encoder into *ram* unless one with the same name exists;
        return the stored instance."""
        existing = self.retrieve(ram['data_encoders'], self.unique)
        if not existing:
            ram['data_encoders'].append(self)
            return self.retrieve(ram['data_encoders'], self.unique)
        return existing

    @abstractmethod
    def encode(self, data, **_):
        pass

    @abstractmethod
    def decode(self, data, **_):
        pass
| StarcoderdataPython |
6702289 | <gh_stars>1-10
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function to lookup modules from package names.
PackageModule: Returns a package module given its name.
This module works around a circular import issue where we cannot import
benchmark_sets.py directly into virtual_machine.py. After SetUpPKB is called,
package_lookup.PackageModule is equivalent to benchmark_sets.PackageModule.
"""
from perfkitbenchmarker import errors
_global_package_module_function = None
def SetPackageModuleFunction(function):
  """Sets the function called by PackageModule; See benchmark_sets.py."""
  # Stored in a module-level global to break the circular import between
  # benchmark_sets.py and virtual_machine.py (see module docstring).
  global _global_package_module_function
  _global_package_module_function = function
def PackageModule(package_name):
  """Finds the module for a benchmark by name.

  Args:
    package_name: The name of the package.

  Returns:
    The package's module, or None if the package is invalid.

  Raises:
    errors.Setup.InvalidSetupError: if SetPackageModuleFunction was never
      called (i.e. SetUpPKB has not run).
  """
  if not _global_package_module_function:
    raise errors.Setup.InvalidSetupError(
        'Cannot call package_lookup.py; Was SetUpPKB called?')
  return _global_package_module_function(package_name)
| StarcoderdataPython |
6701123 | import abc
import tensorflow as tf
import numpy as np
from railrl.predictors.state_action_network import StateActionNetwork
from railrl.core.tf_util import he_uniform_initializer, xavier_uniform_initializer, mlp, conv_network, linear
class NNQFunction(StateActionNetwork, metaclass=abc.ABCMeta):
    """Abstract Q-function: a StateActionNetwork with a scalar output."""

    def __init__(
            self,
            name_or_scope,
            **kwargs
    ):
        # Capture constructor args for rllab-style serialization, then
        # delegate to StateActionNetwork with output_dim fixed at 1.
        self.setup_serialization(locals())
        super().__init__(name_or_scope=name_or_scope, output_dim=1, **kwargs)
class FeedForwardCritic(NNQFunction):
    """Fully-connected Q-function.

    The observation is first embedded by an MLP; the action is then
    concatenated onto the embedding and a second ("fusion") MLP maps the
    joint representation through a final linear layer to a scalar Q-value.
    """

    def __init__(
            self,
            name_or_scope,
            hidden_W_init=None,
            hidden_b_init=None,
            output_W_init=None,
            output_b_init=None,
            embedded_hidden_sizes=(100,),
            observation_hidden_sizes=(100,),
            hidden_nonlinearity=tf.nn.relu,
            **kwargs
    ):
        """
        :param name_or_scope: TF variable scope (or name) for this network.
        :param hidden_W_init: Weight initializer for hidden layers
            (default: He uniform).
        :param hidden_b_init: Bias initializer for hidden layers (default: 0).
        :param output_W_init: Weight initializer for the output layer
            (default: uniform in [-3e-3, 3e-3], the DDPG convention).
        :param output_b_init: Bias initializer for the output layer.
        :param embedded_hidden_sizes: Hidden sizes of the fusion MLP applied
            after the action is concatenated on.
        :param observation_hidden_sizes: Hidden sizes of the observation MLP.
        :param hidden_nonlinearity: Activation used in both MLPs.
        """
        self.setup_serialization(locals())
        self.hidden_W_init = hidden_W_init or he_uniform_initializer()
        self.hidden_b_init = hidden_b_init or tf.constant_initializer(0.)
        # Small uniform output init keeps initial Q estimates near zero.
        self.output_W_init = output_W_init or tf.random_uniform_initializer(
            -3e-3, 3e-3)
        self.output_b_init = output_b_init or tf.random_uniform_initializer(
            -3e-3, 3e-3)
        self.embedded_hidden_sizes = embedded_hidden_sizes
        self.observation_hidden_sizes = observation_hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        super().__init__(name_or_scope=name_or_scope, **kwargs)

    def _create_network_internal(self, observation_input, action_input):
        # Build the graph: obs MLP -> concat(action) -> fusion MLP -> linear.
        observation_input = self._process_layer(observation_input,
                                                scope_name="observation_input")
        action_input = self._process_layer(action_input,
                                           scope_name="action_input")
        with tf.variable_scope("observation_mlp"):
            observation_output = mlp(
                observation_input,
                self.observation_dim,
                self.observation_hidden_sizes,
                self.hidden_nonlinearity,
                W_initializer=self.hidden_W_init,
                b_initializer=self.hidden_b_init,
                pre_nonlin_lambda=self._process_layer,
            )
        observation_output = self._process_layer(
            observation_output,
            scope_name="observation_output"
        )
        # NOTE: tf.concat(axis_first) is the pre-TF-1.0 argument order; this
        # file targets that legacy TensorFlow API.
        embedded = tf.concat(1, [observation_output, action_input])
        embedded_dim = self.action_dim + self.observation_hidden_sizes[-1]
        with tf.variable_scope("fusion_mlp"):
            fused_output = mlp(
                embedded,
                embedded_dim,
                self.embedded_hidden_sizes,
                self.hidden_nonlinearity,
                W_initializer=self.hidden_W_init,
                b_initializer=self.hidden_b_init,
                pre_nonlin_lambda=self._process_layer,
            )
        fused_output = self._process_layer(fused_output)
        with tf.variable_scope("output_linear"):
            # Final linear layer producing the scalar Q-value.
            return linear(
                fused_output,
                self.embedded_hidden_sizes[-1],
                1,
                W_initializer=self.output_W_init,
                b_initializer=self.output_b_init,
            )
class ConvNNCritic(NNQFunction):
    """Convolutional Q-function for image observations.

    A conv net embeds the observation, an MLP reduces the embedding, the
    action is concatenated on, and a fusion MLP followed by a linear layer
    produces the scalar Q-value.
    """

    def __init__(self,
                 name_or_scope,
                 input_shape,
                 conv_filters=(32, 32, 32, 32, 32),
                 conv_filter_sizes=((3, 3), (3, 3), (3, 3), (3, 3), (3, 3)),
                 conv_strides=(2, 2, 2, 2, 2),
                 conv_pads=('SAME', 'SAME', 'SAME', 'SAME', 'SAME'),
                 observation_hidden_sizes=(256,),
                 embedded_hidden_sizes=(256,),
                 hidden_W_init=None,
                 hidden_b_init=None,
                 output_W_init=None,
                 output_b_init=None,
                 hidden_nonlinearity=tf.nn.relu,
                 **kwargs
                 ):
        """
        :param name_or_scope: TF variable scope (or name) for this network.
        :param input_shape: Shape of the image observation; also passed to
            the superclass as ``observation_dim``.
        :param conv_filters/conv_filter_sizes/conv_strides/conv_pads:
            Per-layer configuration of the convolutional stack.
        :param observation_hidden_sizes: Hidden sizes of the MLP applied to
            the flattened conv features.
        :param embedded_hidden_sizes: Hidden sizes of the fusion MLP applied
            after the action is concatenated on.
        :param hidden_nonlinearity: Activation used throughout.
        """
        self.setup_serialization(locals())
        self.input_shape = input_shape
        self.hidden_W_init = hidden_W_init or xavier_uniform_initializer()
        self.hidden_b_init = hidden_b_init or tf.constant_initializer(0.)
        # Small uniform output init keeps initial Q estimates near zero.
        self.output_W_init = output_W_init or tf.random_uniform_initializer(
            -3e-3, 3e-3)
        self.output_b_init = output_b_init or tf.random_uniform_initializer(
            -3e-3, 3e-3)
        self.conv_filters = conv_filters
        self.conv_filter_sizes = conv_filter_sizes
        self.conv_strides = conv_strides
        self.conv_pads = conv_pads
        self.embedded_hidden_sizes = embedded_hidden_sizes
        self.observation_hidden_sizes = observation_hidden_sizes
        self.hidden_nonlinearity = hidden_nonlinearity
        super().__init__(name_or_scope=name_or_scope,
                         observation_dim=input_shape, **kwargs)

    def _create_network_internal(self, observation_input=None,
                                 action_input=None):
        assert observation_input is not None and action_input is not None
        observation_input = self._process_layer(observation_input,
                                                scope_name="observation_input")
        action_input = self._process_layer(action_input,
                                           scope_name="action_input")
        with tf.variable_scope("conv_network"):
            observation_output, output_shape = conv_network(
                observation_input,
                self.input_shape,
                self.conv_filters,
                self.conv_filter_sizes,
                self.conv_strides,
                self.conv_pads,
                self.observation_hidden_sizes,
                self.hidden_nonlinearity,
                W_initializer=self.hidden_W_init,
                b_initializer=self.hidden_b_init,
                pre_nonlin_lambda=self._process_layer,
            )
        output_dim = np.prod(output_shape[1:])
        # BUG FIX: flatten() takes only the input tensor; the original also
        # passed [-1, output_dim], which is not a valid argument.
        observation_output = tf.contrib.layers.flatten(observation_output)
        with tf.variable_scope("mlp"):
            observation_output = mlp(
                observation_output,
                output_dim,
                self.observation_hidden_sizes,
                self.hidden_nonlinearity,
                W_initializer=self.hidden_W_init,
                b_initializer=self.hidden_b_init,
                pre_nonlin_lambda=self._process_layer,
            )
        embedded = tf.concat(1, [observation_output, action_input])
        embedded_dim = self.action_dim + self.observation_hidden_sizes[-1]
        with tf.variable_scope("fusion_mlp"):
            fused_output = mlp(
                embedded,
                embedded_dim,
                self.embedded_hidden_sizes,
                self.hidden_nonlinearity,
                W_initializer=self.hidden_W_init,
                b_initializer=self.hidden_b_init,
                pre_nonlin_lambda=self._process_layer,
            )
        fused_output = self._process_layer(fused_output)
        with tf.variable_scope("output"):
            # BUG FIX: the original fed observation_output (whose width is
            # observation_hidden_sizes[-1]) into a linear layer declared with
            # input width embedded_hidden_sizes[-1], and never used the
            # fusion MLP's result. Feed fused_output, whose width actually is
            # embedded_hidden_sizes[-1]. Also use the configured output
            # initializers instead of ignoring them (matches
            # FeedForwardCritic).
            return linear(
                fused_output,
                self.embedded_hidden_sizes[-1],
                1,
                W_initializer=self.output_W_init,
                b_initializer=self.output_b_init,
            )
| StarcoderdataPython |
1821487 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
# First line: number of test cases; each following line is a move string
# made of the characters L, R, U, D.
t = int(input())
for _ in range(t):
    moves = input()
    # Surplus (unbalanced) moves along each axis.
    horiz_surplus = abs(moves.count('L') - moves.count('R'))
    vert_surplus = abs(moves.count('U') - moves.count('D'))
    # sum(divmod(x, 2)) == x // 2 + x % 2 == ceil(x / 2); print the total
    # ceil-half surplus over both axes.
    print((horiz_surplus + 1) // 2 + (vert_surplus + 1) // 2)
| StarcoderdataPython |
5176173 | <filename>matmodlab2/core/material.py
import numpy as np
from collections import OrderedDict
from copy import deepcopy as copy
from .misc import add_metaclass
class BaseMaterial(type):
    """Metaclass that attaches an empty ``aux_models`` registry (an
    ``OrderedDict`` keyed by model name) to every new instance, after the
    instance's own ``__init__`` has run.
    """

    def __call__(cls, *args, **kwargs):
        """Called before __init__ method is called"""
        # Construct the instance normally, then bolt on the registry.
        instance = super(BaseMaterial, cls).__call__(*args, **kwargs)
        instance.aux_models = OrderedDict()
        return instance
@add_metaclass(BaseMaterial)
class Material(object):
    """The material model base class

    Notes
    -----
    The `Material` class is a base class and is meant to be inherited by
    concrete implementations of material models. At minimum, the material model
    must provide an `eval` method that is called by the model driver to update
    the material state. See the documentation for `eval` for more details.

    For material models that require state dependent variable tracking, the
    `num_sdv` member must be set to the number of state dependent variables
    required. Optionally, the `sdv_names` member can also be set to a list of
    state dependent variable names (for output purposes). State dependent
    variables are initialized to 0. The method `sdvini` can optionally be
    defined that returns alternative values for state dependent variables. See
    the documentation for `sdvini` for more information.
    """
    name = None
    num_sdv = None
    sdv_names = None
    # Set to True once the model is attached to a MaterialPointSimulator;
    # aux (addon) models may no longer be added after that point.
    assigned = False

    def sdvini(self, statev):
        """Initialize the state dependent variables

        Parameters
        ----------
        statev : ndarray or None
            If `self.num_sdv is None` than `statev` is also `None`, otherwise
            it an array of zeros `self.num_sdv` in length

        Returns
        -------
        statev : ndarray or None
            The initialized state dependent variables.

        Notes
        -----
        This base method does not need to be overwritten if a material does not
        have any state dependent variables, or their initial values should be
        zero.
        """
        return statev

    @property
    def num_aux_sdv(self):
        """Total number of state variables owned by attached aux models."""
        return sum(x.num_sdv for x in self.aux_models.values())

    def get_aux_model_sdv_slice(self, aux_model):
        """Return the slice of the statev array owned by `aux_model`.

        Aux model state variables are stored after the material's own
        `num_sdv` entries, in aux-model registration order.

        Raises
        ------
        ValueError
            If `aux_model` is not registered with this material.
        """
        num_sdv = getattr(self, 'num_sdv', None)
        start = 0 if num_sdv is None else num_sdv
        for model in self.aux_models.values():
            if model == aux_model:
                break
            start += model.num_sdv
        else:
            # BUG FIX: the original referenced an undefined name `key` here,
            # so a missing aux model raised NameError instead of the intended
            # ValueError.
            raise ValueError('No such aux model: {0!r}'.format(aux_model))
        end = start + aux_model.num_sdv
        return slice(start, end)

    def base_eval(self, kappa, time, dtime, temp, dtemp,
                  F0, F, strain, d, stress, ufield, dufield, statev,
                  initial_temp, **kwds):
        """Wrapper method to material.eval. This is called by Matmodlab so that
        addon models can first be evaluated. See documentation for eval.
        """
        from matmodlab2.materials.expansion import ExpansionModel
        from matmodlab2.materials.viscoelastic import ViscoelasticModel
        from matmodlab2.materials.effective_stress import EffectiveStressModel
        num_sdv = getattr(self, 'num_sdv', None)
        if ExpansionModel.name in self.aux_models:
            # Evaluate thermal expansion
            aux_model = self.aux_models[ExpansionModel.name]
            # Determine starting point in statev array
            x_slice = self.get_aux_model_sdv_slice(aux_model)
            aux_model.eval(kappa, time, dtime, temp, dtemp,
                           F0, F, strain, d, stress, statev[x_slice],
                           initial_temp=initial_temp,
                           ufield=ufield, dufield=dufield, **kwds)
        if EffectiveStressModel.name in self.aux_models:
            # Evaluate effective stress model
            aux_model = self.aux_models[EffectiveStressModel.name]
            # Determine starting point in statev array
            x_slice = self.get_aux_model_sdv_slice(aux_model)
            aux_model.eval(kappa, time, dtime, temp, dtemp,
                           F0, F, strain, d, stress, statev[x_slice],
                           initial_temp=initial_temp,
                           ufield=ufield, dufield=dufield, **kwds)
        # Evaluate the material model
        xv = None if num_sdv is None else statev[:num_sdv]
        sig, xv, ddsdde = self.eval(time, dtime, temp, dtemp, F0, F, strain, d,
                                    stress, xv, ufield=ufield, dufield=dufield,
                                    **kwds)
        if xv is not None:
            statev[:num_sdv] = xv
        if ViscoelasticModel.name in self.aux_models:
            # Evaluate the viscoelastic overstress model
            aux_model = self.aux_models[ViscoelasticModel.name]
            # Determine starting point in statev array
            x_slice = self.get_aux_model_sdv_slice(aux_model)
            # The aux model's return value is intentionally unused (the
            # original bound it to an unused local `cfac`); see note below.
            aux_model.eval(kappa, time, dtime, temp, dtemp,
                           F0, F, strain, d, stress, statev[x_slice],
                           initial_temp=initial_temp,
                           ufield=ufield, dufield=dufield, **kwds)
            # Force the use of a numerical stiffness - otherwise we would have
            # to convert the stiffness to that corresponding to the Truesdell
            # rate, pull it back to the reference frame, apply the visco
            # correction, push it forward, and convert to Jaummann rate. It's
            # not as trivial as it sounds...
            ddsdde = None
        if EffectiveStressModel.name in self.aux_models:
            # Add pore pressure back
            aux_model = self.aux_models[EffectiveStressModel.name]
            # Determine starting point in statev array
            x_slice = self.get_aux_model_sdv_slice(aux_model)
            aux_model.posteval(kappa, time, dtime, temp, dtemp,
                               F0, F, strain, d, stress, statev[x_slice],
                               initial_temp=initial_temp,
                               ufield=ufield, dufield=dufield, **kwds)
        return sig, statev, ddsdde

    def eval(self, time, dtime, temp, dtemp,
             F0, F, strain, d, stress, statev, **kwds):
        """Evaluate the material model

        Parameters
        ----------
        time : float
            Time at beginning of step
        dtime : float
            Time step length. `time+dtime` is the time at the end of the step
        temp : float
            Temperature at beginning of step
        dtemp : float
            Temperature increment. `temp+dtemp` is the temperature at the end
            of the step
        F0, F : ndarray
            Deformation gradient at the beginning and end of the step
        strain : ndarray
            Strain at the beginning of the step
        d : ndarray
            Symmetric part of the velocity gradient at the middle of the step
        stress : ndarray
            Stress at the beginning of the step
        statev : ndarray
            State variables at the beginning of the step

        Returns
        -------
        stress : ndarray
            Stress at the end of the step
        statev : ndarray
            State variables at the end of the step
        ddsdde : ndarray
            Elastic stiffness (Jacobian) of the material

        Notes
        -----
        Each material model is responsible for returning the elastic stiffness.
        If an analytic elastic stiffness is not known, return `None` and it
        will be computed numerically.

        The input arrays `stress` and `statev` are mutable and copies are not
        passed in. DO NOT MODIFY THEM IN PLACE. Doing so can cause problems
        down stream.
        """
        raise NotImplementedError

    def Expansion(self, alpha):
        """Attach a thermal expansion aux model with coefficient `alpha`."""
        if self.assigned:
            raise ValueError('Expansion model must be created before assigning '
                             'material model to MaterialPointSimulator')
        from matmodlab2.materials.expansion import ExpansionModel
        self.aux_models[ExpansionModel.name] = ExpansionModel(alpha)

    def Viscoelastic(self, wlf, prony):
        """Attach a viscoelastic overstress aux model (WLF + Prony series)."""
        if self.assigned:
            raise ValueError('Viscoelastic model must be created before assigning '
                             'material model to MaterialPointSimulator')
        from matmodlab2.materials.viscoelastic import ViscoelasticModel
        self.aux_models[ViscoelasticModel.name] = ViscoelasticModel(wlf, prony)

    def EffectiveStress(self, porepres):
        """Attach an effective stress (pore pressure) aux model."""
        if self.assigned:
            raise ValueError('EffectiveStress model must be created before assigning '
                             'material model to MaterialPointSimulator')
        from matmodlab2.materials.effective_stress import EffectiveStressModel
        self.aux_models[EffectiveStressModel.name] = EffectiveStressModel(porepres)
| StarcoderdataPython |
308658 | # Advent of Code 2020
# Day 02
# Author: irobin591
import os
import doctest
import re
# Read the puzzle input (one "lo-hi letter: password" entry per line).
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
    input_data = input_file.read().strip().split('\n')
def prep_data(input_data):
    """Parse policy/password lines into 4-tuples of strings.

    Each line looks like ``"1-3 a: abcde"``. The returned tuples hold, in
    order: the first number, the second number, the letter, and the password,
    all as matched strings.
    """
    # 0: min amount of char | no 1
    # 1: max amount of char | no 2
    # 2: char
    # 3: string
    pattern = re.compile(r'^([0-9]+)-([0-9]+) ([a-z]+): ([a-z]*)$')
    return [pattern.match(line).groups() for line in input_data]
def part1(input_data):
    """Count passwords whose letter occurs within the required range.

    >>> part1(["1-3 a: abcde","1-3 b: cdefg","2-9 c: ccccccccc"])
    2
    """
    rules = prep_data(input_data)
    # A password is valid when its letter count lies in [lo, hi].
    return sum(
        1
        for lo, hi, letter, password in rules
        if int(lo) <= password.count(letter) <= int(hi)
    )
def part2(input_data):
    """Count passwords where exactly one of the two positions holds the letter.

    >>> part2(["1-3 a: abcde","1-3 b: cdefg","2-9 c: ccccccccc"])
    1
    """
    rules = prep_data(input_data)
    total = 0
    for pos1, pos2, letter, password in rules:
        # The positions are 1-indexed; "exactly one matches" is a XOR.
        first_hit = password[int(pos1) - 1] == letter
        second_hit = password[int(pos2) - 1] == letter
        if first_hit != second_hit:
            total += 1
    return total
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass | StarcoderdataPython |
1755509 | #! /usr/bin/env python
import sys
# Make the project's sub-packages importable without installation.
# NOTE(review): each insert shifts sys.path, so the later inserts index into
# the *already modified* list (sys.path[1], [2], ...) -- the resulting search
# paths depend on this exact ordering; verify before reordering.
sys.path.insert(0, sys.path[0]+'/source')
sys.path.insert(0, sys.path[0]+'/Objects')
sys.path.insert(0, sys.path[1]+'/Calculator')
sys.path.insert(0, sys.path[2]+'/Solver')
sys.path.insert(0, sys.path[3]+'/Models')
sys.path.insert(0, sys.path[4]+'/Models/AGN_Models')
sys.path.insert(0, sys.path[5]+'/Models/Surface_Brightness_Models')
sys.path.insert(0, sys.path[6]+'/Models/Event_Map_Models')
sys.path.insert(0, sys.path[7]+'/XTRA_pkg')
sys.path.insert(0, sys.path[8]+'/Utilities')
FLUX_MODE = False
if FLUX_MODE:
import matplotlib
matplotlib.use('Agg')
from XTRA_pkg import __logo__
from mainPipeline import makingXRayCatalogs,\
makingXRayRealization,\
makingEventMap
# print(__logo__)
print "XTRA (c) <NAME>"
if len(sys.argv) < 1:
print "(1) HALOS MODE"
print "(2) SB MAP MODE"
print "(3) EVENT MAP MODE"
try:
ans = int(sys.argv[1]) # int(raw_input("Please enter the mode number : "))
except IndexError:
print "Usage: >> [MODE NUMBER] [INPUT]"
raise SystemExit
if (ans == 1):
makingXRayCatalogs()
if (ans == 2):
makingXRayRealization()
if (ans == 3):
makingEventMap()
else:
pass
| StarcoderdataPython |
6469973 | <reponame>Allianaab2m/teammaker<filename>cogs/team.py
from discord import Message
import discord
from discord.ext.commands import Bot, Cog, Context, command
import random
class Team(Cog):
    """Discord cog that randomly splits a voice channel into two teams."""
    __slots__ = "bot"

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    @command()
    async def team(self, ctx: Context) -> None:
        """Shuffle the caller's voice-channel members into alpha/beta teams,
        announce both rosters, and assign the matching roles.

        NOTE(review): assumes roles named "alpha-team" and "beta-team" exist
        in the guild (created elsewhere, presumably by a setup command) --
        ``discord.utils.get`` returns None otherwise and ``add_roles`` fails.
        """
        guild = ctx.guild
        try:
            # Members currently in the command author's voice channel;
            # raises AttributeError if the author is not connected.
            member: list = [members for members in ctx.author.voice.channel.members]
            # members_name = [members.name for members in ctx.author.voice.channel.members]
            # member_per_teams: int = round(len(member)/2)
            random.shuffle(member)
            # Alternate shuffled members between the two teams (even/odd index).
            alpha_members = member[0::2]
            beta_members = member[1::2]
            alpha_members_name = [alpha_members.name for alpha_members in alpha_members]
            beta_members_name = [beta_members.name for beta_members in beta_members]
            await ctx.send(f"AlphaTeam:{alpha_members_name}")
            await ctx.send(f"BetaTeam:{beta_members_name}")
            for alpha_member in alpha_members:
                alpha_role: discord.Role = discord.utils.get(guild.roles, name="alpha-team")
                await alpha_member.add_roles(alpha_role)
            for beta_member in beta_members:
                beta_role: discord.Role = discord.utils.get(guild.roles, name="beta-team")
                await beta_member.add_roles(beta_role)
        except AttributeError as ae:
            # Japanese user-facing error: "not connected to VC, or setup did
            # not run correctly, so the split could not be completed."
            await ctx.send(f"エラー:VCに接続していない,もしくはsetupコマンドが正常に動作していないとかで振り分けが完了しませんでした。\n"
                           f"エラー内容:{ae}")
def setup(bot: Bot) -> None:
    """discord.py extension entry point: register the Team cog on the bot."""
    bot.add_cog(Team(bot))
| StarcoderdataPython |
8001768 | import cv2
from PIL import Image
import numpy as np
import os.path as osp
from utils import getInstanceColorImage, make_palette, color_seg
import argparse
def getDispSeman(ins_img_mask):
    """Mask the disparity image with an instance mask and save the result.

    NOTE(review): the input path ("disparity/6_l.png") and the output file
    name are hard-coded; assumes ``ins_img_mask`` is a 0/1 array with the
    same height/width as the disparity image -- TODO confirm against callers.
    """
    disp_img = cv2.imread("disparity/6_l.png")
    disp_img = cv2.cvtColor(disp_img, cv2.COLOR_BGR2GRAY)
    # Zero out disparity everywhere outside the instance mask.
    sub_disp_img = ins_img_mask * disp_img
    cv2.imwrite("sub_disp_img.jpg", sub_disp_img)
def getSimpleMask(obj_num, instance_id):
    """Return a binary (0/1) mask selecting the pixels of one instance.

    Parameters
    ----------
    obj_num : int
        Instance id to isolate.
    instance_id : ndarray
        Integer array of per-pixel instance ids.

    Returns
    -------
    ndarray
        Same shape/dtype as `instance_id`, with 1 where the pixel's id
        equals `obj_num` (and `obj_num` >= 1) and 0 elsewhere.

    Notes
    -----
    BUG FIX: the original zeroed `instance_id` in place before clipping,
    silently clobbering the caller's array. This version computes the same
    result without mutating the input.
    """
    masked = np.where(instance_id == obj_num, instance_id, 0)
    return np.clip(masked, 0, 1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int)
args = parser.parse_args()
pid = args.id
sem_dir_name = "Semantic"
img_dir_name = "image"
pic_name = "{}_l.png".format(pid)
img_name = "{}_l.jpg".format(pid)
sem_path = osp.join(sem_dir_name, pic_name)
img_path = osp.join(img_dir_name, img_name)
sem_img = Image.open(sem_path, "r")
sem_img = np.array(sem_img)
semantic_id = (sem_img >> 8).astype(np.uint8)
instance_id = sem_img.astype(np.uint8)
instance_id += semantic_id
color_num = np.max(semantic_id) + np.max(instance_id)
palette = make_palette(color_num)
ins_img = color_seg(instance_id, palette)
cv2.imwrite("Semantic_color/{}_l.jpg".format(pid), ins_img)
obj_num = 34
instance_id[instance_id != obj_num] = 0
instance_id = Image.fromarray(instance_id)
instance_id.save("2_l_simple_mask.png")
img = cv2.imread(img_path)
ins_img_mask = getSimpleMask(34, np.array(instance_id))
sub_img_mask = cv2.merge([ins_img_mask, ins_img_mask, ins_img_mask])
sub_img = sub_img_mask * img
cv2.imwrite("{}_l.jpg".format(pid), sub_img)
| StarcoderdataPython |
5059481 | from django.contrib.sites.models import Site
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.translation import gettext as _
from django_comments.models import Comment
import pytz
from qatrack.attachments.models import Attachment
from qatrack.qa.models import Frequency, TestListInstance
from qatrack.qa.tests import utils
from qatrack.reports import qc
from qatrack.units.models import Site as USite
class TestTestListInstanceSummaryReport(TestCase):
    """Tests for qc.TestListInstanceSummaryReport (queryset, filenames,
    formatting helpers, site filtering, and CSV table output)."""

    def test_filter_form_valid(self):
        """If queryset.count() > MAX_TLIS then filter_form should get an error added"""
        rep = qc.TestListInstanceSummaryReport()
        # Force the limit below any possible count so validation must fail.
        rep.MAX_TLIS = -1
        ff = rep.get_filter_form()
        resp = rep.filter_form_valid(ff)
        assert resp is False
        assert '__all__' in ff.errors and "Please reduce" in ff.errors['__all__'][0]

    def test_get_queryset(self):
        assert qc.TestListInstanceSummaryReport().get_queryset().model._meta.model_name == "testlistinstance"

    def test_get_filename(self):
        assert qc.TestListInstanceSummaryReport().get_filename('pdf') == 'test-list-instance-summary.pdf'

    def test_get_utc_site(self):
        # 'null' selections are rendered as "Other".
        site = USite.objects.create(name="site")
        sites = qc.TestListInstanceSummaryReport().get_unit_test_collection__unit__site_details([site, 'null'])
        assert sites == ('Site(s)', 'site, Other')

    def test_get_utc_freq(self):
        # 'null' selections are rendered as "Ad Hoc".
        freq = Frequency.objects.create(name="freq", window_start=0, window_end=0)
        freqs = qc.TestListInstanceSummaryReport().get_unit_test_collection__frequency_details([freq, 'null'])
        assert freqs == ('Frequencies', 'freq, Ad Hoc')

    @override_settings(TIME_ZONE="America/Toronto")
    def test_get_work_completed_html(self):
        # HTML output contains a link around the localized date.
        rep = qc.TestListInstanceSummaryReport()
        rep.report_format = "html"
        tz = pytz.timezone("America/Toronto")
        work_completed = tz.localize(timezone.datetime(2019, 1, 1, 12))
        tli = utils.create_test_list_instance(work_completed=work_completed)
        wc = rep.get_work_completed(tli)
        assert "01 Jan 2019" in wc
        assert "href" in wc

    @override_settings(TIME_ZONE="America/Toronto")
    def test_get_work_completed_plain(self):
        # Plain-text formats get the date only, with no markup.
        rep = qc.TestListInstanceSummaryReport()
        rep.report_format = "csv"
        tz = pytz.timezone("America/Toronto")
        work_completed = tz.localize(timezone.datetime(2019, 1, 1, 12))
        tli = utils.create_test_list_instance(work_completed=work_completed)
        wc = rep.get_work_completed(tli)
        assert "01 Jan 2019" in wc
        assert "href" not in wc

    @override_settings(TIME_ZONE="America/Toronto")
    def test_get_pass_fail_html(self):
        rep = qc.TestListInstanceSummaryReport()
        rep.report_format = "html"
        tli = utils.create_test_list_instance()
        pf = rep.get_pass_fail_status(tli)
        assert "<span" in pf

    @override_settings(TIME_ZONE="America/Toronto")
    def test_get_pass_fail_plain(self):
        rep = qc.TestListInstanceSummaryReport()
        rep.report_format = "csv"
        tli = utils.create_test_list_instance()
        pf = rep.get_pass_fail_status(tli)
        assert pf == ''  # no test instances, just want to make sure no html tags in status for plain text report

    def test_get_tlis_for_site(self):
        # Only instances belonging to units of the given site are returned.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        utils.create_test_list_instance(unit_test_collection=utc2)
        qs = TestListInstance.objects.all()
        tlis = qc.TestListInstanceSummaryReport().get_tlis_for_site(qs, site)
        assert list([x.pk for x in tlis]) == [tli.pk]

    def test_get_tlis_for_null_site(self):
        # Passing None selects instances on units with no site assigned.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        utils.create_test_list_instance(unit_test_collection=utc)
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        tli2 = utils.create_test_list_instance(unit_test_collection=utc2)
        qs = TestListInstance.objects.all()
        tlis = qc.TestListInstanceSummaryReport().get_tlis_for_site(qs, None)
        assert list([x.pk for x in tlis]) == [tli2.pk]

    def test_to_table(self):
        # One tli on a sited unit plus one on a site-less unit: both appear
        # as rows after the header in the CSV table.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        utils.create_test_list_instance(unit_test_collection=utc)
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        utils.create_test_list_instance(unit_test_collection=utc2)
        rep = qc.TestListInstanceSummaryReport()
        rep.report_format = "csv"
        context = rep.get_context()
        table = rep.to_table(context)
        header_row = table.index([
            'Site',
            'Unit',
            'Test list',
            'Due Date',
            'Work Completed',
            'Pass/Fail Status',
            'Link',
        ])
        # should be two tlis after header
        assert len(table[header_row + 1:]) == 2
class TestTestListInstanceDetailsReport(TestCase):
    """Tests for qc.TestListInstanceDetailsReport, including HTML generation
    with comments/attachments and per-instance CSV table layout."""

    def test_filter_form_valid(self):
        """If queryset.count() > MAX_TLIS then filter_form should get an error added"""
        rep = qc.TestListInstanceDetailsReport()
        rep.MAX_TLIS = -1
        ff = rep.get_filter_form()
        resp = rep.filter_form_valid(ff)
        assert resp is False
        assert '__all__' in ff.errors and "Please reduce" in ff.errors['__all__'][0]

    def test_get_queryset(self):
        assert qc.TestListInstanceDetailsReport().get_queryset().model._meta.model_name == "testlistinstance"

    def test_get_filename(self):
        fname = qc.TestListInstanceDetailsReport().get_filename('pdf')
        assert fname == 'test-list-instance-details.pdf'

    def test_get_unit_test_collection_details(self):
        utc = utils.create_unit_test_collection()
        det = qc.TestListInstanceDetailsReport().get_unit_test_collection_details([utc.pk])
        assert det == ('Unit / Test List', '%s - %s' % (utc.unit.name, utc.name))

    def test_generate_html(self):
        # Smoke test: rendering must succeed with a comment attached to a tli.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        utils.create_test_list_instance(unit_test_collection=utc)
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        tli2 = utils.create_test_list_instance(unit_test_collection=utc2)
        comment = Comment(
            submit_date=timezone.now(),
            user=tli2.created_by,
            content_object=tli2,
            comment='test comment',
            site=Site.objects.latest("pk"),
        )
        comment.save()
        rep = qc.TestListInstanceDetailsReport(report_opts={'unit_test_collection': [utc.pk]})
        rep.report_format = "pdf"
        rep.to_html()

    def test_to_table(self):
        # Build three tlis (one auto-reviewed, one with history, comments and
        # attachments) and check one sub-table header is emitted per tli.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        # make this tli autoreviewed
        tli.all_reviewed = True
        tli.reviewed_by = None
        tli.save()
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        tli2 = utils.create_test_list_instance(unit_test_collection=utc2)
        # give tli2 some history
        tli3 = utils.create_test_list_instance(
            unit_test_collection=utc2,
            work_completed=tli2.work_completed - timezone.timedelta(days=2),
        )
        ti = utils.create_test_instance(test_list_instance=tli3)
        # tli comment
        comment = Comment(
            submit_date=timezone.now(),
            user=tli2.created_by,
            content_object=tli2,
            comment='test comment',
            site=Site.objects.latest("pk"),
        )
        comment.save()
        attachment = Attachment(
            attachment=ContentFile("content", "content.pdf"),
            created_by=tli.created_by,
            testlistinstance=tli2,
        )
        attachment.save()
        attachment = Attachment(
            attachment=ContentFile("content", "content.pdf"),
            created_by=tli.created_by,
            testinstance=ti,
        )
        attachment.save()
        rep = qc.TestListInstanceDetailsReport(report_opts={'unit_test_collection': [utc.pk, utc2.pk]})
        rep.report_format = "csv"
        context = rep.get_context()
        table = rep.to_table(context)
        ntlis = table.count([
            _('Test'),
            _('Value'),
            _('Reference'),
            _('Tolerance'),
            _('Pass/Fail'),
            _('Review Status'),
            _('Comment'),
            _('Attachments'),
        ])
        # should be three tlis
        assert ntlis == 3
class TestTestInstanceDetailsReport(TestCase):
    """Tests for qc.TestInstanceDetailsReport covering both row organizations
    ('one_per_row' and 'group_by_unit_test_date')."""

    def test_filter_form_valid(self):
        """If queryset.count() > MAX_TLIS then filter_form should get an error added"""
        rep = qc.TestInstanceDetailsReport()
        rep.MAX_TIS = -1
        ff = rep.get_filter_form()
        resp = rep.filter_form_valid(ff)
        assert resp is False
        assert '__all__' in ff.errors and "Please reduce" in ff.errors['__all__'][0]

    def test_get_queryset(self):
        assert qc.TestInstanceDetailsReport().get_queryset().model._meta.model_name == "testinstance"

    def test_get_filename(self):
        assert qc.TestInstanceDetailsReport().get_filename('pdf') == 'test-instance-details.pdf'

    def test_get_unit_test_info__test_details(self):
        test = utils.create_test()
        tests = qc.TestInstanceDetailsReport().get_unit_test_info__test_details([test.pk])
        assert tests == ('Test', test.name)

    def test_get_organization_details(self):
        org = qc.TestInstanceDetailsReport().get_organization_details('one_per_row')
        assert org == ('Organization', 'One Test Instance Per Row')

    def test_generate_html_group_by_unit_test_date(self):
        # The grouped organization is not available for pdf/html output.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        ti = utils.create_test_instance(test_list_instance=tli)
        rep = qc.TestInstanceDetailsReport(
            report_opts={
                'unit_test_info__test': [ti.unit_test_info.test.pk],
                'organization': 'group_by_unit_test_date'
            }
        )
        rep.report_format = "pdf"
        assert 'not supported for' in rep.to_html()

    def test_generate_html_one_per_row(self):
        # Smoke test: the default organization renders without error.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        ti = utils.create_test_instance(test_list_instance=tli)
        rep = qc.TestInstanceDetailsReport(
            report_opts={
                'unit_test_info__test': [ti.unit_test_info.test.pk],
                'organization': 'one_per_row'
            }
        )
        rep.report_format = "pdf"
        rep.to_html()

    def test_to_table_one_per_row_csv(self):
        # Two instances of the same test yield two rows after the header.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        ti = utils.create_test_instance(test_list_instance=tli)
        utils.create_test_instance(
            test_list_instance=tli,
            unit_test_info=ti.unit_test_info,
            work_completed=tli.work_completed - timezone.timedelta(days=1),
        )
        rep = qc.TestInstanceDetailsReport(
            report_opts={
                'unit_test_info__test': [ti.unit_test_info.test.pk],
                'organization': 'one_per_row'
            }
        )
        rep.report_format = "csv"
        context = rep.get_context()
        table = rep.to_table(context)
        header_row = table.index([
            _("Work Completed"),
            _("Test"),
            _("Unit"),
            _("Site"),
            _("Value"),
            _("Reference"),
            _("Tolerance"),
            _("Skipped"),
            _("Performed By"),
            _("Comment"),
        ])
        # should be two tis after header
        assert len(table[header_row + 1:]) == 2

    def test_to_table_group_by_unit_test_date_csv(self):
        # Grouped layout: one row per date, one column set per unit/test.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        ti = utils.create_test_instance(test_list_instance=tli)
        utils.create_test_instance(
            test_list_instance=tli,
            unit_test_info=ti.unit_test_info,
            work_completed=tli.work_completed - timezone.timedelta(days=1),
        )
        ti3 = utils.create_test_instance(
            test_list_instance=tli,
            work_completed=tli.work_completed - timezone.timedelta(days=1),
        )
        rep = qc.TestInstanceDetailsReport(
            report_opts={
                'unit_test_info__test': [ti.unit_test_info.test.pk, ti3.unit_test_info.test.pk],
                'organization': 'group_by_unit_test_date'
            }
        )
        rep.report_format = "csv"
        context = rep.get_context()
        table = rep.to_table(context)
        org_row = table.index(['Organization:', 'Group by Unit/Test/Date'])
        # should be two rows after blank row
        assert len(table[org_row + 2:]) == 2
        # and 11 columns
        assert len(table[org_row + 3]) == 11
class TestDueDateReport(TestCase):
    """Tests shared between qc.NextDueDatesReport and
    qc.DueAndOverdueQCReport (filenames, detail helpers, rendering, table)."""

    def test_get_queryset(self):
        assert qc.NextDueDatesReport().get_queryset().model._meta.model_name == "unittestcollection"

    def test_next_due_dates_get_filename(self):
        assert qc.NextDueDatesReport().get_filename('pdf') == 'next-due-dates-for-qc.pdf'

    def test_next_due_and_overdue_filename(self):
        assert qc.DueAndOverdueQCReport().get_filename('pdf') == 'due-and-overdue-qc.pdf'

    def test_get_unit__site_details(self):
        # 'null' selections are rendered as "Other".
        site = USite.objects.create(name="site")
        sites = qc.NextDueDatesReport().get_unit__site_details([site, 'null'])
        assert sites == ('Site(s)', 'site, Other')

    def test_get_unit_details(self):
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        units = qc.NextDueDatesReport().get_unit_details([unit.pk])
        assert units == ('Unit(s)', '%s - %s' % (unit.site.name, unit.name))

    def test_generate_next_due_dates_html(self):
        # Smoke test: next-due-dates report renders without error.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        utils.create_test_instance(test_list_instance=tli)
        rep = qc.NextDueDatesReport()
        rep.report_format = "pdf"
        rep.to_html()

    def test_generate_due_and_overdue_html(self):
        # Smoke test: due-and-overdue report renders without error.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        tli = utils.create_test_list_instance(unit_test_collection=utc)
        utils.create_test_instance(test_list_instance=tli)
        rep = qc.DueAndOverdueQCReport()
        rep.report_format = "pdf"
        rep.to_html()

    def test_to_table(self):
        # One utc per site grouping => the header row appears twice.
        site = USite.objects.create(name="site")
        unit = utils.create_unit(site=site)
        utc = utils.create_unit_test_collection(unit=unit)
        utils.create_test_list_instance(unit_test_collection=utc)
        unit2 = utils.create_unit(site=None)
        utc2 = utils.create_unit_test_collection(unit=unit2)
        utils.create_test_list_instance(unit_test_collection=utc2)
        rep = qc.NextDueDatesReport()
        rep.report_format = "csv"
        context = rep.get_context()
        table = rep.to_table(context)
        header_count = table.count([
            _("Unit"), _("Name"),
            _("Frequency"),
            _("Due Date"),
            _("Window"),
            _("Assigned To"),
            _("Perform")
        ])
        assert header_count == 2
class TestAssignedQCReport(TestCase):
    """Tests for the QC assignment summary report."""

    def test_get_queryset(self):
        """The report is built from UnitTestCollection objects."""
        model = qc.AssignedQCReport().get_queryset().model
        assert model._meta.model_name == "unittestcollection"

    def test_get_filename(self):
        assert qc.AssignedQCReport().get_filename('pdf') == 'qc-assignment-summary.pdf'

    def test_get_unit__site_details(self):
        """The 'null' site sentinel should be rendered as 'Other'."""
        a_site = USite.objects.create(name="site")
        details = qc.AssignedQCReport().get_unit__site_details([a_site, 'null'])
        assert details == ('Site(s)', 'site, Other')

    def test_get_unit_details(self):
        """Units are rendered as '<site name> - <unit name>'."""
        a_site = USite.objects.create(name="site")
        a_unit = utils.create_unit(site=a_site)
        details = qc.AssignedQCReport().get_unit_details([a_unit.pk])
        assert details == ('Unit(s)', '%s - %s' % (a_unit.site.name, a_unit.name))

    def test_generate_summary_html(self):
        """Rendering the summary as pdf/html should not raise."""
        a_site = USite.objects.create(name="site")
        utils.create_unit_test_collection(unit=utils.create_unit(site=a_site))
        report = qc.AssignedQCReport()
        report.report_format = "pdf"
        report.to_html()

    def test_to_table(self):
        """Two assignments (site + no-site) yield two rows after the header."""
        a_site = USite.objects.create(name="site")
        unit = utils.create_unit(site=a_site)
        utils.create_unit_test_collection(unit=unit)
        membership = utils.create_test_list_membership()
        other_unit = utils.create_unit(site=None)
        utils.create_unit_test_collection(unit=other_unit, test_collection=membership.test_list)
        report = qc.AssignedQCReport(report_opts={'active': True})
        report.report_format = "csv"
        table = report.to_table(report.get_context())
        header_row = table.index([
            _("Site"),
            _("Unit"),
            _("Test list (Cycle)"),
            _("Frequency"),
            _("Assigned To"),
            _("Link"),
        ])
        assert len(table[header_row + 1:]) == 2
class TestAssignedQCDetailsReport(TestCase):
    """Tests for the QC assignment details report."""

    def test_filter_form_valid(self):
        """If queryset.count() > MAX_UTCS the filter form gains a global error."""
        report = qc.AssignedQCDetailsReport()
        report.MAX_UTCS = -1  # force the "too many results" branch
        form = report.get_filter_form()
        assert report.filter_form_valid(form) is False
        assert '__all__' in form.errors
        assert "Please reduce" in form.errors['__all__'][0]

    def test_get_queryset(self):
        """The report is built from UnitTestCollection objects."""
        model = qc.AssignedQCDetailsReport().get_queryset().model
        assert model._meta.model_name == "unittestcollection"

    def test_get_filename(self):
        assert qc.AssignedQCDetailsReport().get_filename('pdf') == 'qc-assignment-details.pdf'

    def test_generate_summary_html(self):
        """Rendering the details report as pdf/html should not raise."""
        a_site = USite.objects.create(name="site")
        utils.create_unit_test_collection(unit=utils.create_unit(site=a_site))
        report = qc.AssignedQCDetailsReport()
        report.report_format = "pdf"
        report.to_html()

    def test_to_table(self):
        """Rendering a CSV table with two assignments should not raise."""
        a_site = USite.objects.create(name="site")
        unit = utils.create_unit(site=a_site)
        membership = utils.create_test_list_membership()
        utils.create_unit_test_collection(unit=unit, test_collection=membership.test_list)
        other_unit = utils.create_unit(site=None)
        utils.create_unit_test_collection(unit=other_unit, test_collection=membership.test_list)
        report = qc.AssignedQCDetailsReport(report_opts={'active': True})
        report.report_format = "csv"
        report.to_table(report.get_context())
| StarcoderdataPython |
9761595 | <filename>src/beautifulstew.py
from bs4 import BeautifulSoup
def xml_2_soup(input_file):
    """Read *input_file*, HTML-unescape its contents and parse it with lxml.

    Returns a BeautifulSoup tree, or None when the file cannot be read or
    parsed (the failure is logged rather than raised, preserving the
    original best-effort behaviour).
    """
    # Imported locally: this module's top level only imports bs4, but the
    # original code used html.unescape and logging without importing them,
    # which made every call fail and hit the silent except branch.
    import html
    import logging
    try:
        with open(input_file) as file:
            data = file.read()
        data_unescaped = html.unescape(data)
        soup = BeautifulSoup(data_unescaped, "lxml")
        return soup
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; callers receive None on failure as before.
        logging.error('xml_soup_maker failed to open %s', input_file)
        return None
# makes soup out of input uri
def soup_maker(input_uri):
    """Open the HTML file at *input_uri* and return a BeautifulSoup tree."""
    with open(input_uri, 'r') as handle:
        markup = handle.read()
    return BeautifulSoup(markup, "lxml")
def get_all_css(soup, target_tag, output_option):
    """Collect every element carrying CSS class *target_tag* from *soup*.

    When *output_option* is truthy the tag objects themselves are collected
    (and printed); otherwise only their text content is.  Returns the list.
    """
    output_list = []
    for tag in soup.find_all(class_=target_tag):
        # The original also read ``tag.get('id')`` into an unused local
        # (``node_name``); dropped here because it had no effect.
        item = tag if output_option else tag.text
        output_list.append(item)
        print(item)
    print(output_list)
    return output_list
def get_img_src(soup, include_internal):
    """Return the ``src`` attribute of every ``<img>`` tag in *soup*.

    When *include_internal* is False, only sources containing 'http'
    (i.e. absolute/external-looking URLs) are kept; otherwise every
    source is returned.
    """
    output_list = []
    for tag in soup.find_all('img'):
        source = tag['src']
        # Keep everything when internal links are wanted; otherwise keep
        # only URLs that look external ('http' substring, as before).
        if include_internal or 'http' in source:
            output_list.append(source)
    print(output_list)
    return output_list
| StarcoderdataPython |
9789629 | from .base import load_acute_inflammations
from .base import load_cervical_cancer
from .base import load_diabetes
from .base import load_diagnostic_breast_cancer
from .base import load_fertility
from .base import load_heart_disease
from .base import load_mammographic_masses
from .base import load_pima_indians_diabetes
from .base import load_prognostic_breast_cancer
from .base import load_thoracic_surgery
# Public API of this package: the dataset loaders re-exported above.
__all__ = [
    'load_acute_inflammations',
    'load_cervical_cancer',
    'load_diabetes',
    'load_diagnostic_breast_cancer',
    'load_fertility',
    'load_heart_disease',
    'load_mammographic_masses',
    'load_pima_indians_diabetes',
    'load_prognostic_breast_cancer',
    'load_thoracic_surgery'
]
| StarcoderdataPython |
1974608 | <filename>masters/master.chromium.lkgr/master_lkgr_cfg.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from buildbot.process.properties import WithProperties
from master import gitiles_poller
from master import master_config
from master import master_utils
from master.factory import remote_run_factory
import master_site_config
# Master configuration object for the chromium.lkgr waterfall.
ActiveMaster = master_site_config.ChromiumLKGR
defaults = {}
helper = master_config.Helper(defaults)
# Short aliases used throughout the builder declarations below.
B = helper.Builder
F = helper.Factory
S = helper.Scheduler
def m_remote_run(recipe, **kwargs):
    """Return a RemoteRunFactory running *recipe* from the chromium build repo.

    Extra keyword arguments (e.g. ``timeout``) are forwarded to the factory.
    """
    return remote_run_factory.RemoteRunFactory(
        active_master=ActiveMaster,
        repository='https://chromium.googlesource.com/chromium/tools/build.git',
        recipe=recipe,
        factory_properties={'path_config': 'kitchen'},
        **kwargs)
defaults['category'] = '1lkcr'
# Global scheduler
S(name='chromium_lkcr', branch='lkcr')
################################################################################
## Windows
################################################################################
# ASan/Win bot.
B('Win ASan Release', 'win_asan_rel', scheduler='chromium_lkcr')
F('win_asan_rel', m_remote_run('chromium'))
# ASan/Win coverage bot.
B('Win ASan Release Coverage', 'win_asan_rel_cov', scheduler='chromium_lkcr')
F('win_asan_rel_cov', m_remote_run('chromium'))
# ASan/Win media bot.
B('Win ASan Release Media', 'win_asan_rel_media', scheduler='chromium_lkcr')
F('win_asan_rel_media', m_remote_run('chromium'))
# Win SyzyASan bot.
B('Win SyzyASAN LKGR', 'win_syzyasan_lkgr', 'compile', 'chromium_lkcr')
F('win_syzyasan_lkgr', m_remote_run('chromium', timeout=7200))
################################################################################
## Mac
################################################################################
# NOTE(review): asan_mac_gyp is defined but not referenced anywhere in this
# file -- presumably a leftover from a GYP-era configuration.
asan_mac_gyp = 'asan=1 v8_enable_verify_heap=1 enable_ipc_fuzzer=1 '
B('Mac ASAN Release', 'mac_asan_rel', 'compile', 'chromium_lkcr')
F('mac_asan_rel', m_remote_run('chromium'))
B('Mac ASAN Release Media', 'mac_asan_rel_media', 'compile', 'chromium_lkcr')
F('mac_asan_rel_media', m_remote_run('chromium'))
B('Mac ASAN Debug', 'mac_asan_dbg', 'compile', 'chromium_lkcr')
F('mac_asan_dbg', m_remote_run('chromium'))
################################################################################
## Linux
################################################################################
B('ASAN Release', 'linux_asan_rel', 'compile', 'chromium_lkcr')
F('linux_asan_rel', m_remote_run('chromium',
                                 # We started seeing 29 minute links, bug 360158
                                 timeout=2400))
B('ASAN Release Media', 'linux_asan_rel_media',
  'compile', 'chromium_lkcr')
F('linux_asan_rel_media', m_remote_run('chromium',
                                       # We started seeing 29 minute links, bug 360158
                                       timeout=2400))
B('ASAN Debug', 'linux_asan_dbg', 'compile', 'chromium_lkcr')
F('linux_asan_dbg', m_remote_run('chromium'))
B('ChromiumOS ASAN Release', 'linux_chromiumos_asan_rel', 'compile',
  'chromium_lkcr')
F('linux_chromiumos_asan_rel', m_remote_run('chromium',
                                            # We started seeing 29 minute links, bug 360158
                                            timeout=2400))
# The build process is described at
# https://sites.google.com/a/chromium.org/dev/developers/testing/addresssanitizer#TOC-Building-with-v8_target_arch-arm
B('ASan Debug (32-bit x86 with V8-ARM)',
  'linux_asan_dbg_ia32_v8_arm',
  'compile', 'chromium_lkcr')
F('linux_asan_dbg_ia32_v8_arm', m_remote_run('chromium'))
B('ASan Release (32-bit x86 with V8-ARM)',
  'linux_asan_rel_ia32_v8_arm',
  'compile', 'chromium_lkcr')
F('linux_asan_rel_ia32_v8_arm', m_remote_run('chromium'))
B('ASan Release Media (32-bit x86 with V8-ARM)',
  'linux_asan_rel_media_ia32_v8_arm',
  'compile', 'chromium_lkcr')
F('linux_asan_rel_media_ia32_v8_arm', m_remote_run('chromium'))
# TSan bots.
B('TSAN Release', 'linux_tsan_rel', 'compile', 'chromium_lkcr')
F('linux_tsan_rel', m_remote_run('chromium'))
B('TSAN Debug', 'linux_tsan_dbg', 'compile', 'chromium_lkcr')
F('linux_tsan_dbg', m_remote_run('chromium'))
# MSan bots.
B('MSAN Release (no origins)', 'linux_msan_rel_no_origins', 'compile',
  'chromium_lkcr')
F('linux_msan_rel_no_origins', m_remote_run('chromium'))
B('MSAN Release (chained origins)', 'linux_msan_rel_chained_origins', 'compile',
  'chromium_lkcr')
F('linux_msan_rel_chained_origins', m_remote_run('chromium'))
# UBSan bots.
B('UBSan Release', 'linux_ubsan_rel', 'compile', 'chromium_lkcr')
# UBSan builds very slowly with edge level coverage
F('linux_ubsan_rel', m_remote_run('chromium', timeout=5400))
B('UBSan vptr Release', 'linux_ubsan_vptr_rel', 'compile', 'chromium_lkcr')
F('linux_ubsan_vptr_rel', m_remote_run('chromium'))
def Update(_config, active_master, c):
    """Buildbot master entry point: attach the lkcr poller and builders to *c*."""
    lkcr_poller = gitiles_poller.GitilesPoller(
        'https://chromium.googlesource.com/chromium/src',
        branches=['lkcr'])
    c['change_source'].append(lkcr_poller)
    return helper.Update(c)
| StarcoderdataPython |
108007 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools, os
import kicost
import re
# Number of most-recent history entries included in the long description.
SHOW_LAST_HISTORY=3
try:
    from setuptools import setup
    from setuptools.command.develop import develop
    from setuptools.command.install import install
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup
    from distutils.core.command.develop import develop
    from distutils.core.command.install import install
def post_install_setup():
    """Run the KiCost OS-integration step after install (best effort).

    On Windows, first installs pywin32 (needed for shortcut creation); then
    tries to run the bundled ``kicost_setup`` directly, falling back to the
    installed ``kicost --setup`` console command.  All failures are printed
    and swallowed so package installation never fails because of this step.
    """
    # Run the KiCost integration script.
    try:
        import sys
        if sys.platform.startswith("win32"):
            # For Windows it is necessary one additional library (used to create the shortcut).
            print('Installing additional library need for Windows setup...')
            try:
                # NOTE(review): modern pip keeps ``main`` in ``pip._internal``
                # for *all* versions; this py2/py3 split looks inverted for
                # current pip releases -- confirm against the supported pip.
                if sys.version_info < (3,0):
                    from pip._internal import main as pipmain
                else:
                    from pip import main as pipmain
                pipmain(['install', 'pywin32'])
            except:
                print('Error to install Windows additional Python library. KiCost configuration related to Windows registry may not work.')
        # Run setup: shortcut, BOM module to Eeschema and OS context menu.
        try:
            # NOTE(review): a relative import inside setup.py only works when
            # this file is executed as part of a package -- verify.
            from .kicost.kicost_config import kicost_setup
            kicost_setup()
        except:
            print('Running the external configuration command...')
            from subprocess import call
            call(['kicost', '--setup'])
    except:
        print('Error to run KiCost integration script.')
class PostDevelopCommand(develop):
    """Post-installation for development mode (``pip install -e`` / develop)."""
    def run(self):
        # Bug fix: the original referenced ``post_install_setup`` without
        # calling it, so the integration step silently never ran.
        post_install_setup()
        develop.run(self)
class PostInstallCommand(install):
    """Post-installation for normal installation mode."""
    def run(self):
        # Bug fix: the original referenced ``post_install_setup`` without
        # calling it, so the integration step silently never ran.
        post_install_setup()
        install.run(self)
# Update the information files that stay in the computer.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open(os.path.join('kicost','HISTORY.rst')) as history_file:
    history = history_file.read()
try:
    # Keep only the SHOW_LAST_HISTORY most recent changelog entries in the
    # long description, linking to the full history on GitHub.
    history_full = history.replace('.. :changelog:', '')
    update_format = r'History\s\-+\s(.|\n|\r|\_)*?((.|\n|\r)*?\s{2,}){'+str(SHOW_LAST_HISTORY)+'}'
    history_lastest = re.findall(update_format, history_full)[0][0]
    if history_lastest:
        if SHOW_LAST_HISTORY==1:
            history_lastest = history_lastest.replace('History', 'Latest update')
        else:
            history_lastest = history_lastest.replace('History', 'Latest updates')
        history = history_lastest + '\n\nAccess https://github.com/xesscorp/KiCost/blob/master/HISTORY.rst for full development history.'
    else:
        history = history_full
except:
    # NOTE(review): if the very first statement of the ``try`` raised,
    # ``history_full`` would be unbound here and this fallback itself would
    # raise NameError -- confirm that path is unreachable in practice.
    history = history_full
    pass
# KiCost Python packages requirements to run-time.
# Passed to setup(install_requires=...) below.
requirements = [
    'beautifulsoup4 >= 4.3.2', # Deal with HTML and XML tags.
    'lxml >= 3.7.2',
    'XlsxWriter >= 0.7.3', # Write the XLSX output file.
    'future', # For print statements.
    'tqdm >= 4.30.0', # Progress bar.
    'requests >= 2.18.4', # Scrape, API and web modules.
    'CurrencyConverter >= 0.13', # Used to convert price to a not available currency in one distributor.
    'babel >= 2.6', # For currency format by the language in the spreadsheet.
    # 'wxPython >= 4.0', # Graphical package/library needed to user guide.
]
# KiCost Python packages requirements to debug and tests.
test_requirements = [
    # Put package test requirements here.
]
# Extra files needed by KiCost.
data_files=[
    #('kicost', ['kicost/kicost.ico']), # Icon to the user guide. Added via `MANIFEST.in`.
]
# Package metadata and entry points; the custom cmdclass hooks run the
# KiCost OS-integration step after develop/install.
setup(
    name='kicost',
    version=kicost.__version__,
    description="Build cost spreadsheet for a KiCad project.",
    long_description=readme + '\n\n' + history,
    # long_description_content_type="text/reStructuredText",
    author=kicost.__author__,
    author_email=kicost.__email__,
    url='https://xesscorp.github.io/KiCost',
    project_urls={
        'Doc': 'https://xesscorp.github.io/KiCost',
        'Git': 'https://github.com/xesscorp/KiCost',
    },
    packages=setuptools.find_packages(),
    entry_points={'console_scripts':['kicost = kicost.__main__:main']},
    package_dir={'kicost':'kicost'},
    include_package_data=True,
    package_data={'kicost': ['*.gif', '*.png']},
    # data_files=data_files,
    scripts=[],
    install_requires=requirements,
    license="MIT",
    zip_safe=False,
    keywords='KiCAD, BOM, electronics',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    cmdclass={
        'develop': PostDevelopCommand,
        'install': PostInstallCommand,
    }
)
| StarcoderdataPython |
5191986 | <filename>doac/conf.py
from django.conf import settings
import datetime
class Settings:
    """Normalized view over the ``OAUTH_CONFIG`` dict with library defaults.

    Each ``setup_*`` method replaces the raw ``UPPERCASE``-keyed sub-dict
    read from *options* with a normalized lowercase-keyed dict, filling in
    the documented default expiry/length values.
    """

    def __init__(self, options):
        # Token handlers; fall back to the bundled bearer handler.
        self.handlers = options.get("HANDLERS", None)
        if not self.handlers:
            self.handlers = (
                "doac.handlers.bearer.BearerHandler",
            )
        self.realm = options.get("REALM", "oauth")
        self.access_token = options.get("ACCESS_TOKEN", {})
        self.setup_access_token()
        self.auth_code = options.get("AUTHORIZATION_CODE", {})
        self.setup_auth_code()
        self.auth_token = options.get("AUTHORIZATION_TOKEN", {})
        self.setup_auth_token()
        self.client = options.get("CLIENT", {})
        self.setup_client()
        self.refresh_token = options.get("REFRESH_TOKEN", {})
        self.setup_refresh_token()

    @staticmethod
    def _token_options(raw, default_expires, default_length):
        """Map a raw {EXPIRES, LENGTH} dict to {'expires', 'length'} with defaults."""
        return {
            "expires": raw.get("EXPIRES", default_expires),
            "length": raw.get("LENGTH", default_length),
        }

    def setup_access_token(self):
        self.access_token = self._token_options(
            self.access_token, datetime.timedelta(hours=2), 100)

    def setup_auth_code(self):
        self.auth_code = self._token_options(
            self.auth_code, datetime.timedelta(minutes=15), 100)

    def setup_auth_token(self):
        self.auth_token = self._token_options(
            self.auth_token, datetime.timedelta(minutes=15), 100)

    def setup_client(self):
        # Clients only have a configurable id length, no expiry.
        self.client = {"length": self.client.get("LENGTH", 50)}

    def setup_refresh_token(self):
        self.refresh_token = self._token_options(
            self.refresh_token, datetime.timedelta(days=60), 100)
# Module-level singleton built from Django's OAUTH_CONFIG setting (if any).
options_dict = getattr(settings, "OAUTH_CONFIG", {})
options = Settings(options_dict)
| StarcoderdataPython |
1704894 | <filename>tgsettings.py
# Telegram Bot API token (placeholder; replace with the real key).
tg_key='telegram-bot-key'
# Scratch directory and the cached user-id list kept inside it.
tmp_dir = '/tmp'
tmp_uids = tmp_dir + "/uids.txt"
# Bot log destination.
log_file = "/var/log/tgbot/tgbot.log"
debug = True
# Outgoing message formatting: Markdown enabled, HTML disabled.
markdown = True
html = False
1964275 | from __future__ import absolute_import
#!/usr/bin/env python
import numpy as np
from scipy.stats import norm
import scipy.linalg as spl
from ..utils import (multiple_mahalanobis, z_score, multiple_fast_inv,
check_cast_bin8)
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_equal)
def test_z_score():
    """z_score should invert the normal survival function."""
    probs = np.random.rand(10)
    scores = z_score(probs)
    assert_array_almost_equal(norm.sf(scores), probs)
def test_mahalanobis():
    """multiple_mahalanobis matches the explicit x' A^-1 x computation."""
    x = np.random.rand(100) / 100
    A = np.random.rand(100, 100) / 100
    A = np.dot(A.transpose(), A) + np.eye(100)
    expected = np.dot(x, np.dot(np.linalg.inv(A), x))
    computed = multiple_mahalanobis(x, A)
    assert_almost_equal(expected, computed, decimal=1)
def test_mahalanobis2():
    """Batched mahalanobis over stacked matrices matches a direct computation."""
    x = np.random.randn(100, 3)
    stacked = np.zeros([100, 100, 3])
    for idx in range(3):
        rect = np.random.randn(120, 100)
        stacked[:, :, idx] = np.dot(rect.T, rect)
    pick = np.random.randint(3)
    expected = np.dot(x[:, pick], np.dot(np.linalg.inv(stacked[:, :, pick]), x[:, pick]))
    batched = (multiple_mahalanobis(x, stacked))[pick]
    assert_almost_equal(expected, batched)
def test_multiple_fast_inv():
    """multiple_fast_inv inverts each stacked matrix like scipy.linalg.inv."""
    shape = (10, 20, 20)
    stack = np.random.randn(*shape)
    expected = np.zeros(shape)
    for idx in range(shape[0]):
        # Make each slice symmetric positive definite so it is invertible.
        stack[idx] = np.dot(stack[idx], stack[idx].T)
        expected[idx] = spl.inv(stack[idx])
    assert_array_almost_equal(expected, multiple_fast_inv(stack))
def assert_equal_bin8(actual, expected):
    """check_cast_bin8 must preserve shape, return uint8, and equal *expected*."""
    converted = check_cast_bin8(actual)
    assert_equal(converted.shape, actual.shape)
    assert_true(converted.dtype.type == np.uint8)
    assert_array_equal(converted, expected)
def test_check_cast_bin8():
    """check_cast_bin8 casts binary arrays to np.uint8 and rejects non-binary."""
    # Function to return np.uint8 array with check whether array is binary.
    # NOTE(review): np.sctypes was removed in NumPy 2.0; this test still
    # assumes an older NumPy where it exists -- confirm the supported range.
    for in_dtype in np.sctypes['int'] + np.sctypes['uint']:
        assert_equal_bin8(np.array([0, 1, 1, 1], in_dtype), [0, 1, 1, 1])
        assert_equal_bin8(np.array([[0, 1], [1, 1]], in_dtype),
                          [[0, 1], [1, 1]])
        assert_raises(ValueError, check_cast_bin8,
                      np.array([0, 1, 2], dtype=in_dtype))
    for in_dtype in np.sctypes['float']:
        # ``np.float`` (a deprecated alias of builtin float, removed in
        # NumPy 1.24) replaced by ``float``; behaviour is identical.
        assert_equal_bin8(np.array([0, 1, 1, -0], float), [0, 1, 1, 0])
        assert_equal_bin8(np.array([[0, 1], [1, -0]], float),
                          [[0, 1], [1, 0]])
        assert_raises(ValueError, check_cast_bin8,
                      np.array([0, 0.1, 1], dtype=in_dtype))
        assert_raises(ValueError, check_cast_bin8,
                      np.array([0, -1, 1], dtype=in_dtype))
| StarcoderdataPython |
1684532 | import tensorflow as tf
import numpy as np
import pyomo.environ as pyo
from relumip import AnnModel
from relumip.utils.visualization import plot_results_2d
# Example script: embed a trained Keras ReLU network into a Pyomo MIP and
# minimize its output.  Requires the 'glpk' solver on PATH and the data files
# referenced below.
# Load the trained tensorflow model which will be embedded into the optimization problem.
tf_model = tf.keras.models.load_model('data/peaks_3x10.h5')
# Create a pyomo model into which the ANN will be embedded.
model = pyo.ConcreteModel()
model.construct()
# All network variables will be added to a user-defined block within the model.
model.ann = pyo.Block()
# The network input and output variables have to be defined by the user.
# For the network input, finite variable bounds have to be supplied (they can be inferred from the data used to train
# the model, for example).
model.ann.Input1 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Input2 = pyo.Var(within=pyo.Reals, bounds=(-3, 3))
model.ann.Output = pyo.Var(bounds=(-10000, 10000), within=pyo.Reals)
# Input and output variables are stored in lists to be passes to the AnnModel.
input_vars = [model.ann.Input1, model.ann.Input2]
output_vars = [model.ann.Output]
# A solver instance has to be defined for bound tightening. Make sure that an appropriate MIP solver is installed.
solver = pyo.SolverFactory('glpk')
# Now the AnnModel instance can be created.
ann_model = AnnModel(tf_model=tf_model, modeling_language='PYOMO')
# Input and output variables are connected to the network.
# The block dedicated for the ANN model has to be passed as well.
ann_model.connect_network_input(opt_model=model.ann, input_vars=input_vars)
ann_model.connect_network_output(opt_model=model.ann, output_vars=output_vars)
# This call generates the network formulation inside the block.
# The bound tightening strategy has to be specified, for Pyomo the options are 'MIP' or 'LP' (default).
ann_model.embed_network_formulation(bound_tightening_strategy='LP', solver=solver)
# In this example, no additional model components besides the ANN are considered.
# We choose to minimize the network output and display the solved model.
model.obj = pyo.Objective(expr=model.ann.Output, sense=pyo.minimize)
res = solver.solve(model)
model.display()
# To visualize the computed results, a test data set is generated within the ANN input domain and the tensorflow model
# is evaluated on it. The solution point computed above is extracted and shown on the response surface plot.
sample_input = 6 * np.random.rand(10000, 2) - 3
sample_output = tf_model.predict(sample_input)
sol_point = [input_vars[0].value, input_vars[1].value, output_vars[0].value]
plot_results_2d(sample_input, sample_output, sol_point=sol_point)
# The model parameters computed during bound tightening can be saved for future use of the same model. See the
# 'load_precomputed_parameters_example.py' file on more information on how to load precomputed parameters
ann_model.save_param('data/peaks3x10_param')
| StarcoderdataPython |
4899576 | import matplotlib.pyplot as plt
from matplotlib import style
# Use ggplot styling for all plots in this demo.
style.use('ggplot')
import numpy as np
# Toy 2-D data set: a handful of points forming loose clusters.
X = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
# Preview the raw points (blocks until the plot window is closed).
plt.scatter(X[:, 0], X[:, 1], s=150)
plt.show()
# More colors than clusters so indexing by cluster label never runs out.
colors = 10 * ["g", "r", "c", "b", "k"]
class K_Means:
    """Minimal k-means clustering (Lloyd's algorithm).

    NOTE(review): ``tol`` is stored but never used and ``prev_centroids`` is
    computed but never compared, so ``fit`` always runs the full ``max_iter``
    iterations -- the convergence/tolerance check appears to be missing or
    truncated from this source.
    """
    def __init__(self, k=2, tol=0.001, max_iter=300):
        # k: number of clusters; tol: intended convergence tolerance (unused);
        # max_iter: hard cap on the number of Lloyd iterations.
        self.k = k
        self.tol = tol
        self.max_iter = max_iter
    def fit(self, data):
        """Cluster *data* (indexable collection of feature vectors) into self.k groups."""
        # Seed the centroids with the first k data points.
        self.centroids = {}
        for i in range(self.k):
            self.centroids[i] = data[i]
        # NOTE: the inner ``for i in range(self.k)`` below shadows this loop
        # variable; harmless here since the outer ``i`` is otherwise unused.
        for i in range(self.max_iter):
            # Assignment step: bucket each sample under its nearest centroid.
            self.classifications = {}
            for i in range(self.k):
                self.classifications[i] = []
            for featureset in data:
                distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
                classification = distances.index(min(distances))
                self.classifications[classification].append(featureset)
            # Update step: recompute each centroid as the mean of its bucket.
            prev_centroids = dict(self.centroids)
            for classification in self.classifications:
                self.centroids[classification] = np.average(self.classifications[classification], axis=0)
| StarcoderdataPython |
# NOTE: this is Python 2 / PyObjC code (StringIO module, file() builtin,
# u'' literals).
__all__ = ['AsyncPythonInterpreter']
# fcntl is POSIX-only; fall back to None so the close-on-exec tweak in
# bind_and_listen can be skipped cleanly on platforms without it.
try:
    import fcntl
except:
    fcntl = None
import os
import sys
import socket
from StringIO import StringIO
from netrepr import NetRepr, RemoteObjectPool, RemoteObjectReference
import objc
from Foundation import *
# Concatenate the helper modules' source into a single repr'd string
# (SOURCE) that is shipped to the child interpreter over the socket when a
# connection is established.
IMPORT_MODULES = ['netrepr', 'remote_console', 'remote_pipe', 'remote_bootstrap']
source = StringIO()
for fn in IMPORT_MODULES:
    for line in file(fn+'.py', 'rU'):
        source.write(line)
    source.write('\n\n')
SOURCE = repr(source.getvalue()) + '\n'
def bind_and_listen(hostport):
    """Create a TCP server socket bound to *hostport* and start listening.

    *hostport* may be an ``(addr, port)`` tuple or an ``"addr:port"``
    string.  The socket is marked close-on-exec where fcntl supports it,
    and SO_REUSEADDR is set on POSIX (except cygwin) so the address can be
    reused shortly after shutdown.
    """
    if isinstance(hostport, str):
        addr, portstr = hostport.split(':')
        hostport = (addr, int(portstr))
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Prevent the listening socket from leaking into exec'd children.
    if hasattr(fcntl, 'FD_CLOEXEC'):
        flags = fcntl.fcntl(server.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(server.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
    # Allow the address to be re-used in a reasonable amount of time.
    if os.name == 'posix' and sys.platform != 'cygwin':
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(hostport)
    server.listen(5)
    return server
class AsyncPythonInterpreter(NSObject):
    """Spawn a child Python interpreter and talk to it over a TCP socket.

    Uses Cocoa NSFileHandle notifications for asynchronous accept/read and
    forwards complete lines to a pluggable command reactor.  Selector-style
    method names (trailing underscores) are required by PyObjC.
    """
    commandReactor = objc.IBOutlet('commandReactor')
    def init(self):
        # Designated initializer: null out all connection state.
        self = super(AsyncPythonInterpreter, self).init()
        self.host = None
        self.port = None
        self.interpreterPath = None
        self.scriptPath = None
        self.commandReactor = None
        self.serverSocket = None
        self.serverFileHandle = None
        # buffer accumulates partial lines between socket reads.
        self.buffer = ''
        # NOTE(review): serverFileHandle is assigned twice; the second
        # assignment is redundant but harmless.
        self.serverFileHandle = None
        self.remoteFileHandle = None
        self.childTask = None
        return self
    def initWithHost_port_interpreterPath_scriptPath_commandReactor_(self, host, port, interpreterPath, scriptPath, commandReactor):
        # Convenience initializer used when not instantiated from a nib.
        self = self.init()
        self.host = host
        self.port = port
        self.interpreterPath = interpreterPath
        self.scriptPath = scriptPath
        self.commandReactor = commandReactor
        self.serverSocket = None
        return self
    def awakeFromNib(self):
        # Pull configuration from NSUserDefaults, seeding defaults on first run.
        defaults = NSUserDefaults.standardUserDefaults()
        def default(k, v, typeCheck=None):
            # Read key k, coercing with typeCheck; write and return v if unset.
            rval = defaults.objectForKey_(k)
            if typeCheck is not None and rval is not None:
                try:
                    rval = typeCheck(rval)
                except TypeError:
                    NSLog(u'%s failed type check %s with value %s', k, typeCheck.__name__, rval)
                    rval = None
            if rval is None:
                defaults.setObject_forKey_(v, k)
                rval = v
            return rval
        self.host = default(u'AsyncPythonInterpreterInterpreterHost', u'127.0.0.1', str)
        self.port = default(u'AsyncPythonInterpreterInterpreterPort', 0, int)
        self.interpreterPath = default(u'AsyncPythonInterpreterInterpreterPath', u'/usr/bin/python', unicode)
        self.scriptPath = type(self).bundleForClass().pathForResource_ofType_(u'tcpinterpreter', u'py')
    def connect(self):
        """Bind a listening socket, then launch the child interpreter
        pointed at our address; accept happens asynchronously."""
        #NSLog(u'connect')
        self.serverSocket = bind_and_listen((self.host, self.port))
        self.serverFileHandle = NSFileHandle.alloc().initWithFileDescriptor_(self.serverSocket.fileno())
        nc = NSNotificationCenter.defaultCenter()
        nc.addObserver_selector_name_object_(
            self,
            'remoteSocketAccepted:',
            NSFileHandleConnectionAcceptedNotification,
            self.serverFileHandle)
        self.serverFileHandle.acceptConnectionInBackgroundAndNotify()
        self.remoteFileHandle = None
        # Strip PYTHON* env vars so the child starts with a clean interpreter.
        for k in os.environ.keys():
            if k.startswith('PYTHON'):
                del os.environ[k]
        self.childTask = NSTask.launchedTaskWithLaunchPath_arguments_(self.interpreterPath, [self.scriptPath, repr(self.serverSocket.getsockname())])
        nc.addObserver_selector_name_object_(
            self,
            'childTaskTerminated:',
            NSTaskDidTerminateNotification,
            self.childTask)
        return self
    def remoteSocketAccepted_(self, notification):
        # Child connected: stop listening, ship SOURCE, start async reads.
        #NSLog(u'remoteSocketAccepted_')
        self.serverFileHandle.closeFile()
        self.serverFileHandle = None
        ui = notification.userInfo()
        self.remoteFileHandle = ui.objectForKey_(NSFileHandleNotificationFileHandleItem)
        nc = NSNotificationCenter.defaultCenter()
        nc.addObserver_selector_name_object_(
            self,
            'remoteFileHandleReadCompleted:',
            NSFileHandleReadCompletionNotification,
            self.remoteFileHandle)
        self.writeBytes_(SOURCE)
        self.remoteFileHandle.readInBackgroundAndNotify()
        self.commandReactor.connectionEstablished_(self)
        NSNotificationCenter.defaultCenter().postNotificationName_object_(u'AsyncPythonInterpreterOpened', self)
    def remoteFileHandleReadCompleted_(self, notification):
        # Split incoming bytes into newline-terminated lines, buffering any
        # trailing partial line; EOF or error closes the connection.
        #NSLog(u'remoteFileHandleReadCompleted_')
        ui = notification.userInfo()
        newData = ui.objectForKey_(NSFileHandleNotificationDataItem)
        if newData is None:
            self.close()
            NSLog(u'Error: %@', ui.objectForKey_(NSFileHandleError))
            return
        bytes = newData.bytes()[:]
        if len(bytes) == 0:
            self.close()
            return
        self.remoteFileHandle.readInBackgroundAndNotify()
        # Resume the newline scan after the previously buffered partial line.
        start = len(self.buffer)
        buff = self.buffer + newData.bytes()[:]
        #NSLog(u'current buffer: %s', buff)
        lines = []
        while True:
            linebreak = buff.find('\n', start) + 1
            if linebreak == 0:
                break
            lines.append(buff[:linebreak])
            buff = buff[linebreak:]
            start = 0
        #NSLog(u'lines: %s', lines)
        self.buffer = buff
        for line in lines:
            self.commandReactor.lineReceived_fromConnection_(line, self)
    def writeBytes_(self, bytes):
        # Best-effort write to the child; a failed write tears the link down.
        #NSLog(u'Writing bytes: %s' bytes)
        try:
            self.remoteFileHandle.writeData_(NSData.dataWithBytes_length_(bytes, len(bytes)))
        except objc.error:
            self.close()
        #NSLog(u'bytes written.')
    def childTaskTerminated_(self, notification):
        # Child process exited: tear everything down.
        #NSLog(u'childTaskTerminated_')
        self.close()
    def closeServerFileHandle(self):
        # Idempotent close of the listening handle.
        #NSLog(u'closeServerFileHandle')
        if self.serverFileHandle is not None:
            try:
                self.serverFileHandle.closeFile()
            except objc.error:
                pass
            self.serverFileHandle = None
    def closeRemoteFileHandle(self):
        # Idempotent close of the connected handle.
        #NSLog(u'closeRemoteFileHandle')
        if self.remoteFileHandle is not None:
            try:
                self.remoteFileHandle.closeFile()
            except objc.error:
                pass
            self.remoteFileHandle = None
    def terminateChildTask(self):
        # Idempotent termination of the child interpreter process.
        #NSLog(u'terminateChildTask')
        if self.childTask is not None:
            try:
                self.childTask.terminate()
            except objc.error:
                pass
            self.childTask = None
    def close(self):
        """Tear down the connection and broadcast the Closed notification."""
        #NSLog(u'close')
        NSNotificationCenter.defaultCenter().removeObserver_(self)
        self.finalClose()
        NSNotificationCenter.defaultCenter().postNotificationName_object_(u'AsyncPythonInterpreterClosed', self)
    def finalClose(self):
        # Notify the reactor first, then release handles and the child task.
        if self.commandReactor is not None:
            self.commandReactor.connectionClosed_(self)
            self.commandReactor = None
        self.closeServerFileHandle()
        self.closeRemoteFileHandle()
        self.terminateChildTask()
def test_console():
    """Manual smoke test: run a console reactor against a child interpreter
    and spin the Cocoa event loop until the connection closes."""
    from PyObjCTools import AppHelper
    from ConsoleReactor import ConsoleReactor
    host = '127.0.0.1'
    port = 0
    interpreterPath = sys.executable
    scriptPath = unicode(os.path.abspath('tcpinterpreter.py'))
    commandReactor = ConsoleReactor.alloc().init()
    interp = AsyncPythonInterpreter.alloc().initWithHost_port_interpreterPath_scriptPath_commandReactor_(host, port, interpreterPath, scriptPath, commandReactor)
    interp.connect()
    # Helper NSObject whose selector stops the event loop when the
    # interpreter posts its Closed notification.
    class ThisEventLoopStopper(NSObject):
        def interpFinished_(self, notification):
            AppHelper.stopEventLoop()
    stopper = ThisEventLoopStopper.alloc().init()
    NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(stopper, 'interpFinished:', u'AsyncPythonInterpreterClosed', interp)
    AppHelper.runConsoleEventLoop(installInterrupt=True)
def main():
    """Script entry point: run the interactive console smoke test."""
    test_console()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5186860 | <filename>deepnodal/python/functions/hypervisor.py
"""
Hypervisor module for Tensorflow.
"""
# <NAME>
#-------------------------------------------------------------------------------
from deepnodal.python.functions.supervisor import *
from deepnodal.python.concepts.master import *
#-------------------------------------------------------------------------------
class hypervisor (supervisor, master, stem):
"""
A hypervisor is a class that distributes computations for supervised
learning across multiple devices.
It is three things in one class:
A supervisor: it updates parameters according to gradient-based optimisation.
A master: it enslaves supervisors to evaluate the gradients on data subbatches.
A stem: it is effectively the final `trunk' that clones networks as required.
For syntactic purposes, it is called exactly in the same way as the supervisor
except at it's instantiation:
sup = supervisor() and sup = hypervisor() are identical
sup = supervisor('name', 'dev') and sup = hypervisor('name', 'dev') are identical.
sup = hypervisor('name', devs = 2) or sup = hypervisor(devs = 2) sets up a
hypervisor instance that distributes supervised learning across 2 GPU devices.
"""
def_name = 'hypervisor'
unit_dev = None # Boolean flag to denote single device (i.e. supervisor)
devs = None # GPU devices
n_devs = None # len(devs)
clones = None # Network clones
slaves = None # Supervisor slaves
param_ops = None # Parameter operations for the master to update slaves.
moments = None # Moment mappings
n_moments = None # len(Moment)
#-------------------------------------------------------------------------------
def __init__(self, name = None, dev = None, devs = None):
"""
To instantiate for using 2 GPUs, invoke:
sup = hypervisor(devs = 2) or sup = hypervisor('name', devs = 2)
"""
supervisor.__init__(self)
master.__init__(self)
stem.__init__(self)
self.set_name(name)
self.set_dev(dev, devs)
#-------------------------------------------------------------------------------
def set_name(self, name = None):
self.name = name
# Set names of supervisors (through master)
master.set_name(self, self.name)
# Set names of schedules (through overseer)
supervisor.set_name(self, self.name)
# Set names of clones (through stem)
stem.set_name(self, self.name)
#-------------------------------------------------------------------------------
def set_dev(self, dev = None, devs = None):
"""
If devs in an integer, it represents the number of GPUs to use.
"""
# Here dev is either None or a number
self.dev = dev
self.devs = devs
if type(self.devs) is int:
self.n_devs = self.devs
self.devs = [Device('gpu', i) for i in range(self.n_devs)]
if self.devs is not None:
if self.dev is None:
self.dev = Device('cpu', 0)
self.n_devs = 0
self.unit_dev = self.devs is None
if not(self.unit_dev):
self.n_devs = len(self.devs)
return self.unit_dev
#-------------------------------------------------------------------------------
def set_work(self, work = None):
    """
    Assign the network to train. In multi-device mode this also clones
    the network once per device and enslaves a supervisor per clone.
    """
    # If not(self.unit_dev), the slaves and clones are instantiated here
    argout = supervisor.set_work(self, work)
    if self.unit_dev: return argout
    # Clone networks
    self.clones = [self.work.clone() for i in range(self.n_devs)]
    # Remove weight initialisers from clones (GPUs are less flexible)
    for clone in self.clones:
        for subnet in clone.subnets:
            subnet.set_weights(None)
    # Declare clones as subobjects to stem
    self.set_subobjects(self.clones)
    # Enslave supervisors
    self.slaves = [supervisor() for i in range(self.n_devs)]
    # Declare slaves as subworkers to master
    self.set_subworkers(self.slaves)
    # Rename and redevice clones and slaves
    for i in range(self.n_devs):
        self.clones[i].set_name(self.work.name + "/clone_" + str(i))
        self.clones[i].set_dev(self.devs[i])
        self.slaves[i].set_name(self.name + "/slave_" + str(i))
        self.slaves[i].set_dev(self.devs[i])
        self.slaves[i].set_work(self.clones[i])
#-------------------------------------------------------------------------------
def set_errorq(self, erq = None, *erq_args, **erq_kwds):
    """Set the error-quotient specification on the master and, in
    multi-device mode, broadcast the same specification to every slave."""
    argout = supervisor.set_errorq(self, erq, *erq_args, **erq_kwds)
    if self.unit_dev:
        return argout
    slave_argouts = []
    for each_slave in self.slaves:
        slave_argouts.append(each_slave.set_errorq(erq, *erq_args, **erq_kwds))
    return slave_argouts
#-------------------------------------------------------------------------------
def set_costfn(self, cfn = None, *cfn_args, **cfn_kwds):
    """Set the cost-function specification on the master and, in
    multi-device mode, broadcast the same specification to every slave."""
    argout = supervisor.set_costfn(self, cfn, *cfn_args, **cfn_kwds)
    if self.unit_dev:
        return argout
    slave_argouts = []
    for each_slave in self.slaves:
        slave_argouts.append(each_slave.set_costfn(cfn, *cfn_args, **cfn_kwds))
    return slave_argouts
#-------------------------------------------------------------------------------
def __call__(self, ist = None, gst = None, skip_metrics = False, _called = True, **kwds):
    """
    Build the computation graph. Single-device mode defers entirely to
    supervisor.__call__; multi-device mode follows the strict ordering
    documented in the inline string below.

    Returns:
        (self.ist, self.gst): the is-training and global-step objects.
    """
    if self.work is None: return
    if self.unit_dev:
        argout = supervisor.__call__(self, ist, gst, skip_metrics, _called, **kwds)
        return argout
    """
    At this point, there is only one correct order to proceed.
    1. Setup the original network - there is no reason for it to be changed even if some
       class member properties are redundant at this stage (e.g. network.outputs)
    2. While setting up the hypervisor as an overseer and trainer, call overloaded
       __call__ functions to include specification parameters (including identity
       operations for split inputs) for slave specifications commanded by the hypervisor
       (e.g. inputs, optimiser, schedules), and while setting up as a supervisor, the same
       for the labels.
    3. Re-assign the original network outputs with concatenated outputs from those slaves
    4. Before setting up hypersupervisor performance metrics (e.g. cost, loss, gradients),
       complete the setup of all slaves and associated clones.
    5. Setup hypervisor performance metrics and gradients as averages from slaves.
    6. In addition to the apply operations setup parameter update operations for clones.
    """
    # 1. and 2. (metrics are skipped here; they are built in step 5)
    supervisor.__call__(self, ist, gst, True, _called, **kwds)
    # 3. and 4.
    [_slave.__call__(self.ist, self.gst, skip_metrics, **kwds) for _slave in self.slaves]
    self._call_outputs(True)
    # 5. and 6.
    self._call_metrics(skip_metrics)
    self.set_called(_called)
    return self.ist, self.gst
#-------------------------------------------------------------------------------
def _call_is_training(self, ist = None): # overloading trainer._call_is_training(self, ist)
    """Create the is-training flag on the master and, in multi-device
    mode, share the master's flag with every slave."""
    argout = supervisor._call_is_training(self, ist)
    if self.unit_dev:
        return argout
    slave_argouts = []
    for each_slave in self.slaves:
        slave_argouts.append(each_slave._call_is_training(self.ist))
    return slave_argouts
#-------------------------------------------------------------------------------
def _call_inputs(self): # overloading trainer._call_inputs()
    """
    Create master inputs; in multi-device mode each input batch is split
    ('diverge'd) along axis 0 into one shard per device, and the shards
    are handed to the clones as their input specifications.
    """
    argout = supervisor._call_inputs(self)
    if self.unit_dev: return argout
    # Setup inputs_to_clones through diverging by splitting input batch
    self.inputs_to_clones = [None] * len(self.inputs)
    for i, _input in enumerate(self.inputs):
        if self.dev is None:
            self.inputs_to_clones[i] = Creation('diverge')(_input, self.n_devs, axis = 0,
                                       name = self.name + '/batch/inputs_to_clones')
        else:
            with Device(self.dev):
                self.inputs_to_clones[i] = Creation('diverge')(_input, self.n_devs, axis = 0,
                                           name = self.name + '/batch/inputs_to_clones')
    # Re-index by clone (transpose [input][device] -> [device][input])
    self.inputs_by_clones = [None] * self.n_devs
    for i in range(self.n_devs):
        self.inputs_by_clones[i] = [None] * len(self.inputs)
        for j in range(len(self.inputs)):
            self.inputs_by_clones[i][j] = self.inputs_to_clones[j][i]
    # Set identity object specifications to the clones
    for _inputs, _clone in zip(self.inputs_by_clones, self.clones):
        _clone.set_inputs(_inputs)
    # Now the clones will take care of the rest at the network._call_inputs() stage
    return argout
#-------------------------------------------------------------------------------
def _call_outputs(self, reassign = False): # overloading trainer._call_outputs()
    """
    Create master outputs. With reassign=True in multi-device mode, each
    master output is replaced by the axis-0 concatenation of the
    corresponding slave outputs (undoing the input batch split).
    """
    argout = supervisor._call_outputs(self)
    if self.unit_dev: return argout
    if not(reassign): return argout
    slave_outputs = [_slave.outputs for _slave in self.slaves]
    # Transpose [device][output] -> [output][device]
    slave_outputs_by_slave = [None] * self.n_outputs
    for i in range(self.n_outputs):
        slave_outputs_by_slave[i] = [None] * self.n_devs
        for j in range(self.n_devs):
            slave_outputs_by_slave[i][j] = slave_outputs[j][i]
    for i in range(self.n_outputs):
        if self.dev is None:
            self.outputs[i] = Creation('con')(slave_outputs_by_slave[i], axis=0,
                              name = self.name + "/" + self.work.name + "/update_ops/" + self.output_names[i])
        else:
            with Device(self.dev):
                self.outputs[i] = Creation('con')(slave_outputs_by_slave[i], axis=0,
                                  name = self.name + "/" + self.work.name + "/update_ops/" + self.output_names[i])
    return self.outputs
#-------------------------------------------------------------------------------
def _call_learning_rate(self, gst = None): # overloading trainer._call_learning_rate
    """Create the master learning rate and mirror the specification to
    every slave in multi-device mode."""
    argout = supervisor._call_learning_rate(self, gst)
    if self.unit_dev: return argout
    for _slave in self.slaves:
        _slave.set_learning_rate(self._lrn, *self._lrn_args, **self._lrn_kwds)
    return argout
#-------------------------------------------------------------------------------
def _call_optimiser(self): # overloading trainer._call_optimiser()
    """Create the master optimiser and mirror the specification to every
    slave in multi-device mode."""
    argout = supervisor._call_optimiser(self)
    if self.unit_dev: return argout
    for _slave in self.slaves:
        _slave.set_optimiser(self._opt, *self._opt_args, **self._opt_kwds)
    return argout
#-------------------------------------------------------------------------------
def set_session(self, session = None): # overloading trainer.set_session(session)
    """Assign the session on the master and share it with every slave."""
    argout = supervisor.set_session(self, session)
    if self.unit_dev: return argout
    for _slave in self.slaves:
        _slave.set_session(self.session)
    return argout
#-------------------------------------------------------------------------------
def _setup_schedules(self, gst = None): # overloading overseer._setup_schedules(gst)
    """
    Set up training schedules on the master and mirror them to slaves.

    Fix: the multi-device path previously fell off the end and implicitly
    returned None; it now returns the supervisor result, consistent with
    the other overloaded setters in this class.
    """
    argout = supervisor._setup_schedules(self, gst)
    if self.unit_dev: return argout
    for _slave in self.slaves:
        _slave.set_schedules(self.schedules)
    return argout
#-------------------------------------------------------------------------------
def use_schedule(self, using_schedule = -1): # overloading overseer.use_schedule(using_schedule)
    # NOTE(review): this definition is shadowed by the later
    # use_schedule(self, using_schedule, _update_dropout) defined further
    # down in this class, so this version is dead code.
    argout = supervisor.use_schedule(self, using_schedule)
    if self.unit_dev: return argout
    for _slave in self.slaves:
        _slave.use_schedule(self.using_schedule)
    return argout
#-------------------------------------------------------------------------------
def _call_labels(self): # overloading supervisor._call_labels()
    """
    Create master labels; in multi-device mode the label batch is split
    ('diverge'd) along axis 0 and each shard is handed to a slave.
    """
    argout = supervisor._call_labels(self)
    if self.unit_dev: return argout
    # Setup labels_to_slaves through diverging by splitting label batch
    if self.dev is None:
        self.labels_to_slaves = Creation('diverge')(self.labels, self.n_devs, axis = 0,
                                name = self.name + '/batch/labels_to_slaves')
    else:
        with Device(self.dev):
            self.labels_to_slaves = Creation('diverge')(self.labels, self.n_devs, axis = 0,
                                    name = self.name + '/batch/labels_to_slaves')
    # Set identity object specifications to the slaves
    for _labels, _slave in zip(list(self.labels_to_slaves), self.slaves):
        _slave.set_labels(_labels)
    # Now the slaves will take care of the rest at the supervisor._setup_labels() stage
    return argout
#-------------------------------------------------------------------------------
def _call_errors(self): # overloading supervisor._call_errors()
    """
    Construct error metrics. In multi-device mode the per-slave error
    tensors are packed and device-averaged on the master.

    Bug fix: the original code tested ``type(slave_errors) is not list``,
    but ``slave_errors`` is always a list (it is produced by a list
    comprehension), so the single-error branch was unreachable and the
    multi-threshold branch ran unconditionally. The intended test is
    whether each *slave's* ``errors`` attribute is a single tensor or a
    per-threshold list, i.e. ``type(slave_errors[0]) is not list``.
    """
    if self.unit_dev:
        return supervisor._call_errors(self)
    # If not(self.unit_dev), hypervisor doesn't care about its own hat values
    # because it never evaluates them.
    slave_errors = [_slave.errors for _slave in self.slaves]
    if type(slave_errors[0]) is not list:
        # Single error tensor per slave: average across devices
        if self.dev is None:
            self.errors = Creation('mean')(Creation('pack')(slave_errors,
                          name = self.name + "/metrics/error_quotients"),
                          name = self.name + "/metrics/error_quotient")
        else:
            with Device(self.dev):
                self.errors = Creation('mean')(Creation('pack')(slave_errors,
                              name = self.name + "/metrics/error_quotients"),
                              name = self.name + "/metrics/error_quotient")
        K = ['']
    else:
        # One error per threshold k: transpose [device][k] -> [k][device]
        # and average each across devices
        slave_errors_by_slave = [None] * len(self.erq_args[0])
        for i in range(len(self.erq_args[0])):
            slave_errors_by_slave[i] = [None] * self.n_devs
            for j in range(self.n_devs):
                slave_errors_by_slave[i][j] = slave_errors[j][i]
        self.errors = [None] * len(self.erq_args[0])
        K = [None] * len(self.erq_args[0])
        for i, k in enumerate(self.erq_args[0]):
            if self.dev is None:
                self.errors[i] = Creation('mean')(Creation('pack')(slave_errors_by_slave[i],
                                 name = self.name + "/metrics/error_quotients_" + str(k)),
                                 name = self.name + "/metrics/error_quotient_" + str(k))
            else:
                with Device(self.dev):
                    self.errors[i] = Creation('mean')(Creation('pack')(slave_errors_by_slave[i],
                                     name = self.name + "/metrics/error_quotients_" + str(k)),
                                     name = self.name + "/metrics/error_quotient_" + str(k))
            K[i] = '_{}'.format(k)
    self.error_metrics = [None] * len(self.errors)
    for i in range(len(self.errors)):
        self.error_metrics[i] = self.add_metric()
        self.error_metrics[i].set_label('ERROR' + K[i], 'train', 'tests')
        self.error_metrics[i].__call__(self.errors[i])
    return self.errors
#-------------------------------------------------------------------------------
def _call_costfn(self): # overloading supervisor._call_costfn()
    """Construct the cost metric; in multi-device mode as the mean of the
    per-slave costs, computed on the master device."""
    if self.unit_dev:
        return supervisor._call_costfn(self)
    slave_costs = [_slave.cost for _slave in self.slaves]
    if self.dev is None:
        self.cost = Creation('mean')(Creation('pack')(slave_costs,
                    name = self.name + "/metrics/costs"),
                    name = self.name + "/metrics/cost")
    else:
        with Device(self.dev):
            self.cost = Creation('mean')(Creation('pack')(slave_costs,
                        name = self.name + "/metrics/costs"),
                        name = self.name + "/metrics/cost")
    self.cost_metric = self.add_metric()
    self.cost_metric.set_label('COST', 'train', 'tests')
    self.cost_metric.__call__(self.cost)
    return self.cost
#-------------------------------------------------------------------------------
def _call_losses(self): # overloading supervisor._call_losses()
    """Construct the loss metric; in multi-device mode as the mean of the
    per-slave losses, computed on the master device."""
    if self.unit_dev:
        return supervisor._call_losses(self)
    slave_losses = [_slave.loss for _slave in self.slaves]
    if self.dev is None:
        self.loss = Creation('mean')(Creation('pack')(slave_losses,
                    name = self.name + "/metrics/losses"),
                    name = self.name + "/metrics/loss")
    else:
        with Device(self.dev):
            self.loss = Creation('mean')(Creation('pack')(slave_losses,
                        name = self.name + "/metrics/losses"),
                        name = self.name + "/metrics/loss")
    self.loss_metric = self.add_metric()
    self.loss_metric.set_label('LOSS', 'train', 'tests')
    self.loss_metric.__call__(self.loss)
    return self.loss
#-------------------------------------------------------------------------------
def _call_slave_means(self, values_by_slave, value_names):
    """
    Device-average per-slave tensors (e.g. gradients or moments).

    Args:
        values_by_slave: list (length n_devs) of per-slave value lists,
            each of length len(value_names).
        value_names: graph-node names, one per value.

    Returns:
        (slave_values, mean_values): slave_values is re-indexed
        [value][device] with a leading axis added to each tensor;
        mean_values[i] is the across-device mean of value i, computed on
        the master device.

    Bug fix: the per-slave naming previously read ``value_names[i]``
    using the *device* index ``i``; this mis-names the graph nodes and
    raises IndexError whenever n_devs exceeds the number of values. It
    now uses the value index ``j``.
    """
    n_values = len(value_names)
    slave_values = [None] * n_values
    for j in range(n_values):
        slave_values[j] = [None] * self.n_devs
        for i in range(self.n_devs):
            value_name = self.name + "/slave_" + str(i) + "/" + value_names[j]
            if self.slaves[i].dev is None:
                slave_values[j][i] = Creation('aug_dims')(values_by_slave[i][j], 0,
                                     name=value_name)
            else:
                with Device(self.slaves[i].dev):
                    slave_values[j][i] = Creation('aug_dims')(values_by_slave[i][j], 0,
                                         name=value_name)
    mean_values = [None] * n_values
    for i in range(n_values):
        value_name = self.name + "/batch/" + value_names[i]
        if self.dev is None:
            mean_values[i] = Creation('mean')(Creation('con')(slave_values[i], axis=0,
                             name=value_name + "_con"), axis=0,
                             name=value_name + "_mean")
        else:
            with Device(self.dev):
                mean_values[i] = Creation('mean')(Creation('con')(slave_values[i], axis=0,
                                 name=value_name + "_con"), axis=0,
                                 name=value_name + "_mean")
    return slave_values, mean_values
#-------------------------------------------------------------------------------
def _call_gradients(self): # overloading supervisor._call_gradients()
    """Construct gradients; in multi-device mode the master gradients are
    replaced with the device-mean of the slave gradients."""
    argout = supervisor._call_gradients(self, skip_reg_grad=not self.unit_dev)
    if self.unit_dev: return argout
    # Note the slaves gradients will already include any regularisation deltas
    slave_gradients = [_slave.gradients for _slave in self.slaves]
    self.slave_grad, self.gradients = self._call_slave_means(
        slave_gradients, self.gradient_names)
    # Rebuild (gradient, variable) pairs with the averaged gradients
    for i, grad in enumerate(self.gradients):
        self.grad_and_vars[i] = list(self.grad_and_vars[i])
        self.grad_and_vars[i][0] = grad
        self.grad_and_vars[i] = tuple(self.grad_and_vars[i])
    return self.gradients
#-------------------------------------------------------------------------------
def _call_preta_ops(self): # overloading supervisor._call_preta_ops()
    """
    For multidevice superivsed learning, this comprises of three stages:
    1. Assign clones with parameters (weights & biases) of original model.
    2. Calculate loss gradients of master (and therefore all slaves)
    3. Apply gradient updates to master parameters
    - only step 1 needs particularly special treatment, extending preta_ops
    """
    if self.unit_dev:
        return supervisor._call_preta_ops(self)
    self._call_param_ops()
    self._call_moment_ops()
    # Parameter updates are schedule-dependent
    self.lrate_ops = [None] * self.n_schedules # learning rate ops
    self.preta_ops = [None] * self.n_schedules # pre-training ops
    for i in range(self.n_schedules):
        with variable_scope(self.name + "/schedules/schedule_"+str(i), reuse=Flag('auto_reuse')):
            self.lrate_ops[i] = self.learning_rate.assign(self.schedules[i].learning_rate)
            # One combined op: set learning rate, batch size, push master
            # params and moments down to the slaves
            self.preta_ops[i] = Creation('combine')(self.lrate_ops[i], self.batch_size_op,
                                self.param_ops, self.moment_preta_ops)
    return self.preta_ops
#-------------------------------------------------------------------------------
def _call_param_ops(self):
    """Collate the assign-ops that copy every master parameter to every
    slave (n_devs * n_params ops in total)."""
    # Collate operations that assign master parameters
    self.param_ops = [None] * self.n_devs * self.n_params
    k = -1
    for _slave in self.slaves:
        for i in range(self.n_params):
            k += 1
            var_scope = self.name + '/assign_to_slaves/' + self.variable_names[i]
            with Scope('var', var_scope, Flag('auto_reuse')):
                if self.dev is None:
                    self.param_ops[k] = _slave.variables[i].assign(self.variables[i])
                else:
                    with Device(self.dev):
                        self.param_ops[k] = _slave.variables[i].assign(self.variables[i])
    return self.param_ops
#-------------------------------------------------------------------------------
def _call_moment_ops(self): # overloading supervisor._call_preta_ops()
    """
    Build moment synchronisation ops:
    - preta ops copy master moments down to each slave (before training);
    - posta ops copy the across-device mean moment back to the master
      (after training).
    Returns (moment_preta_ops, moment_posta_ops); both empty when the
    model has no moments.
    """
    # Call moment means and collate operations that assign master moments
    slave_moments = [None] * len(self.slaves)
    for i, slave in enumerate(self.slaves):
        slave_moments[i] = [list(moment_dict.values())[0] \
                            for moment_dict in slave.moments]
    self.slave_moments, self.mean_moments = self._call_slave_means(
        slave_moments, self.moment_names)
    self.moment_preta_ops = []
    self.moment_posta_ops = []
    if not self.n_moments:
        return self.moment_preta_ops, self.moment_posta_ops
    self.moment_preta_ops = [None] * self.n_devs * self.n_moments
    self.moment_posta_ops = [None] * self.n_devs * self.n_moments
    k = -1
    for _slave in self.slaves:
        for i in range(self.n_moments):
            k += 1
            master_object = list(self.moments[i].values())[0]
            slave_object = list(_slave.moments[i].values())[0]
            mean_object = self.mean_moments[i]
            var_scope_to = self.name + '/assign_to_slaves/' + self.moment_names[i]
            var_scope_from = self.name + '/assign_from_slaves/' + self.moment_names[i]
            with Scope('var', var_scope_to, Flag('auto_reuse')):
                if self.dev is None:
                    self.moment_preta_ops[k] = slave_object.assign(master_object)
                else:
                    with Device(self.dev):
                        self.moment_preta_ops[k] = slave_object.assign(master_object)
            with Scope('var', var_scope_from, Flag('auto_reuse')):
                if self.dev is None:
                    self.moment_posta_ops[k] = master_object.assign(mean_object)
                else:
                    with Device(self.dev):
                        self.moment_posta_ops[k] = master_object.assign(mean_object)
    return self.moment_preta_ops, self.moment_posta_ops
#-------------------------------------------------------------------------------
def _call_posta_ops(self): # overloading supervisor._call_post_ops
    """Combine the per-schedule apply-ops with post-training ops into
    self.train_ops."""
    argout = supervisor._call_posta_ops(self)
    if self.unit_dev: return argout
    # Call moment averages
    # NOTE(review): this assigns moment_preta_ops (master -> slave moment
    # copies) as the post-training ops; the comment above and the naming
    # suggest moment_posta_ops (mean-of-slaves -> master) was intended,
    # which would otherwise never be executed - confirm before changing.
    self.posta_ops = self.moment_preta_ops
    # Combine post-training ops with apply-ops
    self.train_ops = [None] * self.n_schedules
    for i in range(self.n_schedules):
        if self.dev is None:
            with variable_scope(self.name + "/schedules/schedule_"+str(i) + "/train", reuse=Flag('auto_reuse')):
                self.train_ops[i] = Creation('combine')(self.apply_ops[i], self.posta_ops)
        else:
            with Device(self.dev):
                with variable_scope(self.name + "/schedules/schedule_"+str(i) + "/train", reuse=Flag('auto_reuse')):
                    self.train_ops[i] = Creation('combine')(self.apply_ops[i], self.posta_ops)
    return self.train_ops
#-------------------------------------------------------------------------------
def use_schedule(self, using_schedule = -1, _update_dropout=True):
    """ overloads overseer.use_schedules """
    # This definition shadows the earlier two-argument use_schedule above.
    if self.unit_dev:
        return supervisor.use_schedule(self, using_schedule, _update_dropout)
    update_schedule = supervisor.use_schedule(self, using_schedule, False)
    if not update_schedule or not _update_dropout : return update_schedule
    # Propagate the schedule's dropout specification to every clone
    for slave in self.slaves:
        slave.work.set_dropout(self.session, self.schedules[self.using_schedule].dro)
    return update_schedule
#-------------------------------------------------------------------------------
def test(self, *args, **kwds): # overloading supervisor.test(*args)
    """Evaluate on test data; in multi-device mode the slave parameters
    are first synchronised with the master."""
    if self.param_ops is not None: # all we need to do update the slave parameters
        self.session.run(self.param_ops, feed_dict = {})
    return supervisor.test(self, *args, **kwds)
#-------------------------------------------------------------------------------
| StarcoderdataPython |
6699360 | <filename>utils/score_utils.py
import numpy as np
# calculate mean score for AVA dataset
def mean_score(scores):
    """Return the expected rating of an AVA score distribution.

    ``scores`` holds probabilities for ratings 1..10 along the last
    axis; the result is the probability-weighted mean rating, collapsing
    that axis.
    """
    ratings = np.arange(1, 11, 1)
    return np.sum(ratings * scores, axis=-1)
# calculate standard deviation of scores for AVA dataset
def std_score(scores):
    """Return the standard deviation of AVA score distribution(s).

    Accepts a single distribution of shape (10,) or a batch of shape
    (batch, 10), each holding probabilities for ratings 1..10.

    Bug fix: the original broadcast ``si - mean.T`` only worked for a
    single distribution (and always returned ``std[0]``); batched input
    crashed or produced wrong values. The mean is now broadcast with an
    explicit trailing axis so both cases work.

    Returns:
        A scalar for a single distribution, otherwise an array of shape
        (batch,).
    """
    scores = np.asarray(scores, dtype=float)
    single = scores.ndim == 1
    if single:
        scores = scores[np.newaxis, :]
    ratings = np.arange(1, 11, 1)
    # Expected rating per row, kept with a trailing axis for broadcasting
    mean = np.sum(ratings * scores, axis=-1, keepdims=True)
    variance = np.sum(((ratings - mean) ** 2) * scores, axis=-1)
    std = np.sqrt(variance)
    return std[0] if single else std
def earth_mover_distance(y_true, y_pred):
    """Squared (Wasserstein-2 style) earth mover's distance between two
    score distributions, averaged over samples.

    Both arguments hold probability mass along the last axis; the
    distance is the RMS difference of the cumulative distributions.
    """
    true_cdf = np.cumsum(y_true, axis=-1)
    pred_cdf = np.cumsum(y_pred, axis=-1)
    per_sample = np.sqrt(np.mean(np.square(np.abs(true_cdf - pred_cdf)), axis=-1))
    return np.mean(per_sample)
| StarcoderdataPython |
3361129 | #! python3
import docx
def getDocxText(filename):
    """Return the full text of a .docx file.

    Paragraph texts are joined with a blank line between consecutive
    paragraphs.
    """
    document = docx.Document(filename)
    paragraphs = [paragraph.text for paragraph in document.paragraphs]
    return '\n\n'.join(paragraphs)
| StarcoderdataPython |
4889325 | <reponame>mmulich/wildbook-ia
# -*- coding: utf-8 -*-
import numpy as np # NOQA
import utool as ut
# import logging
import itertools as it
import copy
import six
import collections
from wbia import constants as const
from wbia.algo.graph import nx_dynamic_graph
# from wbia.algo.graph import _dep_mixins
from wbia.algo.graph import mixin_viz
from wbia.algo.graph import mixin_helpers
from wbia.algo.graph import mixin_dynamic
from wbia.algo.graph import mixin_priority
from wbia.algo.graph import mixin_loops
from wbia.algo.graph import mixin_matching
from wbia.algo.graph import mixin_groundtruth
from wbia.algo.graph import mixin_simulation
from wbia.algo.graph import mixin_wbia
from wbia.algo.graph import nx_utils as nxu
import pandas as pd
from wbia.algo.graph.state import POSTV, NEGTV, INCMP, UNREV, UNKWN
from wbia.algo.graph.state import UNINFERABLE
from wbia.algo.graph.state import SAME, DIFF, NULL
import networkx as nx
import logging
import threading
# utool injection: module-local print/reload/profile helpers
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# Set True to enable extra connected-component debug checks
DEBUG_CC = False
# DEBUG_CC = True
def _rectify_decision(evidence_decision, meta_decision):
    """
    If evidence decision is not explicitly set, then meta decision is used to
    make a guess. Raises a ValueError if decisions are in incompatible states.
    """
    decision = evidence_decision
    if meta_decision == SAME:
        # Meta "same" contradicts negative evidence and promotes an
        # uninferable evidence state to positive.
        if decision == NEGTV:
            raise ValueError('evidence=negative and meta=same')
        if decision in UNINFERABLE:
            decision = POSTV
    elif meta_decision == DIFF:
        # Meta "diff" contradicts positive evidence and demotes an
        # uninferable evidence state to negative.
        if decision == POSTV:
            raise ValueError('evidence=positive and meta=diff')
        if decision in UNINFERABLE:
            decision = NEGTV
    return decision
class Feedback(object):
def _check_edge(infr, edge):
    """Raise ValueError if either endpoint of ``edge`` is not a known aid."""
    node1, node2 = edge
    if node1 not in infr.aids_set:
        raise ValueError('aid1=%r is not part of the graph' % (node1,))
    if node2 not in infr.aids_set:
        raise ValueError('aid2=%r is not part of the graph' % (node2,))
def add_feedback_from(infr, items, verbose=None, **kwargs):
    """Add feedback for many edges: either a DataFrame indexed by
    (aid1, aid2), or an iterable of edge/edge+state items."""
    if verbose is None:
        verbose = infr.verbose > 5
    if isinstance(items, pd.DataFrame):
        if list(items.index.names) == ['aid1', 'aid2']:
            for edge, data in items.iterrows():
                infr.add_feedback(edge=edge, verbose=verbose, **data)
        else:
            raise ValueError('Cannot interpret pd.DataFrame without edge index')
    else:
        # Dangerous if item length > 3
        for item in items:
            args = []
            if len(item) == 1:
                # Case where items=[edge1, edge2]
                # NOTE(review): nothing assigns `edge` for the len-1 case,
                # so even a valid 1-item falls into the `else: raise`
                # below - length-1 items always raise. Confirm intent.
                if isinstance(item[0], int) or len(item[0]) != 2:
                    raise ValueError('invalid edge')
            if len(item) == 2:
                # Case where items=[(edge1, state), (edge2, state)]
                if ut.isiterable(item[0]):
                    edge = item[0]
                    args = item[1:]
                else:
                    edge = item
            else:
                raise ValueError('invalid edge')
            # Case where items=[(u, v, state), (u, v, state)]
            # NOTE(review): unreachable - any len(item) != 2 already raised.
            if len(item) > 3:
                raise ValueError('pass in data as a dataframe or ' 'use kwargs')
            infr.add_feedback(edge, *args, verbose=verbose, **kwargs)
def edge_decision(infr, edge):
    r"""
    Gets a decision on an edge, either explicitly or implicitly

    CommandLine:
        python -m wbia.algo.graph.core edge_decision

    Doctest:
        >>> from wbia.algo.graph.core import * # NOQA
        >>> from wbia.algo.graph import demo
        >>> infr = demo.demodata_infr(num_pccs=1, p_incon=1)
        >>> decision = infr.edge_decision((1, 2))
        >>> print('decision = %r' % (decision,))
        >>> assert decision == POSTV
        >>> decision = infr.edge_decision((199, 299))
        >>> print('decision = %r' % (decision,))
        >>> assert decision == UNREV
    """
    # Missing attributes fall back to unreviewed / null, then the meta
    # decision is used to rectify an uninferable evidence decision.
    evidence_decision = infr.get_edge_attr(
        edge, 'evidence_decision', on_missing='default', default=UNREV
    )
    meta_decision = infr.get_edge_attr(
        edge, 'meta_decision', on_missing='default', default=NULL
    )
    decision = _rectify_decision(evidence_decision, meta_decision)
    return decision
def edge_decision_from(infr, edges):
    r"""
    Gets a decision for multiple edges

    Yields the rectified decision for each edge, in order.
    """
    edge_list = list(edges)
    evidence_iter = infr.gen_edge_values(
        'evidence_decision', edge_list, on_missing='default', default=UNREV
    )
    meta_iter = infr.gen_edge_values(
        'meta_decision', edge_list, on_missing='default', default=NULL
    )
    yield from map(_rectify_decision, evidence_iter, meta_iter)
def add_node_feedback(infr, aid, **attrs):
    """Write annotation-level feedback (quality, viewpoint, tags,
    multiple-flag) straight through to the ibs controller."""
    infr.print('Writing annot aid=%r %s' % (aid, ut.repr2(attrs)))
    ibs = infr.ibs
    ibs.set_annot_quality_texts([aid], [attrs['quality_texts']])
    ibs.set_annot_viewpoint_code([aid], [attrs['viewpoint_code']])
    ibs.overwrite_annot_case_tags([aid], [attrs['case_tags']])
    ibs.set_annot_multiple([aid], [attrs['multiple']])
@profile
def add_feedback(
    infr,
    edge,
    evidence_decision=None,
    tags=None,
    user_id=None,
    meta_decision=None,
    confidence=None,
    timestamp_c1=None,
    timestamp_c2=None,
    timestamp_s1=None,
    timestamp=None,
    verbose=None,
    priority=None,
):
    r"""
    Register a single edge review: records the feedback item, updates
    edge attributes, and (when inference is enabled) feeds the decision
    into the dynamic inference machinery.

    Args:
        edge (tuple): annotation id pair; order is rectified via nxu.e_.
        evidence_decision (str): decision from the media evidence.
        tags (list): user tags (e.g. photobomb).
        user_id (str): identity of the reviewer.
        meta_decision (str): decision from metadata.
        confidence (str): reviewer confidence code.
        timestamp_c1, timestamp_c2, timestamp_s1, timestamp:
            client/server review timestamps.
        verbose: temporarily overrides infr.verbose.
        priority: priority of the review request; only reviews with
            priority <= 1.0 count toward refresh criteria.

    Doctest:
        >>> from wbia.algo.graph.core import *  # NOQA
        >>> infr = testdata_infr('testdb1')
        >>> infr.add_feedback((5, 6), POSTV)
        >>> infr.add_feedback((5, 6), NEGTV, tags=['photobomb'])
        >>> infr.add_feedback((1, 2), INCMP)
        >>> print(ut.repr2(infr.internal_feedback, nl=2))
        >>> assert len(infr.external_feedback) == 0
        >>> assert len(infr.internal_feedback) == 2
        >>> assert len(infr.internal_feedback[(5, 6)]) == 2
        >>> assert len(infr.internal_feedback[(1, 2)]) == 1
    """
    prev_verbose = infr.verbose
    if verbose is not None:
        infr.verbose = verbose
    edge = aid1, aid2 = nxu.e_(*edge)
    if aid1 is None or aid2 is None:
        logger.error('add_feedback(aid1=None, aid2=None) called, ignoring')
        return
    if not infr.has_edge(edge):
        if True:
            # Allow new aids
            if not infr.graph.has_node(aid1):
                infr.add_aids([aid1])
            if not infr.graph.has_node(aid2):
                infr.add_aids([aid2])
        infr._check_edge(edge)
        infr.graph.add_edge(aid1, aid2)
    # Fill in defaults for unspecified review fields
    if evidence_decision is None:
        evidence_decision = UNREV
    if meta_decision is None:
        meta_decision = const.META_DECISION.CODE.NULL
    if confidence is None:
        confidence = const.CONFIDENCE.CODE.UNKNOWN
    if timestamp is None:
        timestamp = ut.get_timestamp('int', isutc=True)
    # Build a short log message from the non-None review fields
    msg = 'add_feedback ({}, {}), '.format(aid1, aid2)
    loc = locals()
    msg += ', '.join(
        [
            str(val)
            # key + '=' + str(val)
            for key, val in (
                (key, loc[key])
                for key in [
                    'evidence_decision',
                    'tags',
                    'user_id',
                    'confidence',
                    'meta_decision',
                ]
            )
            if val is not None
        ]
    )
    infr.print(msg, 2, color='white')
    if meta_decision == NULL:
        # TODO: check previous meta_decision and use that if its consistent
        # with the evidence decision.
        pass
    decision = _rectify_decision(evidence_decision, meta_decision)
    if decision == UNREV:
        # Unreviewing an edge deletes anything not yet committed
        if edge in infr.external_feedback:
            raise ValueError('External edge reviews cannot be undone')
        if edge in infr.internal_feedback:
            del infr.internal_feedback[edge]
        # Remove the edge from the queue if it is in there.
        if infr.queue:
            if edge in infr.queue:
                del infr.queue[edge]
    # Keep track of sequential reviews and set properties on global graph
    num_reviews = infr.get_edge_attr(edge, 'num_reviews', default=0)
    review_id = next(infr.review_counter)
    feedback_item = {
        'tags': tags,
        'evidence_decision': evidence_decision,
        'meta_decision': meta_decision,
        'timestamp_c1': timestamp_c1,
        'timestamp_c2': timestamp_c2,
        'timestamp_s1': timestamp_s1,
        'timestamp': timestamp,
        'confidence': confidence,
        'user_id': user_id,
        'num_reviews': num_reviews + 1,
        'review_id': review_id,
    }
    infr.internal_feedback[edge].append(feedback_item)
    infr.set_edge_attr(edge, feedback_item)
    if infr.test_mode:
        prev_decision = infr._get_current_decision(edge)
        infr._dynamic_test_callback(edge, decision, prev_decision, user_id)
    # must happen after dynamic test callback
    infr.set_edge_attr(edge, {'decision': decision})
    if infr.params['inference.enabled']:
        assert (
            infr.dirty is False
        ), 'need to recompute before dynamic inference continues'
        # Update priority queue based on the new edge
        action = infr.add_review_edge(edge, decision)
        if infr.test_mode:
            infr.test_state['action'] = action
        if False:
            # Disabled debug hook
            infr._print_debug_ccs()
    else:
        # Inference disabled: just record the edge and mark state dirty
        action = None
        infr.dirty = True
        infr._add_review_edge(edge, decision)
    if infr.params['inference.enabled'] and infr.refresh:
        # only add to criteria if this wasn't requested as a fix edge
        if priority is not None and priority <= 1.0:
            meaningful = bool({'merge', 'split'} & set(action))
            infr.refresh.add(meaningful, user_id, decision)
    if infr.test_mode:
        infr.metrics_list.append(infr.measure_metrics())
    infr.verbose = prev_verbose
def _print_debug_ccs(infr):
    """Log the sorted positive connected components, highlighting any
    inconsistent component in red."""
    for cc in infr.positive_components():
        assert ut.allsame(infr.node_labels(*cc))
    sorted_ccs = sorted(set(cc) for cc in infr.pos_graph.connected_components())
    parts = []
    for cc in sorted_ccs:
        text = repr(cc)
        if not infr.is_consistent(cc):
            text = ut.highlight_text(text, 'red')
        parts.append(text)
    logger.info('[' + ', '.join(parts) + ']')
@ut.classproperty
def feedback_keys(Infr):
    """ edge attribute keys used for feedback """
    # Data keys plus the bookkeeping keys maintained per review
    return Infr.feedback_data_keys + ['num_reviews', 'review_id']
@ut.classproperty
def feedback_data_keys(Infr):
    """ edge attribute keys used for feedback """
    # Keys a reviewer supplies directly (no derived bookkeeping)
    return [
        'evidence_decision',
        'tags',
        'user_id',
        'meta_decision',
        'timestamp_c1',
        'timestamp_c2',
        'timestamp_s1',
        'timestamp',
        'confidence',
    ]
@profile
def apply_feedback_edges(infr):
    r"""
    Transforms the feedback dictionaries into nx graph edge attributes

    CommandLine:
        python -m wbia.algo.graph.core apply_feedback_edges

    Doctest:
        >>> from wbia.algo.graph.core import * # NOQA
        >>> infr = testdata_infr('testdb1')
        >>> infr.reset_feedback()
        >>> infr.params['inference.enabled'] = False
        >>> #infr.add_feedback((1, 2), 'unknown', tags=[])
        >>> infr.add_feedback((1, 2), INCMP, tags=[])
        >>> infr.apply_feedback_edges()
        >>> print('edges = ' + ut.repr4(dict(infr.graph.edges)))
        >>> result = str(infr)
        >>> print(result)
        <AnnotInference(nNodes=6, nEdges=3, nCCs=4)>
    """
    infr.print('apply_feedback_edges', 1)
    # Transforms dictionary feedback into numpy array
    edges = []
    attr_lists = {key: [] for key in infr.feedback_keys}
    for edge, vals in infr.all_feedback_items():
        # hack for feedback rectification: keep only the latest item
        feedback_item = infr._rectify_feedback_item(vals)
        feedback_item['review_id'] = next(infr.review_counter)
        feedback_item['num_reviews'] = len(vals)
        # if feedback_item['decision'] == 'unknown':
        #     continue
        set1 = set(feedback_item.keys())
        set2 = set(attr_lists.keys())
        if set1 != set2:
            raise AssertionError(
                'Bad feedback keys: '
                + ut.repr2(ut.set_overlap_items(set1, set2, 'got', 'want'), nl=1)
                # ut.repr2(sorted(feedback_item.keys()), sv=True) + ' ' +
                # ut.repr2(sorted(attr_lists.keys()), sv=True)
            )
        for key, val in feedback_item.items():
            attr_lists[key].append(val)
        edges.append(edge)
    # Sanity: all attribute columns are aligned with the edge list
    assert ut.allsame(list(map(len, attr_lists.values())))
    assert len(edges) == len(next(iter(attr_lists.values())))
    # Put pair orders in context of the graph
    infr.print('_set_feedback_edges(nEdges=%d)' % (len(edges),), 3)
    # Ensure edges exist
    for edge in edges:
        if not infr.graph.has_edge(*edge):
            infr.graph.add_edge(*edge)
    # take evidence_decision and meta_decision into account
    decisions = [
        _rectify_decision(ed, md)
        for ed, md in zip(
            attr_lists['evidence_decision'], attr_lists['meta_decision']
        )
    ]
    # Move each edge into the review graph matching its decision state
    for state, es in ut.group_items(edges, decisions).items():
        infr._add_review_edges_from(es, state)
    for key, val_list in attr_lists.items():
        infr.set_edge_attrs(key, ut.dzip(edges, val_list))
    if infr.params['inference.enabled']:
        infr.apply_nondynamic_update()
def _rectify_feedback(infr, feedback):
    """Collapse each edge's feedback list to a single rectified item."""
    rectified = {}
    for edge, vals in feedback.items():
        rectified[edge] = infr._rectify_feedback_item(vals)
    return rectified
def _rectify_feedback_item(infr, vals):
    """ uses most recently use strategy """
    # The last item in the list is the most recent review
    return vals[-1]
def all_feedback_items(infr):
    """Yield (edge, feedback-list) pairs: external feedback first, then
    internal (uncommitted) feedback.

    Modernised: uses native dict iteration via ``yield from`` instead of
    the legacy ``six.iteritems`` shim; the project is Python-3 only.
    """
    yield from infr.external_feedback.items()
    yield from infr.internal_feedback.items()
def all_feedback(infr):
    """Return a default-dict combining external and internal feedback."""
    combined = ut.ddict(list)
    combined.update(infr.all_feedback_items())
    return combined
def clear_feedback(infr, edges=None):
    """ Delete all edges properties related to feedback """
    if edges is None:
        edges = infr.graph.edges()
    edges = list(edges)
    infr.print('clear_feedback len(edges) = %r' % (len(edges)), 2)
    infr.external_feedback = ut.ddict(list)
    infr.internal_feedback = ut.ddict(list)
    # Kill all feedback, remote edge labels, but leave graph edges alone
    keys = infr.feedback_keys + ['inferred_state']
    ut.nx_delete_edge_attr(infr.graph, keys, edges)
    # Move reviewed edges back into the unreviewed graph
    for key in (POSTV, NEGTV, INCMP):
        subgraph = infr.review_graphs[key]
        prev_edges = ut.compress(edges, list(subgraph.has_edges(edges)))
        subgraph.remove_edges_from(prev_edges)
        infr.review_graphs[UNREV].add_edges_from(prev_edges)
    # Reset derived redundancy / error bookkeeping
    infr.pos_redun_nids.clear()
    infr.neg_redun_metagraph.clear()
    infr.nid_to_errors.clear()
    if __debug__:
        infr.assert_disjoint_invariant()
def clear_edges(infr):
    """
    Removes all edges from the graph
    """
    # Drop edges from each per-decision review graph, then the main graph
    for subgraph in infr.review_graphs.values():
        subgraph.remove_edges_from(list(subgraph.edges()))
    infr.graph.remove_edges_from(list(infr.graph.edges()))
    # Reset derived redundancy / error bookkeeping
    for bookkeeping in (infr.pos_redun_nids, infr.neg_redun_metagraph, infr.nid_to_errors):
        bookkeeping.clear()
def reset_feedback(infr, mode='annotmatch', apply=True):
    """Reset feedback edges to the state of a SQL database table.

    Args:
        mode (str): 'annotmatch' or 'staging' — which table to reload
            external feedback from.
        apply (bool): if True, immediately re-apply the loaded feedback to
            the graph via ``apply_feedback_edges``.
    """
    infr.print('reset_feedback mode=%r' % (mode,), 1)
    infr.clear_feedback()
    if mode == 'annotmatch':
        infr.external_feedback = infr.read_wbia_annotmatch_feedback()
    elif mode == 'staging':
        infr.external_feedback = infr.read_wbia_staging_feedback()
    else:
        raise ValueError('no mode=%r' % (mode,))
    # Internal (unsynced) feedback is discarded entirely
    infr.internal_feedback = ut.ddict(list)
    if apply:
        infr.apply_feedback_edges()
def reset(infr, state='empty'):
    """
    Remove all edges from the graph and reset name labels.

    Args:
        state (str): 'empty' wipes name labels entirely; 'orig' would
            restore original labels (currently unimplemented).

    Ignore:
        >>> from wbia.algo.graph.core import *  # NOQA
        >>> from wbia.algo.graph import demo
        >>> infr = demo.demodata_infr(num_pccs=5)
        >>> assert len(list(infr.edges())) > 0
        >>> infr.reset(state='empty')
        >>> assert len(list(infr.edges())) == 0
    """
    infr.clear_edges()
    infr.clear_feedback()
    if state == 'empty':
        # Remove all edges, and component names
        infr.clear_name_labels()
    elif state == 'orig':
        raise NotImplementedError('unused')
        infr.reset_name_labels()  # unreachable; preserved from original
    else:
        raise ValueError('Unknown state=%r' % (state,))
def reset_name_labels(infr):
    """Restore every node's ``name_label`` to its ``orig_name_label``."""
    infr.print('reset_name_labels', 1)
    original = infr.get_node_attrs('orig_name_label')
    infr.set_node_attrs('name_label', original)
def clear_name_labels(infr):
    """Mark every annotation as unknown by assigning distinct name labels.

    Negated aids are guaranteed unique, so each node becomes its own name.
    """
    infr.print('clear_name_labels()', 1)
    fresh_names = {}
    for node, aid in infr.get_node_attrs('aid').items():
        fresh_names[node] = -aid
    infr.set_node_attrs('name_label', fresh_names)
class NameRelabel(object):
    """Mixin for assigning and rectifying name labels of positive components."""

    def node_label(infr, aid):
        """Return the current name label (PCC id) of a single node."""
        return infr.pos_graph.node_label(aid)

    def node_labels(infr, *aids):
        """Return the current name labels (PCC ids) for several nodes."""
        return infr.pos_graph.node_labels(*aids)
def _next_nid(infr):
if getattr(infr, 'nid_counter', None) is None:
nids = nx.get_node_attributes(infr.graph, 'name_label')
infr.nid_counter = max(nids)
infr.nid_counter += 1
new_nid = infr.nid_counter
return new_nid
def _rectify_names(infr, old_names, new_labels):
    """
    Finds the best assignment of old names based on the new groups each is
    assigned to.

    Args:
        old_names (list): previous name of each annotation (None = unknown).
        new_labels (list): new group label of each annotation (parallel list).

    Returns:
        tuple: (label_to_name, needs_assign, unknown_labels)
            - label_to_name: maps each new label to a rectified old name
            - needs_assign: new labels that got no old name and need one
            - unknown_labels: singleton groups that were already unknown

    Example inputs:
        old_names = [None, None, None, 1, 2, 3, 3, 4, 4, 4, 5, None]
        new_labels = [ 1, 2, 2, 3, 4, 5, 5, 6, 3, 3, 7, 7]
    """
    infr.print('rectifying name lists', 3)
    from wbia.scripts import name_recitifer
    newlabel_to_oldnames = ut.group_items(old_names, new_labels)
    unique_newlabels = list(newlabel_to_oldnames.keys())
    grouped_oldnames_ = ut.take(newlabel_to_oldnames, unique_newlabels)
    # Mark annots that are unknown and still grouped by themselves
    still_unknown = [len(g) == 1 and g[0] is None for g in grouped_oldnames_]
    # Remove nones for name rectifier
    grouped_oldnames = [
        [n for n in oldgroup if n is not None] for oldgroup in grouped_oldnames_
    ]
    new_names = name_recitifer.find_consistent_labeling(
        grouped_oldnames, verbose=infr.verbose >= 3, extra_prefix=None
    )
    unknown_labels = ut.compress(unique_newlabels, still_unknown)
    # A None in new_names means the rectifier could not reuse an old name
    new_flags = [n is None for n in new_names]
    label_to_name = ut.dzip(unique_newlabels, new_names)
    needs_assign = ut.compress(unique_newlabels, new_flags)
    return label_to_name, needs_assign, unknown_labels
def _rectified_relabel(infr, cc_subgraphs):
    """
    Relabel connected components, reusing as many existing names as possible.

    Args:
        cc_subgraphs (list): one subgraph per positive connected component.

    Returns:
        list: a new name label for each subgraph (parallel to input).
    """
    # Determine which names can be reused
    from wbia.scripts import name_recitifer
    infr.print('grouping names for rectification', 3)
    grouped_oldnames_ = [
        list(nx.get_node_attributes(subgraph, 'name_label').values())
        for count, subgraph in enumerate(cc_subgraphs)
    ]
    # Make sure negatives dont get priority
    grouped_oldnames = [
        [n for n in group if len(group) == 1 or n > 0] for group in grouped_oldnames_
    ]
    infr.print(
        'begin rectification of %d grouped old names' % (len(grouped_oldnames)), 2
    )
    new_labels = name_recitifer.find_consistent_labeling(
        grouped_oldnames, verbose=infr.verbose >= 3
    )
    infr.print('done rectifying new names', 2)
    # Placeholder '_extra_name*' strings mean no old name could be reused
    new_flags = [
        not isinstance(n, int) and n.startswith('_extra_name') for n in new_labels
    ]
    for idx in ut.where(new_flags):
        new_labels[idx] = infr._next_nid()
    for idx, label in enumerate(new_labels):
        if label < 0 and len(grouped_oldnames[idx]) > 1:
            # Remove negative ids for grouped items
            new_labels[idx] = infr._next_nid()
    return new_labels
@profile
def relabel_using_reviews(infr, graph=None, rectify=True):
    r"""
    Relabels nodes in graph based on positive connected components

    This will change all of the names on the nodes to be consistent while
    preserving any existing names as best as possible. If rectify=False,
    this will be faster, but the old names will not be preserved and each
    PCC will be assigned an arbitrary name.

    Note:
        if something messes up you can call infr.reset_labels_to_wbia() to
        reset node labels to their original values --- this will almost
        always put the graph in an inconsistent state --- but then you can
        this with rectify=True to fix everything up.

    Args:
        graph (nx.Graph, optional): only edges in `graph` are relabeled
            defaults to current graph.
        rectify (bool, optional): if True names attempt to remain
            consistent otherwise there are no restrictions on name labels
            other than that they are distinct.

    Returns:
        tuple: (num_names, num_inconsistent)
    """
    infr.print('relabel_using_reviews', 2)
    if graph is None:
        graph = infr.graph
    # Get subgraphs and check consistency
    cc_subgraphs = []
    num_inconsistent = 0
    for cc in infr.positive_components(graph=graph):
        cc_subgraphs.append(infr.graph.subgraph(cc))
        if not infr.is_consistent(cc):
            num_inconsistent += 1
    infr.print('num_inconsistent = %r' % (num_inconsistent,), 2)
    if infr.verbose >= 2:
        cc_sizes = list(map(len, cc_subgraphs))
        pcc_size_hist = ut.dict_hist(cc_sizes)
        pcc_size_stats = ut.get_stats(cc_sizes)
        if len(pcc_size_hist) < 8:
            infr.print('PCC size hist = %s' % (ut.repr2(pcc_size_hist),))
        infr.print('PCC size stats = %s' % (ut.repr2(pcc_size_stats),))
    if rectify:
        # Rectified relabeling, preserves grouping and labeling if possible
        new_labels = infr._rectified_relabel(cc_subgraphs)
    else:
        # Arbitrary relabeling, only preserves grouping
        if graph is infr.graph:
            # Use union find labels
            new_labels = {
                count: infr.node_label(next(iter(subgraph.nodes())))
                for count, subgraph in enumerate(cc_subgraphs)
            }
        else:
            new_labels = {
                count: infr._next_nid() for count, subgraph in enumerate(cc_subgraphs)
            }
    for count, subgraph in enumerate(cc_subgraphs):
        new_nid = new_labels[count]
        node_to_newlabel = ut.dzip(subgraph.nodes(), [new_nid])
        infr.set_node_attrs('name_label', node_to_newlabel)
    num_names = len(cc_subgraphs)
    infr.print('done relabeling', 3)
    return num_names, num_inconsistent
def connected_component_status(infr):
    r"""
    Summarize the current component state.

    Returns:
        dict: ``num_names_max`` (number of PCCs) and ``num_inconsistent``
            (number of PCCs currently in an error/recovery state).

    CommandLine:
        python -m wbia.algo.graph.core connected_component_status

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.graph.core import * # NOQA
        >>> infr = testdata_infr('testdb1')
        >>> infr.add_feedback_from([(2, 3, NEGTV), (5, 6, NEGTV), (1, 2, POSTV)])
        >>> status = infr.connected_component_status()
        >>> print(ut.repr3(status))
    """
    infr.print('checking status', 3)
    status = {
        'num_names_max': infr.pos_graph.number_of_components(),
        'num_inconsistent': len(infr.recovery_ccs),
    }
    infr.print('done checking status', 3)
    return status
class MiscHelpers(object):
    """Mixin with node/graph bookkeeping helpers for AnnotInference."""

    def _rectify_nids(infr, aids, nids):
        """Normalize ``nids`` to a list parallel to ``aids``.

        When nids is None, name ids come from the controller (or negated
        aids when there is no controller); a scalar nid is broadcast.
        """
        if nids is None:
            if infr.ibs is None:
                nids = [-aid for aid in aids]
            else:
                nids = infr.ibs.get_annot_nids(aids)
        elif ut.isscalar(nids):
            nids = [nids] * len(aids)
        return nids
def remove_aids(infr, aids):
    """
    Remove annotations from the graph.

    Returns:
        dict: split: indicates which PCCs were split by this action.

    Note:
        This may cause unintended splits!

    Ignore:
        >>> from graphid import demo, util
        >>> infr = demo.demodata_infr(num_pccs=5, pos_redun=1)
        >>> infr.refresh_candidate_edges()
        >>> infr.pin_node_layout()
        >>> before = infr.copy()
        >>> aids = infr.aids[::5]
        >>> splits = infr.remove_aids(aids)
        >>> assert len(splits['old']) > 0
        >>> infr.assert_invariants()
        >>> # xdoc: +REQUIRES(--show)
        >>> util.qtensure()
        >>> after = infr
        >>> before.show(fnum=1, pnum=(1, 2, 1), pickable=True)
        >>> after.show(fnum=1, pnum=(1, 2, 2), pickable=True)
    """
    infr.print('remove_aids len(aids)={}'.format(len(aids)), level=3)
    # Determine which edges are going to be removed
    remove_edges = nxu.edges_outgoing(infr.graph, aids)
    # Snapshot the PCCs so we can report which ones get split
    old_groups = list(infr.positive_components())
    # Remove from tertiary bookkeeping structures
    remove_idxs = list(ut.take(ut.make_index_lookup(infr.aids), aids))
    ut.delete_items_by_index(infr.orig_name_labels, remove_idxs)
    ut.delete_items_by_index(infr.aids, remove_idxs)
    infr.aids_set = set(infr.aids)
    # Remove from secondary bookkeeping structures
    ut.delete_dict_keys(infr.external_feedback, remove_edges)
    ut.delete_dict_keys(infr.internal_feedback, remove_edges)
    # Remove from core bookkeeping structures
    infr.graph.remove_nodes_from(aids)
    for graph in infr.review_graphs.values():
        graph.remove_nodes_from(aids)
    infr.queue.delete_items(remove_edges)
    # TODO: should refactor to perform a dynamic step, but in this case is
    # less work to use a bazooka to shoot a fly.
    infr.apply_nondynamic_update()
    # I'm unsure if relabeling is necessary
    infr.relabel_using_reviews()
    new_groups = list(infr.positive_components())
    delta = ut.grouping_delta(old_groups, new_groups)
    splits = delta['splits']
    n_old = len(splits['old'])
    n_new = len(list(ut.flatten(splits['new'])))
    infr.print(
        'removing {} aids split {} old PCCs into {} new PCCs'.format(
            len(aids), n_old, n_new
        )
    )
    return splits
def add_aids(infr, aids, nids=None):
    """
    Add annotations (and their name labels) to the graph, skipping any
    aids that are already present.

    CommandLine:
        python -m wbia.algo.graph.core add_aids --show

    Doctest:
        >>> from wbia.algo.graph.core import * # NOQA
        >>> aids_ = [1, 2, 3, 4, 5, 6, 7, 9]
        >>> infr = AnnotInference(ibs=None, aids=aids_, autoinit=True)
        >>> aids = [2, 22, 7, 9, 8]
        >>> nids = None
        >>> infr.add_aids(aids, nids)
        >>> result = infr.aids
        >>> print(result)
        >>> assert len(infr.graph) == len(infr.aids)
        ...
        [1, 2, 3, 4, 5, 6, 7, 9, 22, 8]
    """
    nids = infr._rectify_nids(aids, nids)
    assert len(aids) == len(nids), 'must correspond'
    if infr.aids is None:
        nids = infr._rectify_nids(aids, nids)
        # Set object attributes
        infr.aids = aids
        infr.aids_set = set(infr.aids)
        infr.orig_name_labels = nids
    else:
        # Filter down to aids not already tracked
        aid_to_idx = ut.make_index_lookup(infr.aids)
        orig_idxs = ut.dict_take(aid_to_idx, aids, None)
        new_flags = ut.flag_None_items(orig_idxs)
        new_aids = ut.compress(aids, new_flags)
        new_nids = ut.compress(nids, new_flags)
        # Extend object attributes
        infr.aids.extend(new_aids)
        infr.orig_name_labels.extend(new_nids)
        infr.aids_set.update(new_aids)
        infr.update_node_attributes(new_aids, new_nids)
    if infr.graph is not None:
        infr.graph.add_nodes_from(aids)
        for subgraph in infr.review_graphs.values():
            subgraph.add_nodes_from(aids)
        nids = set(infr.pos_graph.node_labels(*aids))
        infr.neg_metagraph.add_nodes_from(nids)
def update_node_attributes(infr, aids=None, nids=None):
    """Add nodes for ``aids`` to every graph and (re)set their attributes.

    Defaults to all known aids and original name labels when not given.
    """
    if aids is None:
        aids = infr.aids
        nids = infr.orig_name_labels
    assert aids is not None, 'must have aids'
    assert nids is not None, 'must have nids'
    # TODO: deprecate these mappings; they will always be identity I think
    node_to_aid = dict(zip(aids, aids))
    node_to_nid = dict(zip(aids, nids))
    ut.assert_eq_len(node_to_nid, node_to_aid)
    for target in [infr.graph] + list(infr.review_graphs.values()):
        target.add_nodes_from(aids)
    infr.set_node_attrs('aid', node_to_aid)
    infr.set_node_attrs('name_label', node_to_nid)
    infr.set_node_attrs('orig_name_label', node_to_nid)
def initialize_graph(infr, graph=None):
    """Create (or adopt) the main graph and per-decision review graphs.

    Args:
        graph (nx.Graph, optional): an existing graph to adopt; its edges
            are sorted into review graphs based on their decision attrs.
    """
    infr.print('initialize_graph', 1)
    if graph is None:
        infr.graph = infr._graph_cls()
    else:
        infr.graph = graph
    # One bookkeeping graph per decision state; POSTV tracks connected
    # components dynamically.
    infr.review_graphs[POSTV] = nx_dynamic_graph.DynConnGraph()
    infr.review_graphs[NEGTV] = infr._graph_cls()
    infr.review_graphs[INCMP] = infr._graph_cls()
    infr.review_graphs[UNKWN] = infr._graph_cls()
    infr.review_graphs[UNREV] = infr._graph_cls()
    if graph is not None:
        for u, v, d in graph.edges(data=True):
            evidence_decision = d.get('evidence_decision', UNREV)
            meta_decision = d.get('meta_decision', NULL)
            decision = _rectify_decision(evidence_decision, meta_decision)
            if decision in {POSTV, NEGTV, INCMP, UNREV, UNKWN}:
                infr.review_graphs[decision].add_edge(u, v)
            else:
                raise ValueError('Unknown decision=%r' % (decision,))
    infr.update_node_attributes()
@profile
def log_message(infr, msg, level=1, color=None):
    """Record ``msg`` in the internal log deque and optionally print it.

    Args:
        msg (str): message body; the calling function's name is prepended.
        level (int): printed to stdout only when ``infr.verbose >= level``.
        color (str, optional): terminal color; defaults to 'blue'.
    """
    if color is None:
        color = 'blue'
    if True:
        # Record the name of the calling function
        parent_name = ut.get_parent_frame().f_code.co_name
        msg = '[{}] '.format(parent_name) + msg
    if True:
        # Append the message to an internal log deque
        infr.logs.append((msg, color))
        if len(infr.logs) == infr.logs.maxlen:
            # The deque is full; the oldest entry was evicted, so keep the
            # read cursor used by latest_logs() in range
            infr.log_index = max(infr.log_index - 1, 0)
    if infr.verbose >= level:
        # Print the message to stdout
        loglevel = logging.INFO
        ut.cprint('[infr] ' + msg, color)
    else:
        loglevel = logging.DEBUG
    if infr.logger:
        # Send the message to a python logger
        infr.logger.log(loglevel, msg)
    logger.info(msg)

# ``infr.print`` is an alias for log_message throughout this module
print = log_message
def latest_logs(infr, colored=False):
    """Return log entries added since the last call (advances the cursor)."""
    start = infr.log_index
    infr.log_index = len(infr.logs)
    fresh = [infr.logs[i] for i in range(start, len(infr.logs))]
    if colored:
        # (message, color) tuples
        return fresh
    return [message for message, _ in fresh]
def dump_logs(infr):
    """Print the entire in-memory log buffer to stdout and the module logger."""
    logger.info('--- <LOG DUMP> ---')
    for message, color in infr.logs:
        ut.cprint('[infr] ' + message, color)
    logger.info('--- <\\LOG DUMP> ---')
class AltConstructors(object):
    """Mixin providing alternative constructors for AnnotInference."""

    _graph_cls = nx_dynamic_graph.NiceGraph

    @classmethod
    def from_pairs(cls, aid_pairs, attrs=None, ibs=None, verbose=False):
        """Build an inference object from a list of annotation-id pairs.

        Args:
            aid_pairs (list): (aid1, aid2) edges; self-edges are disallowed.
            attrs (dict, optional): maps attribute name to a per-edge list
                of values (parallel to ``aid_pairs``).
            ibs: controller, or None to use placeholder name labels.
            verbose (bool): verbosity passed through to the constructor.
        """
        import networkx as nx
        G = cls._graph_cls()
        # generator expression avoids materializing a throwaway list
        assert not any(a1 == a2 for a1, a2 in aid_pairs), 'cannot have self-edges'
        G.add_edges_from(aid_pairs)
        if attrs is not None:
            for key in attrs.keys():
                nx.set_edge_attributes(G, name=key, values=ut.dzip(aid_pairs, attrs[key]))
        infr = cls.from_netx(G, ibs=ibs, verbose=verbose)
        return infr
@classmethod
def from_netx(cls, G, ibs=None, verbose=False, infer=True):
    """Build an inference object around an existing networkx graph.

    Args:
        G (nx.Graph): graph whose nodes are annotation ids.
        ibs: controller, or None to use negated-aid placeholder name labels.
        infer (bool): if True, run the nondynamic update after adoption.
    """
    aids = list(G.nodes())
    if ibs is not None:
        nids = None
    else:
        nids = [-a for a in aids]
    infr = cls(ibs, aids, nids, autoinit=False, verbose=verbose)
    infr.initialize_graph(graph=G)
    # hack: recover original labels from the positive union-find structure
    orig_name_labels = [infr.pos_graph.node_label(a) for a in aids]
    infr.orig_name_labels = orig_name_labels
    infr.set_node_attrs('orig_name_label', ut.dzip(aids, orig_name_labels))
    if infer:
        infr.apply_nondynamic_update()
    return infr
@classmethod
def from_qreq_(cls, qreq_, cm_list, autoinit=False):
    """
    Create a AnnotInference object using a precomputed query / results

    Args:
        qreq_: query request containing qaids/daids and the controller.
        cm_list (list): precomputed chip-match results.
        autoinit (bool): passed through to the constructor.
    """
    aids = ut.unique(ut.flatten([qreq_.qaids, qreq_.daids]))
    nids = qreq_.get_qreq_annot_nids(aids)
    ibs = qreq_.ibs
    infr = cls(ibs, aids, nids, verbose=False, autoinit=autoinit)
    infr.cm_list = cm_list
    infr.qreq_ = qreq_
    return infr
def status(infr, extended=False):
    """Return an ordered dict of graph statistics.

    Args:
        extended (bool): if True, also report within/between-PCC counts
            for negative, incomparable, and unreviewed edges.
    """
    status_dict = ut.odict(
        [
            ('nNodes', len(infr.aids)),
            ('nEdges', infr.graph.number_of_edges()),
            ('nCCs', infr.pos_graph.number_of_components()),
            ('nPostvEdges', infr.pos_graph.number_of_edges()),
            ('nNegtvEdges', infr.neg_graph.number_of_edges()),
            ('nIncmpEdges', infr.incomp_graph.number_of_edges()),
            ('nUnrevEdges', infr.unreviewed_graph.number_of_edges()),
            ('nPosRedunCCs', len(infr.pos_redun_nids)),
            ('nNegRedunPairs', infr.neg_redun_metagraph.number_of_edges()),
            ('nInconsistentCCs', len(infr.nid_to_errors)),
        ]
    )
    if extended:
        def count_within_between(edges):
            # Split edges into those connecting nodes with the same PCC
            # label (within) vs different labels (between)
            n_within = 0
            n_between = 0
            for u, v in edges:
                nid1, nid2 = infr.pos_graph.node_labels(u, v)
                if nid1 == nid2:
                    n_within += 1
                else:
                    n_between += 1
            return n_within, n_between
        a, b = count_within_between(infr.neg_graph.edges())
        status_dict['nNegEdgesWithin'] = a
        status_dict['nNegEdgesBetween'] = b
        a, b = count_within_between(infr.incomp_graph.edges())
        status_dict['nIncompEdgesWithin'] = a
        status_dict['nIncompEdgesBetween'] = b
        a, b = count_within_between(infr.unreviewed_graph.edges())
        status_dict['nUnrevEdgesWithin'] = a
        status_dict['nUrevEdgesBetween'] = b
    return status_dict
def __nice__(infr):
if infr.graph is None:
return 'nAids=%r, G=None' % (len(infr.aids))
else:
fmt = 'nNodes={}, nEdges={}, nCCs={}'
msg = fmt.format(
len(infr.aids),
infr.graph.number_of_edges(),
infr.pos_graph.number_of_components(),
# infr.incomp_graph.number_of_edges(),
# infr.unreviewed_graph.number_of_edges(),
)
return msg
# return 'nAids={}, nEdges={}, nCCs={}'.format(
# len(infr.aids),
# infr.graph.number_of_edges(),
# infr.pos_graph.number_of_components()
# )
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotInference(
ut.NiceRepr,
# Old internal stuffs
AltConstructors,
MiscHelpers,
Feedback,
NameRelabel,
# New core algorithm stuffs
mixin_dynamic.NonDynamicUpdate,
mixin_dynamic.Recovery,
mixin_dynamic.Consistency,
mixin_dynamic.Redundancy,
mixin_dynamic.DynamicUpdate,
mixin_priority.Priority,
mixin_matching.CandidateSearch,
mixin_matching.InfrLearning,
mixin_matching.AnnotInfrMatching,
# General helpers
mixin_helpers.AssertInvariants,
mixin_helpers.DummyEdges,
mixin_helpers.Convenience,
mixin_helpers.AttrAccess,
# Simulation and Loops
mixin_simulation.SimulationHelpers,
mixin_loops.InfrReviewers,
mixin_loops.InfrLoops,
# Visualization
mixin_viz.GraphVisualization,
# plugging into IBEIS
mixin_groundtruth.Groundtruth,
mixin_wbia.IBEISIO,
mixin_wbia.IBEISGroundtruth,
# _dep_mixins._AnnotInfrDepMixin,
):
"""
class for maintaining state of an identification
Terminology and Concepts:
CommandLine:
wbia make_qt_graph_interface --show --aids=1,2,3,4,5,6,7
wbia AnnotInference:0 --show
wbia AnnotInference:1 --show
wbia AnnotInference:2 --show
wbia AnnotInference:0 --loginfr
Doctest:
>>> from wbia.algo.graph.core import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6]
>>> infr = AnnotInference(ibs, aids, autoinit=True, verbose=1000)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = True
>>> infr.initialize_visual_node_attrs()
>>> # Note that there are initially no edges
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
infr = <AnnotInference(nNodes=6, nEdges=0, nCCs=6)>
Example:
>>> # SCRIPT
>>> from wbia.algo.graph.core import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6, 7, 9]
>>> infr = AnnotInference(ibs, aids, autoinit=True)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = False
>>> infr.initialize_visual_node_attrs()
>>> # Note that there are initially no edges
>>> infr.show_graph(use_image=use_image)
>>> # But we can add nodes between the same names
>>> infr.ensure_mst()
>>> infr.show_graph(use_image=use_image)
>>> # Add some feedback
>>> infr.add_feedback((1, 4), NEGTV)
>>> infr.apply_feedback_edges()
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
Example:
>>> # SCRIPT
>>> from wbia.algo.graph.core import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
>>> aids = [1, 2, 3, 4, 5, 6, 7, 9]
>>> infr = AnnotInference(ibs, aids, autoinit=True)
>>> result = ('infr = %s' % (infr,))
>>> print(result)
>>> ut.quit_if_noshow()
>>> use_image = False
>>> infr.initialize_visual_node_attrs()
>>> infr.ensure_mst()
>>> # Add some feedback
>>> infr.add_feedback((1, 4), NEGTV)
>>> try:
>>> infr.add_feedback((1, 10), NEGTV)
>>> except ValueError:
>>> pass
>>> try:
>>> infr.add_feedback((11, 12), NEGTV)
>>> except ValueError:
>>> pass
>>> infr.apply_feedback_edges()
>>> infr.show_graph(use_image=use_image)
>>> ut.show_if_requested()
Ignore:
>>> import wbia
>>> import utool as ut
>>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
>>> infr = wbia.AnnotInference(ibs, 'all')
>>> class_ = infr
>>> fpath = None
>>> static_attrs = ut.check_static_member_vars(class_, fpath)
>>> uninitialized = set(infr.__dict__.keys()) - set(static_attrs)
"""
def __getstate__(self):
state = self.__dict__.copy()
# Dont pickle generators
state['_gen'] = None
state['logger'] = None
return state
def __init__(infr, ibs, aids=None, nids=None, autoinit=True, verbose=False):
    """
    Args:
        ibs: wbia controller object, a database name (str) to open, or None.
        aids (list | str | None): annotation ids to add, or 'all' to use
            every valid aid in the database. Defaults to an empty list.
        nids (list, optional): name labels corresponding to ``aids``.
        autoinit (bool | str): if truthy, initialize the graph; if it is a
            string, additionally reset feedback using that mode.
        verbose (int): verbosity level.
    """
    # BUG FIX: the default was the mutable literal ``aids=[]``. ``add_aids``
    # stores ``infr.aids = aids`` and later extends it in place, which would
    # silently mutate the shared default list across instances.
    if aids is None:
        aids = []
    infr.name = None
    infr.verbose = verbose
    # wbia controller and initial nodes
    # TODO: aids can be abstracted as a property that simply looks at the
    # nodes in infr.graph.
    if isinstance(ibs, six.string_types):
        import wbia
        ibs = wbia.opendb(ibs)
    # setup logging
    infr.logger = None
    do_logging = ut.get_argflag(('--loginfr', '--log-infr'))
    if do_logging:
        if ibs is not None:
            from os.path import join
            logdir = '.'
            logname = 'AnnotInference' + ut.timestamp()
            logger = logging.getLogger(logname)
            if not logger.handlers:
                fh = logging.FileHandler(join(logdir, logname + '.log'))
                logger.info('logger.handlers = {!r}'.format(logger.handlers))
                logger.addHandler(fh)
                logger.setLevel(logging.DEBUG)
            infr.logger = logger
    infr.logs = collections.deque(maxlen=10000)
    infr.log_index = 0
    infr.print('__init__ queue', level=1)
    # If not dirty, new feedback should dynamically maintain a consistent
    # state. If dirty it means we need to recompute connected compoments
    # before we can continue with dynamic review.
    infr.dirty = False
    infr.readonly = False
    infr.ibs = ibs
    infr.aids = None
    infr.aids_set = None
    infr.orig_name_labels = None
    # Underlying graph structure
    infr.graph = None
    infr.review_graphs = {
        POSTV: None,
        NEGTV: None,
        INCMP: None,
        UNKWN: None,
        UNREV: None,
    }
    infr.print('__init__ structures', level=1)
    # Criterion
    infr.queue = ut.PriorityQueue()
    infr.refresh = None
    infr.review_counter = it.count(0)
    infr.nid_counter = None
    # Dynamic Properties (requires bookkeeping)
    infr.nid_to_errors = {}
    infr.recovery_ccs = []
    # Recover graph holds positive edges of inconsistent PCCs
    infr.recover_graph = nx_dynamic_graph.DynConnGraph()
    # Set of PCCs that are positive redundant
    infr.pos_redun_nids = set([])
    # Represents the metagraph of negative edges between PCCs
    infr.neg_redun_metagraph = infr._graph_cls()
    # NEW VERSION: metagraph of PCCs with ANY number of negative edges
    # between them. The weight on the edge should represent the strength.
    infr.neg_metagraph = infr._graph_cls()
    infr.print('__init__ feedback', level=1)
    # This should represent The feedback read from a database. We do not
    # need to do any updates to an external database based on this data.
    infr.external_feedback = ut.ddict(list)
    # Feedback that has not been synced with the external database.
    # Once we sync, this is merged into external feedback.
    infr.internal_feedback = ut.ddict(list)
    # Bookkeeping
    infr.edge_truth = {}
    infr.task_probs = ut.ddict(dict)
    # A generator that maintains the state of the algorithm
    infr._gen = None
    infr._gen_lock = threading.Lock()
    # Computer vision algorithms
    infr.ranker = None
    infr.verifiers = None
    infr.print('__init__ configuration', level=1)
    # TODO: move to params
    infr.task_thresh_dict = {
        'zebra_grevys': {
            'match_state': {
                POSTV: 0.7732,  # GGR2 - 0.7732, Kaia CA - 0.7312
                NEGTV: 0.8605,  # GGR2 - 0.8605, Kaia CA - 0.8892
                INCMP: np.inf,
            },
            'photobomb_state': {'pb': np.inf, 'nopb': np.inf},
        },
        'zebra_mountain': {
            'match_state': {
                POSTV: 0.7767,
                NEGTV: 0.8640,
                INCMP: 0.8375,
            },
            'photobomb_state': {'pb': np.inf, 'nopb': np.inf},
        },
        'zebra_plains': {
            'match_state': {POSTV: np.inf, NEGTV: np.inf, INCMP: np.inf},
            'photobomb_state': {'pb': np.inf, 'nopb': np.inf},
        },
        'giraffe_reticulated': {
            'match_state': {
                POSTV: np.inf,  # GGR2 - 0.7460
                NEGTV: np.inf,  # GGR2 - 0.8876
                INCMP: np.inf,
            },
            'photobomb_state': {'pb': np.inf, 'nopb': np.inf},
        },
    }
    infr.task_thresh = None
    # Parameters / Configurations / Callbacks
    infr.callbacks = {
        'request_review': None,
        'review_ready': None,
        'review_finished': None,
    }
    infr.params = {
        'manual.n_peek': 1,
        'manual.autosave': True,
        'ranking.enabled': True,
        'ranking.ntop': 5,
        'algo.max_outer_loops': None,
        'algo.quickstart': False,
        'algo.hardcase': False,
        # Dynamic Inference
        'inference.enabled': True,
        'inference.update_attrs': True,
        # Termination / Refresh
        'refresh.window': 20,
        'refresh.patience': 72,
        'refresh.thresh': 0.052,
        'refresh.method': 'binomial',
        # Redundancy
        # if redun.enabled is True, then redundant edges will be ignored by
        # the priority queue and extra edges needed to achieve minimum
        # redundancy will be searched for if the queue is empty.
        'redun.enabled': True,
        # positive/negative k
        'redun.pos': 2,
        'redun.neg': 2,
        # does positive/negative augmentation
        'redun.enforce_pos': True,
        'redun.enforce_neg': True,
        # prevents user interaction in final phase
        'redun.neg.only_auto': True,
        # Only review CCs connected by confidence less than this value
        # a good values is 'pretty_sure'
        'queue.conf.thresh': None,
        # Autoreviewer params
        'autoreview.enabled': True,
        'autoreview.prioritize_nonpos': True,
    }
    infr._viz_image_config = {
        'in_image': False,
        'thumbsize': 221,
    }
    infr.print('__init__ storage', level=1)
    infr.verifier_params = {}  # TODO
    infr.ranker_params = {
        'K': 5,
    }
    # Developer modes (consoldate this)
    infr.test_mode = False
    infr.simulation_mode = False
    # set to the current phase of the main loop (mostly for testing)
    infr.phase = None
    infr.loop_phase = None
    # Testing state
    infr.metrics_list = None
    infr.test_state = None
    infr.test_gt_pos_graph = None
    infr.nid_to_gt_cc = None
    infr.node_truth = None
    infr.real_n_pcc_mst_edges = None
    # External: Can we remove these?
    infr.cm_list = None
    infr.vsone_matches = {}
    infr.qreq_ = None
    infr.manual_wgt = None
    infr.print('__init__ aids', level=1)
    if aids == 'all':
        aids = ibs.get_valid_aids()
    infr.add_aids(aids, nids)
    infr.print('__init__ autoinit', level=1)
    if autoinit:
        infr.initialize_graph()
        if isinstance(autoinit, six.string_types):
            infr.reset_feedback(autoinit)
    infr.print('__init__ done', level=1)
def subparams(infr, prefix):
    """
    Return the params whose keys start with ``<prefix>.``, with the prefix
    stripped from each key.

    Doctest:
        >>> from wbia.algo.graph.core import *
        >>> import wbia
        >>> infr = wbia.AnnotInference(None)
        >>> result = ut.repr2(infr.subparams('refresh'))
        >>> print(result)
        {'method': 'binomial', 'patience': 72, 'thresh': 0.052, 'window': 20}
    """
    dotted = prefix + '.'
    stripped = {}
    for key, value in infr.params.items():
        if key.startswith(dotted):
            stripped[key[len(dotted):]] = value
    return stripped
def copy(infr):
    """Return an independent copy of this AnnotInference object.

    The ibs controller and the verifier/ranker algorithm objects are shared
    (shallow); graph state, feedback, and bookkeeping structures are
    deep-copied so the copy can be mutated independently.
    """
    # shallow copy ibs
    infr2 = AnnotInference(
        infr.ibs,
        copy.deepcopy(infr.aids),
        copy.deepcopy(infr.orig_name_labels),
        autoinit=False,
        verbose=infr.verbose,
    )
    # shallow algorithm classes
    infr2.verifiers = infr.verifiers
    infr2.ranker = infr.ranker
    infr2.graph = infr.graph.copy()
    infr2.external_feedback = copy.deepcopy(infr.external_feedback)
    infr2.internal_feedback = copy.deepcopy(infr.internal_feedback)
    infr2.cm_list = copy.deepcopy(infr.cm_list)
    infr2.qreq_ = copy.deepcopy(infr.qreq_)
    infr2.nid_counter = infr.nid_counter
    infr2.recover_graph = copy.deepcopy(infr.recover_graph)
    infr2.pos_redun_nids = copy.deepcopy(infr.pos_redun_nids)
    infr2.neg_redun_metagraph = copy.deepcopy(infr.neg_redun_metagraph)
    infr2.neg_metagraph = copy.deepcopy(infr.neg_metagraph)
    infr2.review_graphs = copy.deepcopy(infr.review_graphs)
    infr2.nid_to_errors = copy.deepcopy(infr.nid_to_errors)
    infr2.recovery_ccs = copy.deepcopy(infr.recovery_ccs)
    infr2.readonly = infr.readonly
    infr2.dirty = infr.dirty
    infr2.test_mode = infr.test_mode  # (was assigned twice in the original)
    infr2.simulation_mode = infr.simulation_mode
    # BUG FIX: these previously assigned to ``infr`` (the source object)
    # instead of ``infr2`` (the copy), so the copy kept a default queue and
    # params while the source's were pointlessly replaced.
    infr2.queue = copy.deepcopy(infr.queue)
    infr2.params = copy.deepcopy(infr.params)
    infr2._viz_image_config = infr._viz_image_config.copy()
    if infr.test_mode:
        infr2.test_state = copy.deepcopy(infr.test_state)
        infr2.metrics_list = copy.deepcopy(infr.metrics_list)
    return infr2
def subgraph(infr, aids):
    """
    Makes a new inference object that is a subset of the original.

    Note, this is not robust, be careful. The subgraph should be treated as
    read only. Do not commit any reviews made from here.

    Args:
        aids (list): annotation ids to keep in the subgraph.
    """
    orig_name_labels = list(infr.gen_node_values('orig_name_label', aids))
    infr2 = AnnotInference(
        infr.ibs, aids, orig_name_labels, autoinit=False, verbose=infr.verbose
    )
    # deep copy the graph structure
    infr2.graph = infr.graph.subgraph(aids).copy()
    infr2.readonly = True
    infr2.verifiers = infr.verifiers
    infr2.ranker = infr.ranker
    # BUG FIX: this previously assigned to ``infr.params`` (the source
    # object) instead of ``infr2``, leaving the subgraph with default
    # parameters and needlessly replacing the source's params dict.
    infr2.params = copy.deepcopy(infr.params)
    infr2._viz_image_config = infr._viz_image_config.copy()
    infr2.edge_truth = {
        e: infr.edge_truth[e] for e in infr2.graph.edges() if e in infr.edge_truth
    }
    # TODO: internal/external feedback; nid_to_errors, recover_graph,
    # pos_redun_nids, and neg_redun_metagraph are not carried over yet.
    infr2.nid_counter = infr.nid_counter
    infr2.dirty = True
    infr2.cm_list = None
    infr2.qreq_ = None
    infr2.review_graphs = {}
    for k, g in infr.review_graphs.items():
        if g is None:
            infr2.review_graphs[k] = None
        elif k == POSTV:
            infr2.review_graphs[k] = g.subgraph(aids, dynamic=True)
        else:
            infr2.review_graphs[k] = g.subgraph(aids)
    return infr2
def set_config(infr, config, **kw):
    # Placeholder: configuration updates are not implemented (no-op).
    pass
def testdata_infr(defaultdb='PZ_MTEST'):
    """Construct a small AnnotInference over a test database (for doctests)."""
    import wbia
    ibs = wbia.opendb(defaultdb=defaultdb)
    aids = [1, 2, 3, 4, 5, 6]
    infr = AnnotInference(ibs, aids, autoinit=True)
    return infr
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import theano.tensor as TT
import theano
import lasagne
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.serializable import Serializable
from rllab.core.network import MLP
from rllab.misc import ext
from rllab.misc import logger
import scipy.io as sio
import numpy as np
import pickle
class Mlp_Discriminator(LasagnePowered, Serializable):
def __init__(
    self,
    iteration,
    disc_window=16,
    disc_joints_dim=20,
    learning_rate=0.005,
    train_threshold=0.25,  # train when average_disc_loss > train_threshold
    a_max=1.0,
    a_min=1.0,
    batch_size=64,
    iter_per_train=1,
    decent_portion=0.8,
    hidden_sizes=(32, 32),
    hidden_nonlinearity=NL.tanh,
    output_nonlinearity=None,
    downsample_factor=1,
    disc_network=None,
    reg=0.08,
    mocap_framerate=120,
    mujoco_apirate=20,
    exper_spec='none'
):
    """Build the MLP discriminator network and its train/score functions.

    Args:
        iteration (int): total number of outer training iterations; used
            with ``decent_portion`` to size the annealing schedule.
        disc_window (int): number of frames in a discriminator window.
        disc_joints_dim (int): per-frame joint feature dimension.
        train_threshold (float): only train when the average loss on
            generated data exceeds this value.
        reg (float): weight of the logit-Bernoulli entropy regularizer.
    """
    Serializable.quick_init(self, locals())
    # BUG FIX: was hard-coded to 64, silently ignoring the ``batch_size``
    # argument.
    self.batch_size = batch_size
    self.iter_per_train = iter_per_train
    self.disc_window = disc_window
    self.downsample_factor = downsample_factor
    self.disc_joints_dim = disc_joints_dim
    self.disc_window_downsampled = (self.disc_window - 1) // self.downsample_factor + 1
    # Flattened input dimension of the discriminator network
    self.disc_dim = self.disc_window_downsampled * self.disc_joints_dim
    self.end_iter = int(iteration * decent_portion)
    self.iter_count = 0
    self.learning_rate = learning_rate
    self.train_threshold = train_threshold
    self.reg = reg
    self.rate_factor = mocap_framerate // mujoco_apirate
    # Per-call training losses, periodically pickled for analysis
    self.disc_data = {'avg_loss_data': [],
                      'avg_loss_gen': []}
    self.exper_spec = exper_spec
    out_dim = 1
    target_var = TT.imatrix('targets')
    # create network
    if disc_network is None:
        disc_network = MLP(
            input_shape=(self.disc_dim,),
            output_dim=out_dim,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
        )
    self._disc_network = disc_network
    disc_score = disc_network.output_layer
    self.disc_score = disc_network.output_layer
    obs_var = disc_network.input_layer.input_var
    disc_var, = L.get_output([disc_score])
    self._disc_var = disc_var
    # Reward is the sigmoid probability that the input is real mocap data
    exp_reward = TT.nnet.sigmoid(disc_var)
    LasagnePowered.__init__(self, [disc_score])
    self._f_disc = ext.compile_function(
        inputs=[obs_var],
        outputs=[exp_reward],
        log_name="f_discriminate_forward",
    )
    params = L.get_all_params(disc_network, trainable=True)
    # Binary cross-entropy with an entropy bonus to keep outputs uncertain
    batch_loss = TT.nnet.binary_crossentropy(TT.nnet.sigmoid(disc_var), target_var)
    batch_entropy = self.logit_bernoulli_entropy(disc_var)
    loss = (batch_loss - self.reg * batch_entropy).mean()
    updates = lasagne.updates.adam(loss, params, learning_rate=self.learning_rate)
    self._f_disc_train = ext.compile_function(
        inputs=[obs_var, target_var],
        outputs=[loss],
        updates=updates,
        log_name="f_discriminate_train"
    )
    self._f_disc_loss = ext.compile_function(
        inputs=[obs_var, target_var],
        outputs=[loss],
        log_name="f_discriminate_loss"
    )
    self.data = self.load_data()
    # Annealing schedule over the first ``decent_portion`` of training
    self.a = np.linspace(a_min, a_max, self.end_iter)
def get_reward(self, observation):
    """Return the discriminator's sigmoid score for one observation window."""
    # Promote a flat vector to a 1-row batch so the rest of the pipeline
    # can always assume 2-D input.
    if len(observation.shape) == 1:
        observation = observation.reshape((1, observation.shape[0]))
    disc_ob = self.get_disc_obs(observation)
    assert disc_ob.shape[1] == self.disc_dim
    score = self._f_disc(disc_ob)[0]
    return score[0][0]
def train(self, observations):
    '''
    Fit the discriminator for `iter_per_train` minibatch steps against the
    mocap reference data, then log and persist the average losses.

    observations: length trj_num list of np.array with shape (trj_length, dim)
    '''
    #print("state len: ", len(observations))
    logger.log("fitting discriminator...")
    loss={"obs":[], "mocap":[]}
    for i in range(self.iter_per_train):
        batch_obs = self.get_batch_obs(observations, self.batch_size)
        #print(batch_obs[10]/3.14*180)
        batch_mocap = self.get_batch_mocap(self.batch_size)
        disc_obs = self.get_disc_obs(batch_obs)
        disc_mocap = batch_mocap
        #print("\n\n\n")
        #print(disc_obs[10])
        #print("\n\n")
        #print(disc_mocap[10])
        #print("\n\n\n")
        # Generator (policy) samples are labelled 0, mocap (expert) samples 1.
        X = np.vstack((disc_obs, disc_mocap))
        targets = np.zeros([2*self.batch_size, 1])
        targets[self.batch_size :]=1
        obs_loss = self._f_disc_loss(disc_obs, np.zeros([self.batch_size, 1]))
        mocap_loss = self._f_disc_loss(disc_mocap, np.ones([self.batch_size, 1]))
        # Only keep training while the discriminator still struggles on the
        # generator samples; below the threshold it is "good enough" and
        # updating further would over-power the generator.
        if np.mean(obs_loss) > self.train_threshold:
            self._f_disc_train(X, targets)
            logger.log("fitted!")
        else:
            logger.log("yield training: avg_loss under threshold")
        loss["obs"].append(obs_loss)
        loss["mocap"].append(mocap_loss)
    avg_disc_loss_obs = np.mean(loss["obs"])
    avg_disc_loss_mocap = np.mean(loss["mocap"])
    logger.record_tabular("averageDiscriminatorLoss_mocap", avg_disc_loss_mocap)
    logger.record_tabular("averageDiscriminatorLoss_obs", avg_disc_loss_obs)
    self.disc_data['avg_loss_data'].append(avg_disc_loss_mocap)
    self.disc_data['avg_loss_gen'].append(avg_disc_loss_obs)
    # NOTE(review): the file handle passed to pickle.dump is never closed
    # explicitly; consider a `with open(...)` block.
    pickle.dump(self.disc_data, open("model/"+self.exper_spec+"/disc_data.pickle","wb"))
def load_data(self, fileName='MocapData.mat'):
    """Load the mocap reference set and keep only the joint columns used
    by the discriminator; returns an (n, disc_joints_dim) array."""
    raw = sio.loadmat(fileName)['data'][0]
    #X = np.concatenate([np.asarray(frame) for frame in data],0)
    # Replicate a single reference pose into a pseudo-dataset.
    onepose = raw[5][342]
    X = np.vstack([onepose] * 1000)
    self.usedDim = [4, 5, 6, 51, 52, 53, 27, 28, 29, 18, 19, 20,
                    17, 14, 38, 26, 15, 16, 32, 33]
    X = X[:, self.usedDim]
    if X.shape[1] != self.disc_joints_dim:
        # Dimension mismatch sanity print (kept from original behavior).
        print("\n", X.shape[1], self.disc_joints_dim)
    return X
def get_batch_mocap(self, batch_size):
    '''
    Sample a batch of stacked, downsampled mocap windows.

    return np.array of shape (batch_size, mocap_dim*window)
    '''
    stride = self.downsample_factor * self.rate_factor
    starts = np.random.randint(
        0, self.data.shape[0] - self.disc_window * self.rate_factor,
        size=batch_size)
    batch_mocap = np.hstack(
        [self.data[starts + i * stride]
         for i in range(self.disc_window_downsampled)])
    assert batch_mocap.shape[0] == batch_size
    assert batch_mocap.shape[1] == self.disc_dim
    return batch_mocap
# def get_disc_mocap(self, mocap_batch):
# '''
# param mocap_batch np.array of shape (batch_size, mocap_dim*window)
# return np.array of ashape (batch_size, disc_dim)
# '''
# temp = mocap_batch[:, self.usedDim]
# return temp
def inc_iter(self):
    # Advance the annealing-schedule iteration counter consumed by get_a().
    self.iter_count+=1
def get_a(self):
    """Annealing coefficient for the current iteration, clamped to the
    last schedule value once the schedule is exhausted."""
    idx = min(self.iter_count, self.end_iter - 1)
    return self.a[idx]
def get_batch_obs(self, observations, batch_size):
    '''
    Sample windows of consecutive observations across all trajectories.

    params observations: length trj_num list of np.array with shape (trj_length, dim)
    params batch_size: batch_size of obs
    return a np.array with shape (batch_size, disc_window*observation_dim)
    '''
    stacked = np.vstack(observations)
    ob_dim = stacked.shape[1]
    starts = np.random.randint(0, stacked.shape[0] - self.disc_window,
                               size=batch_size)
    batch_obs = np.hstack(
        [stacked[starts + i] for i in range(self.disc_window)])
    assert batch_obs.shape[0] == batch_size
    assert len(batch_obs.shape) == 2
    assert batch_obs.shape[1] == self.disc_window * ob_dim
    return batch_obs
def get_disc_obs(self, observation):
    """
    Map raw observation windows to the discriminator's mocap joint space.

    param observation nparray with shape (n, window*obs_dim)
    return observation nparray with shape (n, disc_dim)
    """
    rows = []
    for sample in observation:
        frames = sample.reshape((self.disc_window, -1))
        rows.append(self.convertToMocap(frames).reshape(-1))
    return np.asarray(rows)
def convertToMocap(self, states):
    """Convert simulator joint states (radians) to 62-DoF mocap frames
    (degrees), keep every `downsample_factor`-th frame, and return only
    the `usedDim` columns.
    """
    frames = []
    # print(states.shape)
    # Radians -> degrees (mocap data is in degrees).
    c=180.0/np.pi
    # Write each frame
    states=states*c
    for state,frame in zip(states,range(len(states))):
        if frame % self.downsample_factor ==0:
            # Fill in the data that we have; the +7 offset skips the root
            # pose entries at the start of the simulator state vector.
            s = list(state)
            # Full mocap frame; joints we have no data for stay zero.
            f = np.zeros(62)
            # right humerus
            f[4] = s[17+7]
            f[5] = s[16+7]
            f[6] = s[15+7]
            # left humerus
            f[51] = s[21+7]
            f[52] = s[20+7]
            f[53] = s[19+7]
            # left femur
            f[27] = s[11+7]
            f[28] = s[10+7]
            f[29] = s[9+7]
            # right femur
            f[18] = s[5+7]
            f[19] = s[4+7]
            f[20] = s[3+7]
            # radius
            f[17] = s[22+7]
            f[14] = s[18+7]
            # tibia
            f[38] = s[12+7]
            f[26] = s[6+7]
            # left foot
            f[15] = s[14+7]
            f[16] = s[13+7]
            # right foot
            f[32] = s[8+7]
            f[33] = s[7+7]
            frames.append(f)
    return np.asarray(frames)[:,self.usedDim]
def set_all_params(self, params):
    # Overwrite every parameter of the discriminator network (checkpoint restore).
    L.set_all_param_values(L.get_all_layers(self.disc_score), params)
def get_all_params(self):
    # Snapshot of all discriminator parameters (checkpoint save).
    return L.get_all_param_values(self.disc_score)
def logit_bernoulli_entropy(self, disc_var):
    # Entropy of a Bernoulli parameterized by logits l:
    # H = (1 - sigmoid(l)) * l + softplus(-l), a numerically stable form.
    ent = (1.-TT.nnet.sigmoid(disc_var))*disc_var + TT.nnet.softplus(-disc_var)
    return ent
11252249 | import os.path
import pandas as pd
from stock.globalvar import HIST_DIR
def load_csv(symbol):
    """Read the historical-data CSV for *symbol* with all columns kept as
    strings and the first column used as the index."""
    csv_path = os.path.join(HIST_DIR["stock"], symbol)
    return pd.read_csv(csv_path, index_col=0, engine='c', dtype=str)
| StarcoderdataPython |
3347153 | <filename>setup.py
"""setuptools module for solana.py."""
from setuptools import find_namespace_packages, setup
# Development-only dependencies (installed via `pip install solana[dev]`).
extras_require = {
    "dev": [
        "black",
        "pytest",
        "pylint",
        "pytest-tornasync",
        "mypy",
        "pydocstyle",
        "flake8",
        "isort",
        "pytest-docker",
        "sphinx",
        "twine",
        "setuptools",
        "bump2version",
    ]
}

# Use the README as the PyPI long description.
with open("README.md", "r") as file_handle:
    README_MD = file_handle.read()

setup(
    name="solana",
    # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
    version="0.9.0",
    author="<NAME>",
    # BUG FIX: was `author_mail`, which setuptools silently ignores; the
    # correct keyword is `author_email`.
    author_email="<EMAIL>",
    description="""Solana.py""",
    long_description=README_MD,
    long_description_content_type="text/markdown",
    include_package_data=True,
    install_requires=[
        "base58>=2.0.1, <3.0.0",
        "construct>=2.10.56, <3.0.0",
        "PyNaCl>=1.4.0, <2.0.0",
        "requests>=2.24.0, <3.0.0",
        "typing_extensions",
    ],
    extras_require=extras_require,
    python_requires=">=3.7, <4",
    keywords="solana blockchain web3",
    license="MIT",
    package_data={"solana": ["py.typed"]},
    packages=find_namespace_packages(exclude=["tests", "tests.*"]),
    url="https://github.com/michaelhly/solanapy",
    zip_safe=False,  # required per mypy
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
| StarcoderdataPython |
12841010 | from ..models.base_agent import BaseAgent
class MockAgent(BaseAgent):
    """Test double that replays a scripted sequence of board moves and
    records every call made to it for later inspection."""

    def __init__(self, testapp, moves, game_id, player):
        self.testapp = testapp
        # Call records: positional and keyword arguments per play_round call.
        self.args = []
        self.kwargs = []
        # Scripted moves, consumed one per round.
        self.moves = iter(moves)
        # Set once play_round is called after the script is exhausted.
        self.past_end = False
        super().__init__(game_id, player)

    def play_round(self, *args, **kwargs):
        # Record how we were called so tests can assert on it.
        self.args.append(args)
        self.kwargs.append(kwargs)
        try:
            return self.put_board(next(self.moves))
        except StopIteration:
            self.past_end = True
            return {}
def test_home_endpoint(testapp):
    # Smoke test: the root route responds successfully.
    response = testapp.get('/')
    assert response.status_code == 200
def test_games_endpoint(testapp):
    # The games listing responds and exposes an 'ids' collection.
    response = testapp.get('/v1.0/games')
    assert response.status_code == 200
    assert 'ids' in response.json
def test_agent_play_no_moves(testapp):
    # With empty move scripts: player 1 gets asked to play (and immediately
    # exhausts its empty script), player 2 is never called.
    # NOTE(review): presumably the game engine invokes player 1 during agent
    # construction — confirm against BaseAgent.
    game = testapp.post_json('/v1.0/games').json
    player1 = MockAgent(testapp, [], game['id'], 1)
    player2 = MockAgent(testapp, [], game['id'], 2)
    assert player1.agent_id != player2.agent_id
    assert player1.args.pop() == ()
    assert player1.kwargs.pop() == {}
    assert player1.past_end
    assert not player2.args
    assert not player2.kwargs
    assert not player2.past_end
# def test_agent_play_through(testapp):
# player1_moves = [tuple(map(bytes, (
# (12, 6, 2, 10, 4, 2, 6, 12),
# (8, 8, 8, 8, 8, 8, 8, 8),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 9, 0, 0, 0),
# (9, 9, 9, 9, 0, 9, 9, 9),
# (13, 7, 3, 11, 5, 3, 7, 13)))), tuple(map(bytes, (
#
# (12, 6, 2, 10, 4, 2, 6, 12),
# (8, 8, 8, 8, 8, 0, 8, 8),
# (0, 0, 0, 0, 0, 8, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 11),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 9, 0, 0, 0),
# (9, 9, 9, 9, 0, 9, 9, 9),
# (13, 7, 3, 0, 5, 3, 7, 13))))]
# player1_moves = [player1_moves[0]]
# player2_moves = [tuple(map(bytes, (
# (12, 6, 2, 4, 10, 2, 6, 12),
# (8, 8, 8, 0, 8, 8, 8, 8),
# (0, 0, 0, 8, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 9, 0, 0, 0, 0, 0),
# (9, 9, 0, 9, 9, 9, 9, 9),
# (13, 7, 3, 5, 11, 3, 7, 13)))), tuple(map(bytes, (
#
# (12, 6, 2, 4, 0, 2, 6, 12),
# (8, 8, 8, 0, 8, 8, 8, 8),
# (0, 0, 0, 8, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, 0, 0, 0),
# (10, 0, 0, 0, 0, 0, 0, 0),
# (0, 0, 9, 0, 0, 0, 0, 0),
# (9, 9, 0, 9, 9, 9, 9, 9),
# (13, 7, 3, 5, 11, 3, 7, 13))))]
# player2_moves = []
# game = testapp.post_json('/v1.0/games').json
# player1 = MockAgent(testapp, player1_moves, game['id'], 1)
# player2 = MockAgent(testapp, player2_moves, game['id'], 2)
# assert len(player1.args) == 1
# assert len(player2.args) == 1
# assert not player1.past_end
# assert player2.past_end
| StarcoderdataPython |
9733082 | #
# pystylus/ast/importnode.py
#
"""
Contains the two import nodes in the stylus AST
"""
class ImportNode:
    """AST node for a stylus ``@import xxx`` directive whose target does
    NOT end in ``.css``.

    Such a statement tells stylus to inline the referenced stylus file
    into the current one.
    """

    def __init__(self, name):
        # Import target exactly as written after "@import".
        self.name = name
class LiteralImportNode:
    """AST node for a literal ``@import xxx.css`` directive.

    The statement is rendered as-is into the CSS output instead of being
    resolved by stylus.
    """

    def __init__(self, name):
        # Import target exactly as written after "@import".
        self.name = name
| StarcoderdataPython |
120693 | <gh_stars>1-10
import discord
import helper
import numpy
numpy.random.seed(9999)
import tensorflow
from tensorflow import keras
async def find_friends(user, guild, dictionary):
    """
    Creates dictionary of floats representing similarity percentage mapped to Discord user mentions based on the user and guild

    Arguments:
        user (User): The target Discord user
        guild (Guild): The Discord server the user belongs to
        dictionary (Dictionary): The dictionary to reference words as ints
    """
    user_messages, other_messages = await helper.gather_messages(user, guild)
    # Drop users with too little history to classify meaningfully.
    other_messages = dict((id, messages) for (id, messages) in other_messages.items() if len(messages) >= 10)
    user_ints = []
    other_ints = {}
    # Encode the target user's messages and pad them to fixed length.
    user_ints = dictionary.messages_to_ints(user_messages)
    user_ints = keras.preprocessing.sequence.pad_sequences(user_ints, value = dictionary.get_number("<PAD>"), padding = "post", maxlen = 2000) # max character count for a discord message is 2000
    for id, messages in other_messages.items():
        other_ints[id] = dictionary.messages_to_ints(messages)
    # Every training sample is a positive example of "sounds like the user".
    labels = numpy.array([1] * len(user_ints), dtype=int)
    #print(user_ints)
    # Fewer epochs the more data we have (floor of 10).
    epochs = max(10, 30 - int(len(labels) / 100))
    model = create_model(len(dictionary.word_map))
    print("Fitting model")
    model.fit(user_ints, labels, epochs = epochs, verbose = 1)
    print("Making predictions")
    return await make_predictions(other_ints, model)
def create_model(vocab):
    """
    Build and compile a small binary text classifier over an integer vocabulary.

    Arguments:
        vocab (int): The size of the vocabulary
    """
    model = keras.Sequential([
        keras.layers.Embedding(vocab, 16),
        keras.layers.GlobalAveragePooling1D(),
        keras.layers.Dense(16, activation=tensorflow.nn.relu),
        keras.layers.Dense(16, activation=tensorflow.nn.relu),
        keras.layers.Dense(1, activation=tensorflow.nn.sigmoid),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["acc"])
    return model
async def make_predictions(users_ints, model):
    """
    Build a dictionary of per-user average positive prediction scores.

    Arguments:
        users_ints ({int : [[int]]}): A dictionary of user messages converted to a list of numpy int arrays
        model (Model): The Keras model to make predictions
    """
    predictions = {}
    for user_id, int_messages in users_ints.items():
        total, positives = 0, 0
        for encoded in int_messages:
            score = helper.reduce_nest(model.predict(encoded))
            if score > 0:
                positives += 1
                total += score
        # Average over positive-scoring messages; 0 when there were none.
        predictions[user_id] = total / positives if positives > 0 else 0
    return predictions
78914 | import ray
import torch
from collections import namedtuple, defaultdict
from itertools import count, chain
from flatland.envs.agent_utils import RailAgentStatus
from agent.PPO.PPORollout import PPORollout, PPOTransition
class PPORunner():
    """Collects one full episode of PPO rollouts from a Flatland rail env,
    tracking each agent's transitions plus the observations of the agents
    it encounters ("neighbours")."""

    def _select_actions(self, state, done):
        # Handles with an observation this step that are not done yet.
        valid_handles = list()
        internal_state = dict()
        interesting_handles = set()
        for handle in state.keys():
            if done[handle]: continue
            interesting_handles.add(handle)
            for opp_handle in self.env.obs_builder.encountered[handle]:
                interesting_handles.add(opp_handle)
        # asks for a lot of extra observations
        for handle in interesting_handles:
            if handle in state:
                internal_state[handle] = state[handle]
            else:
                internal_state[handle] = torch.tensor(self.env.obs_builder._get_internal(handle), dtype=torch.float)
        for handle in state.keys(): # not blind
            if done[handle]: continue
            valid_handles.append(handle)
            # Rebuild this agent's neighbour observation list from scratch.
            self.neighbours_state[handle].clear()
            for opp_handle in self.env.obs_builder.encountered[handle]:
                if opp_handle == -1:
                    # zeros only for the sake of unified tensor size. TODO there is attention now...
                    self.neighbours_state[handle].append(torch.zeros(self.env.obs_builder.state_sz))
                else:
                    self.neighbours_state[handle].append(internal_state[opp_handle])
        action_dict, log_probs = self.controller.select_action_batch(valid_handles, state, self.neighbours_state)
        return action_dict, log_probs

    def _save_transitions(self, state, action_dict, log_probs, next_state, reward, done, step):
        # Remember the most recent observation/action per agent so that
        # agents which skip steps still form complete transitions.
        self.prev_valid_state.update(state)
        self.prev_valid_action.update(action_dict)
        self.prev_valid_action_log_prob.update(log_probs)
        for handle in state.keys():
            self.prev_step[handle] = step
        for handle in next_state.keys(): # all not blind
            if not handle in self.prev_valid_state: # just departed
                continue
            self.rollout[handle].append_transition(PPOTransition(
                self.prev_valid_state[handle],
                self.prev_valid_action[handle],
                self.prev_valid_action_log_prob[handle],
                next_state[handle],
                reward[handle],
                done[handle],
                torch.stack(self.neighbours_state[handle]),
                # Number of env steps covered by this transition.
                step + 1 - self.prev_step[handle],
            ))

    def _wrap(self, d, dtype=torch.float):
        # In-place conversion of a dict's values to torch tensors.
        for key, value in d.items():
            d[key] = torch.tensor(value, dtype=dtype)
        return d

    # samples one episode
    def run(self, env, controller):
        self.env = env
        self.controller = controller
        state = self._wrap(self.env.reset())
        done = defaultdict(int)
        self.prev_valid_state = state
        self.prev_valid_action = dict()
        self.rollout = defaultdict(PPORollout)
        self.prev_valid_action_log_prob = dict()
        self.neighbours_state = defaultdict(list)
        self.prev_step = torch.zeros(len(self.env.agents), dtype=torch.long)
        steps_done = 0
        while True:
            action_dict, log_probs = self._select_actions(state, done)
            next_state, reward, done, info, _ = self.env.step(action_dict)
            next_state, reward, done = self._wrap(next_state), self._wrap(reward), self._wrap(done)
            self._save_transitions(state, action_dict, log_probs, next_state, reward, done, steps_done)
            state = next_state
            steps_done += 1
            if done['__all__']:
                break
        percent_done = sum([1 for agent in self.env.agents if agent.status == RailAgentStatus.DONE_REMOVED]) / self.env.n_agents
        return self.rollout, { "reward": self.env.get_total_reward(),
                               "percent_done": percent_done,
                               "steps_done": steps_done}
| StarcoderdataPython |
1837783 | from __future__ import annotations
from collections.abc import Collection
from typing import Callable, Protocol, TypeVar
Element = TypeVar("Element")
Comparator = Callable[[Element, Element], bool]

# Strict orderings used by heap implementations: MIN keeps the smallest
# element on top, MAX the largest.
MIN_COMPARATOR: Comparator = lambda x, y: x < y
# BUG FIX: this was `x < y`, identical to MIN_COMPARATOR; a max-heap
# comparator must rank x above y when x > y.
MAX_COMPARATOR: Comparator = lambda x, y: x > y
class Node(Protocol[Element]):
    """Structural interface for a heap node: anything exposing an ``element``."""
    # Payload stored in this node.
    element: Element

# FEATURE: At the moment you cannot define associated types in a Protocol...
# https://github.com/python/typing/issues/548
# https://github.com/python/mypy/issues/7790
class Heap(Collection[Element], Protocol[Element]):
    """Structural interface for a mergeable, addressable heap."""

    # Ordering predicate: comparator(a, b) is True when `a` outranks `b`.
    comparator: Comparator[Element]

    def decrease_node(self, node: Node[Element]) -> None:
        """Re-establish heap order after *node*'s key was decreased."""
        pass

    def delete_node(self, node: Node[Element]) -> None:
        """Remove *node* from the heap."""
        pass

    def merge(self, heap: Heap[Element]) -> None:
        """Absorb all elements of *heap* into this heap."""
        pass

    def peek_node(self) -> Node[Element]:
        """Return (without removing) the top-priority node."""
        pass

    def pop_node(self) -> Node[Element]:
        """Remove and return the top-priority node."""
        pass

    def push_node(self, node: Node[Element]) -> None:
        """Insert *node* into the heap."""
        pass
| StarcoderdataPython |
3404703 | <gh_stars>10-100
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os.path
import argparse
import vgg_pth
import resnet_pth
import torch.nn
import vgg_msp
import resnet_msp
import mindspore.nn
def convert_vgg(pretrained_file, result):
    """Port pretrained PyTorch VGG16 weights into a MindSpore checkpoint.

    pretrained_file: path of the source .pth state dict.
    result: path of the MindSpore .ckpt file to write.
    """
    src = vgg_pth.vgg16()
    if torch.cuda.is_available():
        src.load_state_dict(torch.load(pretrained_file))
    else:
        src.load_state_dict(torch.load(pretrained_file, map_location=torch.device("cpu")))
    dst = vgg_msp.vgg16()
    # VGG has no BatchNorm buffers, so a positional 1:1 parameter copy works.
    for src_param, dst_param in zip(src.parameters(), dst.get_parameters()):
        dst_param.set_data(mindspore.Tensor(src_param.detach().numpy()))
    mindspore.save_checkpoint(dst, result)
def convert_resnet(pretrained_file, result):
    """Port pretrained PyTorch ResNet50 weights (including BatchNorm running
    statistics) into a MindSpore checkpoint.

    pretrained_file: path of the source .pth state dict.
    result: path of the MindSpore .ckpt file to write.
    """
    resnet50_pth = resnet_pth.resnet50()
    resnet50_msp = resnet_msp.resnet50()
    if torch.cuda.is_available():
        resnet50_pth.load_state_dict(torch.load(pretrained_file), strict=False)
    else:
        resnet50_pth.load_state_dict(torch.load(pretrained_file, map_location=torch.device("cpu")), strict=False)
    # Trainable parameters, in PyTorch iteration order.
    p_pth_list = list()
    for p_pth in resnet50_pth.parameters():
        p_pth_list.append(p_pth.cpu().detach().numpy())
    # BatchNorm running mean/var buffers (not in .parameters()), in module order.
    bn_list = list()
    for m in resnet50_pth.modules():
        if isinstance(m, torch.nn.BatchNorm2d):
            bn_list.append(m.running_mean.cpu().numpy())
            bn_list.append(m.running_var.cpu().numpy())
    # MindSpore exposes the BN statistics as parameters named "moving_*";
    # NOTE(review): this relies on both frameworks yielding parameters in
    # matching order — confirm when either model definition changes.
    p_index = 0
    bn_index = 0
    for n_msp, p_msp in resnet50_msp.parameters_and_names():
        if "moving_" not in n_msp:
            p_msp.set_data(mindspore.Tensor(p_pth_list[p_index]))
            p_index += 1
        else:
            p_msp.set_data(mindspore.Tensor(bn_list[bn_index]))
            bn_index += 1
    mindspore.save_checkpoint(resnet50_msp, result)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", choices=["vgg", "resnet"], type=str)
    parser.add_argument("--pth_file", type=str, default="vgg16_20M.pth", help="input pth file")
    parser.add_argument("--msp_file", type=str, default="vgg16_pretrained.ckpt", help="output msp file")
    args = parser.parse_args()
    if not os.path.exists(args.pth_file):
        raise FileNotFoundError(args.pth_file)
    if args.model == "vgg":
        convert_vgg(args.pth_file, args.msp_file)
    elif args.model == "resnet":
        convert_resnet(args.pth_file, args.msp_file)
    else:
        # argparse `choices` normally prevents this branch; fail loudly
        # instead of printing "unknown model" and then falling through to
        # the "success" message below (the previous behavior).
        parser.error("unknown model: %r" % args.model)
    print("success")
| StarcoderdataPython |
8005069 | import sys
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution
import torch
from torch.nn.parameter import Parameter
class Co_GCN(nn.Module):
    """Two-layer graph convolutional network with a learned, softmax-normalized
    mixing parameter on the first layer."""

    def __init__(self, input_size, hidden_size, output_size, dropout=0.3, indep_weights=False):
        super(Co_GCN, self).__init__()
        self.gc1 = GraphConvolution(input_size, hidden_size, indep_weights=indep_weights)
        self.gc2 = GraphConvolution(hidden_size, output_size, indep_weights=indep_weights)
        self.dropout = dropout
        # NOTE(review): nn.Softmax() without `dim` relies on the deprecated
        # implicit-dimension behavior — confirm the intended axis.
        self.m = nn.Softmax()

    def forward(self, adj, x):
        # NOTE(review): re-wrapping gc1.pi in a fresh Parameter on every
        # forward re-applies softmax to an already-normalized tensor and
        # replaces the Parameter object the optimizer holds — confirm this
        # repeated renormalization is intentional.
        self.gc1.pi = Parameter(self.m(self.gc1.pi))
        # print(self.gc1.pi)
        x = F.relu(self.gc1(adj, x))
        # print(x)
        x = F.dropout(x, self.dropout)
        x = self.gc2(adj, x)
        return x
class MLP(nn.Module):
    """Two-layer perceptron with ReLU and optional dropout between layers."""

    def __init__(self, input_size=200, hidden_size=150, output_size=2, dropout=0):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)
        # Dropout probability applied after the hidden activation.
        self.dropout = dropout

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.dropout(hidden, self.dropout)
        return self.fc2(hidden)
| StarcoderdataPython |
329458 | <reponame>vermavis/cbtool
#!/usr/bin/env python3
#/*******************************************************************************
# Copyright (c) 2012 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#/*******************************************************************************
'''
Created on Oct 20, 2015
GCE Object Operations Library
@author: <NAME>
'''
import httplib2
import httplib2shim
import traceback
from time import time, sleep
from random import randint
from socket import gethostbyname
from lib.auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
from lib.auxiliary.data_ops import str2dic, is_number, DataOpsException
from lib.remote.ssh_ops import get_ssh_key
from .shared_functions import CldOpsException, CommonCloudFunctions
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GCEExceptionHttpError
class GceCmds(CommonCloudFunctions) :
# GCE uses the same image IDs for all regions and all zones.
# Attempting to discover them more than once invalidates the
# last attempt at discovering them by inadvertenly rewriting the image IDs
# with random numbers, so let's make sure we only do it once.
base_images_checked = False
'''
TBD
'''
@trace
def __init__ (self, pid, osci, expid = None) :
    '''
    Initialize adapter state; the actual GCE API connection is created
    lazily by connect().
    '''
    CommonCloudFunctions.__init__(self, pid, osci)
    self.pid = pid
    self.osci = osci
    # googleapiclient service object once connect() succeeds.
    self.gceconn = False
    self.instances_project= None
    self.images_project = None
    self.instance_info = None
    self.expid = expid
    self.additional_rc_contents = ''
    # Authorized Http objects, keyed by connection id.
    self.http_conn = {}
@trace
def get_description(self) :
    """Return the human-readable name of this cloud adapter."""
    return "Google Compute Engine"
@trace
def connect(self, project, secret_key, zone = "us-east1-b", http_conn_id = None) :
    '''
    Authenticate against GCE, build the compute service object and resolve
    *zone*. Returns (status, message, region URL) or raises CldOpsException.

    project may be "images_project,instances_project" or a single project id.
    '''
    try :
        _status = 100
        if not self.instances_project :
            project = project.split(',')
            if len(project) == 2 :
                self.images_project, self.instances_project = project
            else :
                self.instances_project = project[0]
                self.images_project = self.instances_project
        _credentials = GoogleCredentials.get_application_default()
        if _credentials.create_scoped_required():
            _credentials = _credentials.create_scoped('https://www.googleapis.com/auth/compute')
        # Normalize the connection id; "common" is the shared default.
        _http_conn_id = "common"
        if http_conn_id :
            _http_conn_id = http_conn_id
        if _http_conn_id not in self.http_conn :
            self.http_conn[_http_conn_id] = _credentials.authorize(http = httplib2shim.Http())
        # BUG FIX: previously indexed self.http_conn with the raw
        # `http_conn_id` argument (KeyError when None) instead of the
        # normalized `_http_conn_id` computed above.
        self.gceconn = build('compute', 'v1', http = self.http_conn[_http_conn_id])
        _zone_list = self.gceconn.zones().list(project=self.instances_project).execute(http = self.http_conn[_http_conn_id])["items"]
        _zone_info = False
        for _idx in range(0,len(_zone_list)) :
            if _zone_list[_idx]["description"] == zone :
                _zone_info = _zone_list[_idx]
                _zone_hostname = _zone_info["region"]
                _msg = "Selected zone is " + str(_zone_info["description"])
                cbdebug(_msg)
                break
        if _zone_info :
            _status = 0
        else :
            _fmsg = "Unknown " + self.get_description() + " zone (" + zone + ")"
    except GCEExceptionHttpError as obj:
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = int(obj.resp.status)
        _fmsg = str(obj)
    except Exception as msg :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _fmsg = str(msg)
        _status = 23
    finally :
        if _status :
            _msg = self.get_description() + " connection failure: " + _fmsg
            cberr(_msg)
            raise CldOpsException(_msg, _status)
        else :
            _msg = self.get_description() + " connection successful."
            cbdebug(_msg)
            return _status, _msg, _zone_hostname
@trace
def test_vmc_connection(self, cloud_name, vmc_name, access, credentials, key_name, \
                        security_group_name, vm_templates, vm_defaults, vmc_defaults) :
    '''
    Validate that a VMC (zone) is usable: connect, generate the rc file,
    check networks, the SSH key and — once per process — the base images.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        self.connect(access, credentials, vmc_name, vmc_name)
        self.generate_rc(cloud_name, vmc_defaults, self.additional_rc_contents)
        _prov_netname_found, _run_netname_found = self.check_networks(vmc_name, vm_defaults)
        _key_pair_found = self.check_ssh_key(vmc_name, self.determine_key_name(vm_defaults), vm_defaults, False, vmc_name)
        # Image discovery rewrites image names to random ids, so only do it
        # once per process (see class-level comment on base_images_checked).
        if not GceCmds.base_images_checked :
            _detected_imageids = self.check_images(vmc_name, vm_templates, vmc_name, vm_defaults)
            if not (_run_netname_found and _prov_netname_found and _key_pair_found) :
                _msg = "Check the previous errors, fix it (using GCE's web"
                _msg += " GUI (Google Developer's Console) or gcloud CLI utility"
                _status = 1178
                raise CldOpsException(_msg, _status)
            GceCmds.base_images_checked = True
            if len(_detected_imageids) :
                _status = 0
            else :
                _status = 1
        else :
            _status = 0
    except CldOpsException as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _fmsg = str(obj.msg)
        _status = 2
    except Exception as msg :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _fmsg = str(msg)
        _status = 23
    finally :
        _status, _msg = self.common_messages("VMC", {"name" : vmc_name }, "connected", _status, _fmsg)
        return _status, _msg
@trace
def check_networks(self, vmc_name, vm_defaults) :
    """GCE networks need no pre-flight validation here; report both the
    provisioning and the running network as found."""
    _prov_netname = vm_defaults["netname"]
    _run_netname = vm_defaults["netname"]
    return True, True
@trace
def check_images(self, vmc_name, vm_templates, http_conn_id, vm_defaults) :
    '''
    Map image names in the VM templates to GCE image ids (rewriting
    vm_templates in place), then delegate to base_check_images.
    '''
    self.common_messages("IMG", { "name": vmc_name }, "checking", 0, '')
    _map_name_to_id = {}
    _map_id_to_name = {}
    _registered_image_list = []
    _registered_images = self.gceconn.images().list(project=self.images_project).execute(http = self.http_conn[http_conn_id])
    if "items" in _registered_images :
        _registered_image_list = _registered_images["items"]
    _registered_imageid_list = []
    for _registered_image in _registered_image_list :
        _registered_imageid_list.append(_registered_image["id"])
        _map_name_to_id[_registered_image["name"]] = _registered_image["id"]
    for _vm_role in list(vm_templates.keys()) :
        _imageid = str2dic(vm_templates[_vm_role])["imageid1"]
        if _imageid != "to_replace" :
            if _imageid in _map_name_to_id and _map_name_to_id[_imageid] != _imageid :
                vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
            else :
                # Unknown image name: synthesize a random placeholder id so
                # the base check can flag it as undetected.
                _map_name_to_id[_imageid] = "00000" + ''.join(["%s" % randint(0, 9) for num in range(0, 14)])
                vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
            _map_id_to_name[_map_name_to_id[_imageid]] = _imageid
    _detected_imageids = self.base_check_images(vmc_name, vm_templates, _registered_imageid_list, _map_id_to_name, vm_defaults)
    return _detected_imageids
@trace
def discover_hosts(self, obj_attr_list, start) :
    '''
    Populate host-related attributes on the VMC object. GCE does not expose
    individual hypervisor hosts, so this records an effectively empty host
    list.
    '''
    _host_uuid = obj_attr_list["cloud_vm_uuid"]
    obj_attr_list["host_list"] = {}
    obj_attr_list["hosts"] = ''
    # NOTE: ''.split(',') yields [''], so host_count below ends up as 1
    # even though no real hosts are discovered.
    obj_attr_list["initial_hosts"] = ''.split(',')
    obj_attr_list["host_count"] = len(obj_attr_list["initial_hosts"])
    return True
@trace
def vmccleanup(self, obj_attr_list) :
    '''
    Delete all CloudBench-owned instances in this zone (looping until none
    remain RUNNING), then delete all detached CloudBench-owned disks.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        self.connect(obj_attr_list["access"], obj_attr_list["credentials"], obj_attr_list["name"], obj_attr_list["name"])
        self.common_messages("VMC", obj_attr_list, "cleaning up vms", 0, '')
        _pre_existing_instances = False
        _running_instances = True
        while _running_instances :
            _running_instances = False
            _instance_list = self.get_instances(obj_attr_list, "vm", "all")
            for _instance in _instance_list :
                # Only touch instances whose name carries this user's
                # CloudBench prefix.
                if _instance["name"].count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"].lower()) and _instance["status"] == 'RUNNING' :
                    self.gceconn.instances().delete(project = self.instances_project, \
                                                    zone = obj_attr_list["name"], \
                                                    instance = _instance["name"]).execute(http = self.http_conn[obj_attr_list["name"]])
                    _running_instances = True
            sleep(int(obj_attr_list["update_frequency"]))
        # Extra settle time before touching disks.
        sleep(int(obj_attr_list["update_frequency"])*5)
        self.common_messages("VMC", obj_attr_list, "cleaning up vvs", 0, '')
        _volume_list = self.get_instances(obj_attr_list, "vv", "all")
        if len(_volume_list) :
            for _volume in _volume_list :
                if _volume["name"].count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"].lower()) :
                    # "users" present means the disk is still attached.
                    if not "users" in _volume :
                        self.gceconn.disks().delete(project = self.instances_project, \
                                                    zone = obj_attr_list["name"], \
                                                    disk = _volume["name"]).execute(http = self.http_conn[obj_attr_list["name"]])
                        _msg = _volume["id"] + " detached "
                        _msg += "... was deleted"
                        cbdebug(_msg)
                    else:
                        _msg = _volume["id"] + ' '
                        _msg += "... still attached and could not be deleted"
                        cbdebug(_msg)
        else :
            _msg = "No volumes to remove"
            cbdebug(_msg)
        _status = 0
    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)
    except CldOpsException as obj :
        _fmsg = str(obj.msg)
        cberr(_msg)
        _status = 2
    except Exception as msg :
        _fmsg = str(msg)
        _status = 23
    finally :
        _status, _msg = self.common_messages("VMC", obj_attr_list, "cleaned up", _status, _fmsg)
        return _status, _msg
@trace
def vmcregister(self, obj_attr_list) :
    '''
    Register a VMC (zone): optionally clean it up first, connect, record
    timing attributes and optionally discover hosts.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        _time_mark_prs = int(time())
        obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
        if "cleanup_on_attach" in obj_attr_list and obj_attr_list["cleanup_on_attach"] == "True" :
            _status, _fmsg = self.vmccleanup(obj_attr_list)
        else :
            _status = 0
        _x, _y, _hostname = self.connect(obj_attr_list["access"], obj_attr_list["credentials"], obj_attr_list["name"], obj_attr_list["name"])
        obj_attr_list["cloud_hostname"] = _hostname + "-" + obj_attr_list["name"]
        # _hostname is a URL; element [2] of the '/'-split is its host part.
        obj_attr_list["cloud_ip"] = gethostbyname(_hostname.split('/')[2])
        obj_attr_list["arrival"] = int(time())
        if str(obj_attr_list["discover_hosts"]).lower() == "true" :
            self.discover_hosts(obj_attr_list, _time_mark_prs)
        else :
            obj_attr_list["hosts"] = ''
            obj_attr_list["host_list"] = {}
            obj_attr_list["host_count"] = "NA"
        _time_mark_prc = int(time())
        obj_attr_list["mgt_003_provisioning_request_completed"] = _time_mark_prc - _time_mark_prs
        _status = 0
    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)
    except CldOpsException as obj :
        _fmsg = str(obj.msg)
        _status = 2
    except Exception as msg :
        _fmsg = str(msg)
        _status = 23
    finally :
        _status, _msg = self.common_messages("VMC", obj_attr_list, "registered", _status, _fmsg)
        return _status, _msg
@trace
def vmcunregister(self, obj_attr_list) :
    '''
    Unregister a VMC (zone): record deprovisioning timing attributes and
    optionally clean up its instances and volumes.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        _time_mark_drs = int(time())
        if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
            obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
        obj_attr_list["mgt_902_deprovisioning_request_sent"] = _time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
        if "cleanup_on_detach" in obj_attr_list and obj_attr_list["cleanup_on_detach"] == "True" :
            _status, _fmsg = self.vmccleanup(obj_attr_list)
        _time_mark_prc = int(time())
        obj_attr_list["mgt_903_deprovisioning_request_completed"] = _time_mark_prc - _time_mark_drs
        _status = 0
    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)
    except CldOpsException as obj :
        _status = obj.status
        _fmsg = str(obj.msg)
    except Exception as msg :
        _fmsg = str(msg)
        _status = 23
    finally :
        _status, _msg = self.common_messages("VMC", obj_attr_list, "unregistered", _status, _fmsg)
        return _status, _msg
@trace
def vmcount(self, obj_attr_list):
    '''
    Counts, across every VMC (zone) registered for this cloud, the
    instances whose names carry this experiment's
    "cb-<username>-<cloud name>" prefix. Returns the count, or the string
    "NA" if any API call fails.
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        _nr_instances = 0

        for _vmc_uuid in self.osci.get_object_list(obj_attr_list["cloud_name"], "VMC") :
            _vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
                                                  "VMC", False, _vmc_uuid, \
                                                  False)
            self.connect(obj_attr_list["access"], obj_attr_list["credentials"], _vmc_attr_list["name"], _vmc_attr_list["name"])
            _instance_list = self.get_instances(_vmc_attr_list, "vm", "all")
            for _instance in _instance_list :
                # Only count instances created by this user/cloud combination.
                if _instance["name"].count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"].lower()) :
                    _nr_instances += 1

    except Exception as e :
        _status = 23
        _nr_instances = "NA"
        _fmsg = "(While counting instance(s) through API call \"list\") " + str(e)

    finally :
        return _nr_instances
@trace
def get_ssh_keys(self, vmc_name, key_name, key_contents, key_fingerprint, registered_key_pairs, internal, connection) :
    '''
    Reads the SSH keys stored in the project's common instance metadata
    ("sshKeys" entry), filling registered_key_pairs with
    name -> "<fingerprint>-NA" and caching the raw metadata lines in
    self.temp_key_metadata for later reuse by create_ssh_key.
    Always returns True.
    '''
    self.temp_key_metadata = {}

    self.project_metadata = self.gceconn.projects().get(project=self.instances_project).execute(http = self.http_conn[connection])

    if "items" in self.project_metadata["commonInstanceMetadata"] :
        for _element in self.project_metadata["commonInstanceMetadata"]["items"] :
            if _element["key"] == "sshKeys" :
                # Each metadata line has the form "<name>:<type> <contents> <user>".
                for _component in _element["value"].split('\n') :
                    if len(_component.split(' ')) == 3 :
                        _r_key_tag, _r_key_contents, _r_key_user = _component.split(' ')
                        _r_key_name, _r_key_type = _r_key_tag.split(':')

                        self.temp_key_metadata[_r_key_name] = _r_key_tag + ' ' + _r_key_contents + ' ' + _r_key_user

                        _r_key_type, _r_key_contents, _r_key_fingerprint = \
                        get_ssh_key(_r_key_type + ' ' + _r_key_contents + ' ' + _r_key_user, self.get_description(), False)

                        registered_key_pairs[_r_key_name] = _r_key_fingerprint + "-NA"

    #_temp_key_metadata[key_name] = key_name + ':' + _key_type + ' ' + _key_contents + ' ' + vm_defaults["login"] + "@orchestrator"
    return True
@trace
def get_ip_address(self, obj_attr_list) :
    '''
    Extracts the private and public IP addresses of the instance cached in
    self.instance_info and stores them (plus the derived hostnames) in
    obj_attr_list, honoring the "run_netname"/"prov_netname" selections.
    Returns True on success, False when the addresses cannot be extracted.
    '''
    try :
        _private_ip_address = self.instance_info["networkInterfaces"][0]["networkIP"]
        _public_ip_address = self.instance_info["networkInterfaces"][0]["accessConfigs"][0]["natIP"]

        cbdebug("Got IPs for " + obj_attr_list["name"] + ": " + str(_private_ip_address) + ", " + str(_public_ip_address))
        _public_hostname = obj_attr_list["cloud_vm_name"] + '.' + obj_attr_list["vmc_name"]
        _private_hostname = obj_attr_list["cloud_vm_name"] + '.' + obj_attr_list["vmc_name"]

        obj_attr_list["public_cloud_ip"] = _public_ip_address

        if obj_attr_list["run_netname"] == "private" :
            obj_attr_list["cloud_hostname"] = _private_hostname
            obj_attr_list["run_cloud_ip"] = _private_ip_address
        else :
            obj_attr_list["cloud_hostname"] = _public_hostname
            obj_attr_list["run_cloud_ip"] = _public_ip_address

        # NOTE: "cloud_ip" is always equal to "run_cloud_ip"
        obj_attr_list["cloud_ip"] = obj_attr_list["run_cloud_ip"]

        # Bug fix: a case-sensitive duplicate of this check used to run first
        # and was always overwritten by this case-insensitive assignment, so
        # the dead branch was removed (behavior unchanged).
        if obj_attr_list["prov_netname"].lower() == "private" :
            obj_attr_list["prov_cloud_ip"] = _private_ip_address
        else :
            obj_attr_list["prov_cloud_ip"] = _public_ip_address

        return True
    except Exception as e:
        cbdebug("Failed to retrieve IP for: " + obj_attr_list["name"] + ": " + str(e))
        return False
@trace
def get_instances(self, obj_attr_list, obj_type = "vm", identifier = "all") :
    '''
    Lists or fetches instances ("vm") or persistent disks (any other
    obj_type) in the object's zone. With identifier == "all" the full list
    is returned; otherwise a single resource dictionary. Returns [] when
    nothing matches (including on HTTP 404).
    '''
    try :
        _instances = []
        _fmsg = "Error while getting instances"

        # VMCs carry the zone in "name"; VMs reference it via "vmc_name".
        if "vmc_name" in obj_attr_list :
            _actual_zone = obj_attr_list["vmc_name"]
        else :
            _actual_zone = obj_attr_list["name"]

        if obj_type == "vm" :
            if identifier == "all" :
                _instance_list = self.gceconn.instances().list(project = self.instances_project, \
                                                               zone = _actual_zone).execute(http = self.http_conn[obj_attr_list["name"]])
            else :
                _instance_list = self.gceconn.instances().get(project = self.instances_project, \
                                                              zone = _actual_zone, instance = identifier).execute(http = self.http_conn[obj_attr_list["name"]])
        else :
            if identifier == "all" :
                _instance_list = self.gceconn.disks().list(project = self.instances_project, \
                                                           zone = _actual_zone).execute(http = self.http_conn[obj_attr_list["name"]])
            else :
                _instance_list = self.gceconn.disks().get(project = self.instances_project, \
                                                          zone = _actual_zone, \
                                                          disk = identifier).execute(http = self.http_conn[obj_attr_list["name"]])

        # list() responses carry "items"; get() returns the resource itself,
        # which always carries a "status" field.
        if "items" in _instance_list :
            _instances = _instance_list["items"]
        elif "status" in _instance_list :
            _instances = _instance_list

        return _instances

    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        # A 404 simply means "no such resource": report it as empty.
        if _status == 404 :
            return []
        else :
            _fmsg = str(obj)
            raise CldOpsException(_fmsg, _status)

    except Exception as _fmsg :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        return []
@trace
def get_images(self, obj_attr_list) :
    '''
    Resolves the image referenced by "imageid1" (either a numeric image id
    or an image name) against the appropriate GCE images project, writing
    the resolved name and id back into obj_attr_list. Raises
    CldOpsException when no image matches.
    '''
    try :
        _status = 100
        _candidate_images = None
        _fmsg = "An error has occurred, but no error message was captured"

        # For "check" operations, guess the public images project from the
        # image name; otherwise use the configured images project.
        if "role" in obj_attr_list and obj_attr_list["role"] == "check" :
            if obj_attr_list["imageid1"].count("ubuntu") :
                obj_attr_list["images_project"] = "ubuntu-os-cloud"
            elif obj_attr_list["imageid1"].count("rhel") :
                obj_attr_list["images_project"] = "rhel-cloud"
            elif obj_attr_list["imageid1"].count("centos") :
                obj_attr_list["images_project"] = "centos-cloud"
            else :
                obj_attr_list["images_project"] = self.images_project
        else :
            obj_attr_list["images_project"] = self.images_project

        # Numeric identifiers are matched by id, everything else by name.
        if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
            _filter = "id eq " + obj_attr_list["imageid1"]
        else :
            _filter = "name eq " + obj_attr_list["imageid1"]

        _candidate_images = self.gceconn.images().list(project = obj_attr_list["images_project"], \
                                                       filter = _filter).execute(http = self.http_conn[obj_attr_list["name"]])

        _fmsg = "Please check if the defined image name is present on this "
        _fmsg += self.get_description()

        if "items" in _candidate_images :
            obj_attr_list["imageid1"] = _candidate_images["items"][0]["name"]
            obj_attr_list["boot_volume_imageid1"] = _candidate_images["items"][0]["id"]
            _status = 0

    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)
        raise CldOpsException(_fmsg, _status)

    except Exception as e :
        _status = 23
        _fmsg = str(e)

    finally :
        if _status :
            _msg = "Image Name (" + obj_attr_list["imageid1"] + ") not found: " + _fmsg
            cberr(_msg)
            raise CldOpsException(_msg, _status)
        else :
            return _candidate_images
@trace
def get_networks(self, obj_attr_list) :
    '''
    Placeholder network lookup: no actual validation is performed against
    GCE, so this always succeeds. The failure path is kept only for
    symmetry with the other lookup helpers.
    '''
    _status = 100
    _fmsg = "An error has occurred, but no error message was captured"

    try :
        _status = 0
    except Exception as e :
        _status = 23
        _fmsg = str(e)
    finally :
        if not _status :
            return True
        _msg = "Network (" + obj_attr_list["prov_netname"] + " ) not found: " + _fmsg
        cberr(_msg, True)
        raise CldOpsException(_msg, _status)
@trace
def create_ssh_key(self, vmc_name, key_name, key_type, key_contents, key_fingerprint, vm_defaults, connection) :
    '''
    Appends the experiment's SSH public key (registered both under key_name
    and under the configured guest login) to the project-wide "sshKeys"
    common instance metadata, merging with the entries previously cached by
    get_ssh_keys in self.temp_key_metadata. Always returns True.
    '''
    # Register the key under two names: "<key_name>" for cbtool itself and
    # once for the guest login user.
    for _kn in [ key_name + " cbtool", vm_defaults["login"] + " " + vm_defaults["login"]] :
        _actual_key_name, _actual_user_name = _kn.split(" ")
        self.temp_key_metadata[_actual_key_name] = _actual_key_name + ':' + key_type + ' ' + key_contents + ' ' + _actual_user_name + "@orchestrator"

    # Serialize all cached keys back into the newline-separated metadata
    # format, dropping the trailing newline.
    _key_list_str = ''
    for _key in list(self.temp_key_metadata.keys()) :
        _key_list_str += self.temp_key_metadata[_key] + '\n'
    _key_list_str = _key_list_str[0:-1]

    if "items" in self.project_metadata["commonInstanceMetadata"] :
        for _element in self.project_metadata['commonInstanceMetadata']['items'] :
            if _element["key"] == "sshKeys" :
                _element["value"] += _key_list_str
    else :
        self.project_metadata['commonInstanceMetadata']["items"] = []
        self.project_metadata['commonInstanceMetadata']['items'].append({"key": "sshKeys", "value" : _key_list_str})

    # Push the updated metadata back to the project.
    self.gceconn.projects().setCommonInstanceMetadata(project=self.instances_project, body=self.project_metadata["commonInstanceMetadata"]).execute(http = self.http_conn[connection])

    return True
@trace
def is_cloud_image_uuid(self, imageid) :
    '''
    Returns True when imageid looks like a (numeric) GCE image id rather
    than an image name, False otherwise.
    '''
    # Checks for len() == 18/19 no longer valid. Images of length 16 were found.
    return True if is_number(imageid) else False
@trace
def is_vm_running(self, obj_attr_list):
    '''
    Checks whether the instance named "cloud_vm_name" exists and is in the
    RUNNING state. On success the instance dictionary is cached in
    self.instance_info and the boot disk's selfLink is recorded (needed
    later by vmcapture). Returns True/False; raises CldOpsException on API
    errors.
    '''
    try :
        _instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])

        if _instance :
            _instance_state = _instance["status"]
        else :
            _instance_state = "non-existent"

        if _instance_state == "RUNNING" :
            self.instance_info = _instance
            # Remember the boot disk (index 0) source link.
            if "disks" in _instance :
                for _disk in _instance["disks"] :
                    if _disk["index"] == 0 :
                        obj_attr_list["boot_link_imageid1"] = _disk["source"]
                        break
            return True
        else :
            return False

    except GCEExceptionHttpError as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = int(obj.resp.status)
        _fmsg = str(obj)
        raise CldOpsException(_fmsg, _status)

    except Exception as msg :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _fmsg = str(msg)
        cberr(_fmsg)
        _status = 23
        raise CldOpsException(_fmsg, _status)
@trace
def is_vm_ready(self, obj_attr_list) :
    '''
    Reports whether the instance is both running and has had its IP
    addresses extracted, updating "last_known_state" accordingly.
    Returns True only when the instance is running with an IP assigned.
    '''
    cbdebug("Waiting for " + obj_attr_list["name"] + " to be running...")

    # Guard clause: not running yet.
    if not self.is_vm_running(obj_attr_list) :
        cbdebug("VM still not running yet: " + obj_attr_list["name"])
        obj_attr_list["last_known_state"] = "not running"
        return False

    cbdebug("Getting IP for " + obj_attr_list["name"])

    # Guard clause: running, but addresses not yet available.
    if not self.get_ip_address(obj_attr_list) :
        cbdebug("IP not found for " + obj_attr_list["name"])
        obj_attr_list["last_known_state"] = "running with ip unassigned"
        return False

    cbdebug("IP found for " + obj_attr_list["name"])
    obj_attr_list["last_known_state"] = "running with ip assigned"
    return True
@trace
def vm_placement(self, obj_attr_list) :
    '''
    Placeholder placement step: no placement decision is actually made for
    GCE, so this always succeeds. The failure path is kept only for
    symmetry with the other adapters.
    '''
    _status = 100
    _fmsg = "An error has occurred, but no error message was captured"

    try :
        _status = 0
    except Exception as e :
        _status = 23
        _fmsg = str(e)
    finally :
        if not _status :
            return True
        _msg = "VM placement failed: " + _fmsg
        cberr(_msg, True)
        raise CldOpsException(_msg, _status)
@trace
def vvcreate(self, obj_attr_list) :
    '''
    Creates the persistent disk ("cloud_vv") requested for an instance,
    waits for the zone operation to finish and records the resulting disk
    id and selfLink. A no-op when no volume was requested.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"

        obj_attr_list["cloud_vv_instance"] = False

        if "cloud_vv_type" not in obj_attr_list :
            '''
            GCE types as of 2018:
            pd-standard
            local-ssd
            pd-ssd
            '''
            obj_attr_list["cloud_vv_type"] = "pd-standard"

        _disk_type = "zones/" + obj_attr_list["vmc_name"] + "/diskTypes/" + obj_attr_list["cloud_vv_type"]

        if "cloud_vv" in obj_attr_list and str(obj_attr_list["cloud_vv"]).lower() != "false":

            self.common_messages("VV", obj_attr_list, "creating", _status, _fmsg)

            obj_attr_list["last_known_state"] = "about to send volume create request"

            _config = {
                'name': obj_attr_list["cloud_vv_name"],
                'description' : "used by " + obj_attr_list["cloud_vm_name"],
                'sizeGb' : obj_attr_list["cloud_vv"],
                'type' : _disk_type,
            }

            _operation = self.gceconn.disks().insert(project = self.instances_project, \
                                                     zone = obj_attr_list["vmc_name"], \
                                                     body = _config).execute(http = self.http_conn[obj_attr_list["name"]])

            if self.wait_until_operation(obj_attr_list, _operation) :
                # Re-read the disk to capture its cloud-assigned id/selfLink.
                _instance = self.get_instances(obj_attr_list, "vv", obj_attr_list["cloud_vv_name"])
                obj_attr_list["cloud_vv_uuid"] = _instance["id"]
                obj_attr_list["cloud_vv_source"] = _instance["selfLink"]

        _status = 0

    except CldOpsException as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = obj.status
        _fmsg = str(obj.msg)

    except GCEExceptionHttpError as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except KeyboardInterrupt :
        _status = 42
        _fmsg = "CTRL-C interrupt"
        cbdebug("VM create keyboard interrupt...", True)

    except Exception as e :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = 23
        _fmsg = str(e)

    finally :
        _status, _msg = self.common_messages("VV", obj_attr_list, "created", _status, _fmsg)
        return _status, _msg
@trace
def vvdestroy(self, obj_attr_list, identifier) :
    '''
    Destroys the persistent disk whose name is stored under the given
    obj_attr_list key (e.g. "cloud_vv_name" or "cloud_vm_name"), if a
    volume was requested and the disk still exists.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"

        _wait = int(obj_attr_list["update_frequency"])
        _curr_tries = 0
        _max_tries = int(obj_attr_list["update_attempts"])

        if "cloud_vv" in obj_attr_list and str(obj_attr_list["cloud_vv"]).lower() != "false":
            # Only attempt deletion when the disk actually exists.
            _instance = self.get_instances(obj_attr_list, "vv", obj_attr_list[identifier])

            if _instance :
                self.common_messages("VV", obj_attr_list, "destroying", 0, '')
                _operation = self.gceconn.disks().delete(project = self.instances_project, \
                                                         zone = obj_attr_list["vmc_name"], \
                                                         disk = obj_attr_list[identifier]).execute(http = self.http_conn[obj_attr_list["name"]])

                self.wait_until_operation(obj_attr_list, _operation)

        _status = 0

    except CldOpsException as obj :
        _status = obj.status
        _fmsg = str(obj.msg)

    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except Exception as e :
        _status = 23
        _fmsg = str(e)

    finally :
        _status, _msg = self.common_messages("VV", obj_attr_list, "destroyed", _status, _fmsg)
        return _status, _msg
@trace
def vmcreate(self, obj_attr_list) :
    '''
    Creates a GCE instance (and, when requested, an extra persistent disk),
    then waits until it is running, reachable and booted, recording all
    provisioning timestamps in obj_attr_list. On failure, partially created
    resources are cleaned up via vmdestroy_repeat.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"
        # _operation doubles as a "create request was actually sent" flag
        # for the cleanup logic in the finally clause.
        _operation = False

        self.determine_instance_name(obj_attr_list)
        # GCE resource names must be lower case; disk names cannot have '_'.
        obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].lower()
        obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].lower().replace("_", "-")

        self.determine_key_name(obj_attr_list)

        obj_attr_list["last_known_state"] = "about to connect to " + self.get_description() + " manager"

        self.take_action_if_requested("VM", obj_attr_list, "provision_originated")

        self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                     obj_attr_list["vmc_name"], obj_attr_list["name"])

        # Refuse to create over a pre-existing, running instance.
        if self.is_vm_running(obj_attr_list) :
            _msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
            _msg += " is already running. It needs to be destroyed first."
            _status = 187
            cberr(_msg)
            raise CldOpsException(_msg, _status)

        _time_mark_prs = int(time())
        obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])

        self.vm_placement(obj_attr_list)

        obj_attr_list["last_known_state"] = "about to send create request"

        self.get_images(obj_attr_list)
        self.get_networks(obj_attr_list)
        obj_attr_list["config_drive"] = False

        # Create the (optional) extra data volume first.
        _status, _fmsg = self.vvcreate(obj_attr_list)

        # "Security groups" must be a list
        _security_groups = []
        _security_groups.append(obj_attr_list["security_groups"])

        _source_disk_image = "projects/" + obj_attr_list["images_project"] + "/global/images/" + obj_attr_list["imageid1"]

        _machine_type = "zones/" + obj_attr_list["vmc_name"] + "/machineTypes/" + obj_attr_list["size"]

        if "cloud_rv_type" not in obj_attr_list :
            obj_attr_list["cloud_rv_type"] = "pd-standard"

        _root_type = "zones/" + obj_attr_list["vmc_name"] + "/diskTypes/" + obj_attr_list["cloud_rv_type"]

        # Optional explicit root volume size; None lets GCE use the
        # image's default.
        if "cloud_rv" in obj_attr_list and obj_attr_list["cloud_rv"] != "0":
            _rv_size = obj_attr_list["cloud_rv"]
        else:
            _rv_size = None

        _config = {
            'name': obj_attr_list["cloud_vm_name"],
            'machineType': _machine_type,

            # Specify the boot disk and the image to use as a source.
            'disks': [
                {
                    'boot': True,
                    'autoDelete': True,
                    'initializeParams': {
                        'sourceImage': _source_disk_image,
                        'diskType' : _root_type,
                        'diskSizeGb': _rv_size,
                    }
                }
            ],

            # Specify a network interface with NAT to access the public
            # internet.
            'networkInterfaces': [{
                'network': 'global/networks/default',
                'accessConfigs': [
                    {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
                ]
            }],

            # Allow the instance to access cloud storage and logging.
            'serviceAccounts': [{
                'email': 'default',
                'scopes': [
                    'https://www.googleapis.com/auth/devstorage.read_write',
                    'https://www.googleapis.com/auth/logging.write'
                ]
            }],

            # Metadata is readable from the instance and allows you to
            # pass configuration from deployment scripts to instances.
            'metadata': {
                'items': [{
                    'key': 'expid',
                    'value': obj_attr_list["experiment_id"]
                }, {
                    'key': 'use',
                    'value': "cloudbench"
#                }, {
#                    'key': 'sshKeys',
#                    'value': obj_attr_list["login"] + ':' + \
#                     obj_attr_list["ssh_key_type"] + ' ' + \
#                     obj_attr_list["ssh_key_contents"].strip('\n') + ' ' + \
#                     obj_attr_list["ssh_key_name"].strip('\n') + "@orchestrator"
                }]
            }
        }

        if "preemptible" in obj_attr_list and str(obj_attr_list["preemptible"]).lower() == "true" :
            cbdebug("Will create a pre-emptible instance.", True)
            _config["scheduling"] = { "preemptible" : True }

        # Cloud-init user data, delivered through instance metadata.
        user_data = self.populate_cloudconfig(obj_attr_list)
        if user_data :
            _config["metadata"]["items"].append({"key" : "user-data", "value" : user_data})
            cbdebug("Appended userdata...", True)

        # Attach the previously created data volume, if any.
        if str(obj_attr_list["cloud_vv_uuid"]).lower() != "none":
            self.common_messages("VV", obj_attr_list, "attaching", _status, _fmsg)
            _config["disks"].append({'boot': False, \
                                     "autoDelete" : True, \
                                     "source" : obj_attr_list["cloud_vv_source"]})

        self.common_messages("VM", obj_attr_list, "creating", 0, '')

        # Stagger concurrent create requests by the object's ordinal.
        sleep(float(obj_attr_list["name"].replace("vm_",'')) + 1.0)

        self.pre_vmcreate_process(obj_attr_list)

        _operation = self.gceconn.instances().insert(project = self.instances_project, \
                                                     zone = obj_attr_list["vmc_name"], \
                                                     body = _config).execute(http = self.http_conn[obj_attr_list["name"]])

        if self.wait_until_operation(obj_attr_list, _operation) :

            self.take_action_if_requested("VM", obj_attr_list, "provision_started")

            _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)

            self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)

            obj_attr_list["host_name"] = "unknown"

            _status = 0

            if obj_attr_list["force_failure"].lower() == "true" :
                _fmsg = "Forced failure (option FORCE_FAILURE set \"true\")"
                _status = 916

        else :
            _fmsg = "Failed to obtain instance's (cloud-assigned) uuid. The "
            _fmsg += "instance creation failed for some unknown reason."
            _status = 100

    except CldOpsException as obj :
        for line in traceback.format_exc().splitlines() :
            cberr(line)
        _status = obj.status
        _fmsg = str(obj.msg)

    except GCEExceptionHttpError as obj :
        for line in traceback.format_exc().splitlines() :
            cberr(line)
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except Exception as msg :
        for line in traceback.format_exc().splitlines() :
            cberr(line)
        _fmsg = str(msg)
        _status = 23

    finally :
        # If the create request was sent but something failed afterwards,
        # tear down whatever got created.
        if _status and _operation is not False :
            cbdebug("Error after VM creation. Cleanup...", True)
            self.vmdestroy_repeat(obj_attr_list)

        if "instance_obj" in obj_attr_list :
            del obj_attr_list["instance_obj"]
        _status, _msg = self.common_messages("VM", obj_attr_list, "created", _status, _fmsg)
        return _status, _msg
@trace
def vmdestroy(self, obj_attr_list) :
    '''
    Destroys the instance named "cloud_vm_name" (polling until it is
    actually gone) and then the associated persistent disks, recording the
    deprovisioning timestamps. Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"

        _time_mark_drs = int(time())
        if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
            obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs

        obj_attr_list["mgt_902_deprovisioning_request_sent"] = \
            _time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])

        self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                     obj_attr_list["vmc_name"], obj_attr_list["name"])

        _wait = int(obj_attr_list["update_frequency"])
        _max_tries = int(obj_attr_list["update_attempts"])
        _curr_tries = 0

        _instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])

        if _instance :
            self.common_messages("VM", obj_attr_list, "destroying", 0, '')

            _operation = self.gceconn.instances().delete(project = self.instances_project, \
                                                         zone = obj_attr_list["vmc_name"], \
                                                         instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])
            self.wait_until_operation(obj_attr_list, _operation)

            # Poll until the instance actually disappears (or we give up).
            while self.is_vm_running(obj_attr_list) and _curr_tries < _max_tries :
                sleep(_wait)
                _curr_tries += 1
        else :
            # no-op: nothing to destroy
            True

        # NOTE(review): vvdestroy is invoked twice, first keyed by the VM
        # name and then by the volume name -- presumably to also remove
        # disks named after the instance; confirm against vvcreate naming.
        _status, _fmsg = self.vvdestroy(obj_attr_list, "cloud_vm_name")

        _time_mark_drc = int(time())
        obj_attr_list["mgt_903_deprovisioning_request_completed"] = \
            _time_mark_drc - _time_mark_drs

        _status, _fmsg = self.vvdestroy(obj_attr_list, "cloud_vv_name")

        self.take_action_if_requested("VM", obj_attr_list, "deprovision_finished")

    except CldOpsException as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = obj.status
        _fmsg = str(obj.msg)

    except GCEExceptionHttpError as obj :
        for line in traceback.format_exc().splitlines() :
            cbwarn(line)
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except Exception as msg :
        for line in traceback.format_exc().splitlines() :
            cberr(line)
        _fmsg = str(msg)
        _status = 23

    finally :
        _status, _msg = self.common_messages("VM", obj_attr_list, "destroyed", _status, _fmsg)
        return _status, _msg
@trace
def vmcapture(self, obj_attr_list) :
    '''
    Captures the instance's boot disk as a new image: deletes the instance
    (its boot disk link was previously cached by is_vm_running), creates an
    image from that disk and polls until the image is READY, recording the
    capture timestamps. Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"

        _wait = int(obj_attr_list["update_frequency"])
        _curr_tries = 0
        _max_tries = int(obj_attr_list["update_attempts"])

        if not self.gceconn :
            self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                         obj_attr_list["vmc_name"], obj_attr_list["name"])

        _instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])

        if _instance :
            _time_mark_crs = int(time())

            # Just in case the instance does not exist, make crc = crs
            _time_mark_crc = _time_mark_crs

            obj_attr_list["mgt_102_capture_request_sent"] = _time_mark_crs - obj_attr_list["mgt_101_capture_request_originated"]

            if obj_attr_list["captured_image_name"] == "auto" :
                obj_attr_list["captured_image_name"] = obj_attr_list["imageid1"] + "_captured_at_"
                obj_attr_list["captured_image_name"] += str(obj_attr_list["mgt_101_capture_request_originated"])

            self.common_messages("VM", obj_attr_list, "capturing", 0, '')

            # GCE image names cannot contain underscores.
            obj_attr_list["captured_image_name"] = obj_attr_list["captured_image_name"].replace('_','-')

            # The instance must be deleted first so its boot disk can serve
            # as the image source.
            _operation = self.gceconn.instances().delete(project = self.instances_project, \
                                                         zone = obj_attr_list["vmc_name"], \
                                                         instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])
            self.wait_until_operation(obj_attr_list, _operation)

            _config = {
                "name": obj_attr_list["captured_image_name"],
                "sourceDisk" : obj_attr_list["boot_link_imageid1"]
            }

            _operation = self.gceconn.images().insert(project = self.images_project, \
                                                      body = _config).execute(http = self.http_conn[obj_attr_list["name"]])

            # Poll until the new image reports READY.
            _vm_image_created = False
            while not _vm_image_created and _curr_tries < _max_tries :
                _filter = "name eq " + obj_attr_list["captured_image_name"]
                _image_instances = self.gceconn.images().list(project = self.images_project, \
                                                              filter = _filter).execute(http = self.http_conn[obj_attr_list["name"]])

                if "items" in _image_instances :
                    if _image_instances["items"][0]["status"] == "READY" :
                        _vm_image_created = True
                        _time_mark_crc = int(time())
                        obj_attr_list["mgt_103_capture_request_completed"] = _time_mark_crc - _time_mark_crs
                        break

                sleep(_wait)
                _curr_tries += 1

            # Bug fix: the loop leaves _curr_tries == _max_tries on timeout,
            # so the previous "_curr_tries > _max_tries" test never fired
            # and a timed-out capture was wrongly reported as successful.
            if not _vm_image_created :
                _status = 1077
                _fmsg = "" + obj_attr_list["name"] + ""
                _fmsg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
                _fmsg += "could not be captured after " + str(_max_tries * _wait) + " seconds.... "
            else :
                _status = 0
        else :
            _fmsg = "This instance does not exist"
            _status = 1098

    except CldOpsException as obj :
        _status = obj.status
        _fmsg = str(obj.msg)

    except Exception as msg :
        _fmsg = str(msg)
        _status = 23

    finally :
        _status, _msg = self.common_messages("VM", obj_attr_list, "captured", _status, _fmsg)
        return _status, _msg
def vmrunstate(self, obj_attr_list) :
    '''
    Alters the instance's runstate: stop for "fail"/"save" targets, start
    when resuming/restoring a previously stopped instance, recording the
    runstate timestamps. Returns (status, message).
    '''
    try :
        _status = 100
        # Bug fixes: _fmsg and _operation are now initialized up front and
        # _time_mark_rrs is computed unconditionally; previously a missing
        # "mgt_201..." key (or an unmatched state transition) caused a
        # NameError that was misreported as a generic failure (status 23).
        _fmsg = "An error has occurred, but no error message was captured"
        _operation = None

        _ts = obj_attr_list["target_state"]
        _cs = obj_attr_list["current_state"]

        self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                     obj_attr_list["vmc_name"], obj_attr_list["name"])

        _time_mark_rrs = int(time())
        if "mgt_201_runstate_request_originated" in obj_attr_list :
            obj_attr_list["mgt_202_runstate_request_sent"] = \
                _time_mark_rrs - obj_attr_list["mgt_201_runstate_request_originated"]

        self.common_messages("VM", obj_attr_list, "runstate altering", 0, '')

        _instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])

        if _instance :
            if _ts == "fail" :
                _operation = self.gceconn.instances().stop(project = self.instances_project, \
                                                           zone = obj_attr_list["vmc_name"], \
                                                           instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])
            elif _ts == "save" :
                _operation = self.gceconn.instances().stop(project = self.instances_project, \
                                                           zone = obj_attr_list["vmc_name"], \
                                                           instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])
            elif (_ts == "attached" or _ts == "resume") and _cs == "fail" :
                _operation = self.gceconn.instances().start(project = self.instances_project, \
                                                            zone = obj_attr_list["vmc_name"], \
                                                            instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])
            elif (_ts == "attached" or _ts == "restore") and _cs == "save" :
                _operation = self.gceconn.instances().start(project = self.instances_project, \
                                                            zone = obj_attr_list["vmc_name"], \
                                                            instance = obj_attr_list["cloud_vm_name"]).execute(http = self.http_conn[obj_attr_list["name"]])

            # Only wait when a transition was actually issued.
            if _operation is not None :
                self.wait_until_operation(obj_attr_list, _operation)

        _time_mark_rrc = int(time())
        obj_attr_list["mgt_203_runstate_request_completed"] = _time_mark_rrc - _time_mark_rrs

        _status = 0

    except CldOpsException as obj :
        _status = obj.status
        _fmsg = str(obj.msg)

    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except Exception as msg :
        _fmsg = str(msg)
        _status = 23

    finally :
        _status, _msg = self.common_messages("VM", obj_attr_list, "runstate altered", _status, _fmsg)
        return _status, _msg
@trace
def vmmigrate(self, obj_attr_list) :
    '''
    Live migration is not implemented by the GCE adapter; always reports
    success with a "NOT SUPPORTED" message.
    '''
    return 0, "NOT SUPPORTED"
@trace
def vmresize(self, obj_attr_list) :
    '''
    Instance resizing is not implemented by the GCE adapter; always
    reports success with a "NOT SUPPORTED" message.
    '''
    return 0, "NOT SUPPORTED"
@trace
def imgdelete(self, obj_attr_list) :
    '''
    Deletes the image referenced by "imageid1" from the images project and
    polls until it no longer appears in the image listing.
    Returns (status, message).
    '''
    try :
        _status = 100
        _fmsg = "An error has occurred, but no error message was captured"

        self.common_messages("IMG", obj_attr_list, "deleting", 0, '')

        self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                     obj_attr_list["vmc_name"], obj_attr_list["vmc_name"])

        _filter = "name eq " + obj_attr_list["imageid1"]

        _image_instances = self.gceconn.images().list(project = self.images_project, \
                                                      filter = _filter).execute(http = self.http_conn[obj_attr_list["vmc_name"]])

        if "items" in _image_instances :
            obj_attr_list["imageid1"] = _image_instances["items"][0]["name"]
            obj_attr_list["boot_volume_imageid1"] = _image_instances["items"][0]["id"]

            _operation = self.gceconn.images().delete(project = self.images_project, \
                                                      image = obj_attr_list["imageid1"]).execute(http = self.http_conn[obj_attr_list["vmc_name"]])

            _wait = int(obj_attr_list["update_frequency"])
            _curr_tries = 0
            _max_tries = int(obj_attr_list["update_attempts"])

            # Poll until the image disappears from the listing (or give up).
            _image_deleted = False
            while not _image_deleted and _curr_tries < _max_tries :
                _filter = "name eq " + obj_attr_list["imageid1"]
                _image_instances = self.gceconn.images().list(project = self.images_project, \
                                                              filter = _filter).execute(http = self.http_conn[obj_attr_list["vmc_name"]])

                if "items" not in _image_instances :
                    _image_deleted = True
                else :
                    sleep(_wait)
                    _curr_tries += 1

        _status = 0

    except GCEExceptionHttpError as obj :
        _status = int(obj.resp.status)
        _fmsg = str(obj)

    except Exception as e :
        _status = 23
        _fmsg = str(e)

    finally :
        _status, _msg = self.common_messages("IMG", obj_attr_list, "deleted", _status, _fmsg)
        return _status, _msg
@trace
def wait_until_operation(self, obj_attr_list, opid) :
    '''
    Polls the given GCE zone operation until it reports DONE, then returns
    True (recording the operation id as the instance uuid when still
    unset). Raises CldOpsException when the operation reports an error or
    does not finish within update_attempts * update_frequency seconds.
    '''
    _msg = "Waiting for " + obj_attr_list["name"] + " operation to finish..."
    cbdebug(_msg)

    _curr_tries = 0
    _max_tries = int(obj_attr_list["update_attempts"])
    _wait = int(obj_attr_list["update_frequency"])
    sleep(_wait)

    while _curr_tries < _max_tries :
        _start_pooling = int(time())

        _op = self.gceconn.zoneOperations().get(project = self.instances_project, \
                                                zone = obj_attr_list["vmc_name"], \
                                                operation = opid["name"]).execute(http = self.http_conn[obj_attr_list["name"]])

        if _op['status'] == 'DONE':
            if 'error' in _op :
                raise CldOpsException(_op["error"], 2001)

            if str(obj_attr_list["cloud_vm_uuid"]).lower() == "none" :
                obj_attr_list["cloud_vm_uuid"] = _op["id"]
            return True
        else:
            sleep(_wait)
            _curr_tries += 1

    _fmsg = obj_attr_list["name"] + " operation did not finish after "
    _fmsg += str(_max_tries * _wait) + " seconds... "
    _fmsg += "Giving up."
    # Bug fix: raise with the timeout message. The previous code raised
    # with _op["error"], which does not exist on a timed-out (non-errored)
    # operation and therefore masked the real failure with a KeyError.
    raise CldOpsException(_fmsg, 2001)
| StarcoderdataPython |
def rot13(s):
    """ROT-13 Caesar cipher.

    Maps every ASCII letter 13 positions forward in the alphabet (wrapping
    around) while preserving case; all other characters pass through
    unchanged. Applying the function twice returns the original string.
    """
    return s.translate(
        str.maketrans(
            "ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz",
            "NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm"
        )
    )
| StarcoderdataPython |
from django.urls import path
from . import views

# Static/site-level pages served by this app; each route delegates to the
# view of the same name in views.py.
urlpatterns = [
    path('', views.index, name='home'),
    path('about/', views.about, name='about'),
    path('contact/', views.contact, name='contact'),
    path('privacy/', views.privacy, name='privacy'),
    path('terms/', views.terms, name='terms'),
]
| StarcoderdataPython |
# filename: full.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Module to fetch images(or other types of attachments)
from VK dialog and download them to local drive.
"""
from time import time
from sys import stdout
import urllib.request
import requests
import json
def get_photos(folder, **kwargs):
    """
    Fetch all pages of the VK "messages.getHistoryAttachments" API call.

    Every keyword argument is stringified and forwarded as an API request
    parameter; pagination is followed through the "next_from" cursor. Each
    raw JSON page is additionally dumped to a "<next_from>.json" file in
    the current directory. Returns the list of parsed JSON pages.

    NOTE(review): the *folder* parameter is accepted but never used here.
    """
    base = "https://api.vk.com/method/messages.getHistoryAttachments"
    params = {}
    for key, value in kwargs.items():
        params[key] = str(value)
        # NOTE(review): this echoes every parameter, including the
        # access_token, to stdout -- a credential leak; consider removing.
        print(key, value)
    jsons = []
    time_then = time()
    response = requests.get(base, params=params)
    jresponse = response.json()
    jsons.append(jresponse)
    with open('{0}.json'.format(jresponse['response']["next_from"]), 'w') as outfile:
        json.dump(jresponse, outfile)
    while "next_from" in jresponse['response']:
        start_from = jresponse['response']["next_from"]
        params['start_from'] = start_from
        response = requests.get(base, params=params)
        jresponse = response.json()
        jsons.append(jresponse)
        # NOTE(review): the final page may not carry "next_from", in which
        # case this lookup raises KeyError -- confirm against the VK API.
        with open('{0}.json'.format(jresponse['response']["next_from"]), 'w') as outfile:
            json.dump(jresponse, outfile)
    print("Data created in %ds" % round(time()-time_then, 3))
    return jsons
def download(data):
    """
    Download every photo attachment found in *data* (the list of API pages
    produced by get_photos) into the module-level *folder* directory,
    naming the files sequentially 1.jpg, 2.jpg, ...

    Progress is reported in place on stdout.
    """
    time_then = time()
    count = 0
    for part in data:
        for item in part['response']:
            # Skip bookkeeping keys; real attachments are keyed by index.
            if part['response'] != [0] and item != "next_from" and item != '0':
                # Bug fix: look the item up in the *current* page ("part")
                # rather than always in the first page (data[0]), which
                # downloaded only page one and crashed on later pages.
                link = part['response'][str(item)]['photo']["src_big"]
                count += 1
                urllib.request.urlretrieve(link, '{0}/{1}.jpg'.format(folder, count))
                stdout.write("\r%d done" % int(count))
                stdout.flush()
    stdout.write("\r \r\n")
    print("Files downloaded in %ds" % round(time()-time_then, 3))
if __name__ == "__main__":
    # The application access token must be filled in before running; it is
    # sent with every API request.
    access_token = "<GENERATED APP ACCESS TOKEN HERE>"
    peer_id = input("Enter dialog id: ") #Enter dialog id from prompt
    # peer_id = "<DIALOG ID HERE>" or directly in code
    folder = input("Enter folder name to save files into: ")
    # Fetch every page of photo attachments for the dialog, then download
    # them into the chosen folder (which must already exist).
    data = get_photos(folder=folder,
                      peer_id=peer_id,
                      access_token=access_token,
                      count=200,
                      media_type="photo"
                      )
    download(data)
| StarcoderdataPython |
# Generated by Django 3.0.4 on 2020-03-13 13:58

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: drops KobePost.summitTime and
    # normalizes the primary-key field definition. Do not hand-edit the
    # operations; create a new migration instead.

    dependencies = [
        ('kobe', '0006_kobepost_summittime'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='kobepost',
            name='summitTime',
        ),
        migrations.AlterField(
            model_name='kobepost',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| StarcoderdataPython |
from django.apps import AppConfig


class ExtractionConfig(AppConfig):
    """Django application configuration for the "extraction" app."""
    name = 'extraction'
| StarcoderdataPython |
170605 | """Tests for graphein.protein.features.nodes.amino_acids"""
# Graphein
# Author: <NAME> <<EMAIL>>, <NAME>
# License: MIT
# Project Website: https://github.com/a-r-j/graphein
# Code Repository: https://github.com/a-r-j/graphein
from functools import partial
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from graphein.protein.config import ProteinGraphConfig
from graphein.protein.features.nodes.amino_acid import (
amino_acid_one_hot,
expasy_protein_scale,
hydrogen_bond_acceptor,
hydrogen_bond_donor,
load_expasy_scales,
)
from graphein.protein.graphs import construct_graph
from graphein.protein.resi_atoms import RESI_THREE_TO_1
def test_load_expasy_scale():
    """Example-based test for `load_expasy_scales`."""
    all_scales = load_expasy_scales()
    leu_scale = expasy_protein_scale(n="A13LEU", d={"residue_name": "LEU"})
    # The per-node feature must equal the raw LEU column of the scale table.
    assert_series_equal(leu_scale, all_scales["LEU"])
def test_load_meilier_embeddings():
    """Placeholder test for `load_meiler_embeddings` -- currently no assertions.

    NOTE(review): the function name appears to misspell "meiler"; the body
    below only documents intent and performs no check.
    """
    # The test implemented here should test that something about the meiler_embeddings csv file is true.
    # An execution test is one that simply tests that the function executes.
    # In other words, the _only_ thing we are guaranteeing here
    # is that the function will execute without erroring out.
    # We are not guaranteeing the correctness of the output.
    # This can be modified.
def test_expasy_protein_scale():
    """Execution test for `expasy_protein_scale` function."""
    # Only verifies that the call completes; the return value is not inspected.
    expasy_protein_scale("DUMMY", {"residue_name": "LEU"})
def test_amino_acid_one_hot_execution():
    """Execution test for `amino_acid_one_hot` function."""
    # Only verifies that the call completes; the return value is not inspected.
    amino_acid_one_hot("DUMMY", {"residue_name": "LEU"})
def test_amino_acid_one_hot_example():
    """Example-based test on 4hhb for `amino_acid_onehot`."""
    # Array output: every node's encoding must be a valid one-hot vector.
    cfg = ProteinGraphConfig(node_metadata_functions=[amino_acid_one_hot])
    graph = construct_graph(pdb_code="4hhb", config=cfg)
    for _, data in graph.nodes(data=True):
        assert sum(data["amino_acid_one_hot"]) == 1
    # Series output: additionally, the index of the hot entry must be the
    # residue's one-letter code.
    cfg = ProteinGraphConfig(
        node_metadata_functions=[
            partial(amino_acid_one_hot, return_array=False)
        ]
    )
    graph = construct_graph(pdb_code="4hhb", config=cfg)
    for _, data in graph.nodes(data=True):
        one_hot = data["amino_acid_one_hot"]
        assert sum(one_hot) == 1
        assert one_hot.idxmax() == RESI_THREE_TO_1[data["residue_name"]]
def test_hydrogen_bond_acceptor():
    """Check the ``hbond_acceptors`` node feature for every combination of
    output type (pd.Series / np.ndarray), feature summing, and graph
    granularity (residue-level default and atom-level).

    Refactored: the original repeated the same config/build/assert sequence
    six times; the combinations are now driven from a table.
    """
    # (extra kwargs for the feature function, expected value type,
    #  whether the un-summed value must be bounded below 2)
    cases = [
        ({}, pd.Series, False),
        ({"return_array": True}, np.ndarray, False),
        ({"return_array": True, "sum_features": False}, np.ndarray, True),
    ]
    for granularity_kwargs in ({}, {"granularity": "atom"}):
        for feat_kwargs, expected_type, bounded in cases:
            node_fn = (
                partial(hydrogen_bond_acceptor, **feat_kwargs)
                if feat_kwargs
                else hydrogen_bond_acceptor
            )
            config = ProteinGraphConfig(
                node_metadata_functions=[node_fn], **granularity_kwargs
            )
            g = construct_graph(pdb_code="4hhb", config=config)
            for _, d in g.nodes(data=True):
                assert "hbond_acceptors" in d.keys()
                assert isinstance(d["hbond_acceptors"], expected_type)
                if bounded:
                    # A single (un-summed) indicator feature is 0 or 1.
                    assert d["hbond_acceptors"] < 2
def test_hydrogen_bond_donor():
    """Check the ``hbond_donors`` node feature for every combination of
    output type (pd.Series / np.ndarray), feature summing, and graph
    granularity (residue-level default and atom-level).

    Refactored: the original repeated the same config/build/assert sequence
    six times; the combinations are now driven from a table.
    """
    # (extra kwargs for the feature function, expected value type,
    #  whether the un-summed value must be bounded below 2)
    cases = [
        ({}, pd.Series, False),
        ({"return_array": True}, np.ndarray, False),
        ({"return_array": True, "sum_features": False}, np.ndarray, True),
    ]
    for granularity_kwargs in ({}, {"granularity": "atom"}):
        for feat_kwargs, expected_type, bounded in cases:
            node_fn = (
                partial(hydrogen_bond_donor, **feat_kwargs)
                if feat_kwargs
                else hydrogen_bond_donor
            )
            config = ProteinGraphConfig(
                node_metadata_functions=[node_fn], **granularity_kwargs
            )
            g = construct_graph(pdb_code="4hhb", config=config)
            for _, d in g.nodes(data=True):
                assert "hbond_donors" in d.keys()
                assert isinstance(d["hbond_donors"], expected_type)
                if bounded:
                    # A single (un-summed) indicator feature is 0 or 1.
                    assert d["hbond_donors"] < 2
# def test_aaindex_1_feat():
# """Execution test for `aaindex_1_feat`."""
# d = {"residue_name": "LEU"}
# n = "DUMMY"
# aaindex_1_feat(n, d, feature_name="KRIW790103")
| StarcoderdataPython |
5183228 | <gh_stars>0
import json
import os
import subprocess
import urllib.request
def lambda_handler(event, context):
    """AWS Lambda entry point: fetch the Confluent Cloud CLI, authenticate via
    environment variables, and return the account's Kafka clusters as JSON.
    """
    # Download the CLI and make it executable; must use 1.33.0+
    cli_path, _headers = urllib.request.urlretrieve(
        "https://s3-us-west-2.amazonaws.com/confluent.cloud/ccloud-cli/binaries/1.33.0/ccloud_1.33.0_linux_amd64"
    )
    subprocess.run(["chmod", "755", cli_path])
    env = os.environ.copy()
    # NOTE(review): placeholder credentials -- replace before deploying.
    env['CCLOUD_EMAIL'] = 'your Confluent credentials'
    env['CCLOUD_PASSWORD'] = '<PASSWORD>'
    # AWS Lambda functions only have access to /tmp by default;
    # must override home directory location for CLI config file
    env['HOME'] = '/tmp/'
    # Return JSON-formatted array of all clusters
    listing = subprocess.run(
        [cli_path, 'kafka', 'cluster', 'list', '-o', 'json'],
        env=env, capture_output=True, text=True
    ).stdout
    return {
        'statusCode': 200,
        'body': json.loads(listing)
    }
| StarcoderdataPython |
3567260 | <reponame>Eurydia/Xian-assignment
_ = input()  # first line is the element count; the value itself is unused
values = [int(token) for token in input().split()]
all_positive = all(value > 0 for value in values)
# NOTE(review): this reads like a palindrome heuristic (single digit, or
# divisible by 11) -- it does not cover all palindromic integers; confirm
# against the intended problem statement.
has_special = any(value < 10 or value % 11 == 0 for value in values)
print(all_positive and has_special)
| StarcoderdataPython |
6645439 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import wx
from editor.fakeplugin import FakePlugin
from nose.tools import assert_true
from robotide.controller.macrocontrollers import TestCaseController
from robotide.editor.macroeditors import TestCaseEditor
# Monkey-patch widget population away so editors can be built without a full UI.
TestCaseEditor._populate = lambda self: None
class IncredibleMock(object):
    """A mock that absorbs any attribute access or call, always yielding itself.

    Useful as a stand-in for arbitrarily deep object graphs in tests.
    """
    def __getattr__(self, name):
        # Any unknown attribute resolves to the mock itself.
        return self

    def __call__(self, *args, **kwargs):
        # Calling the mock is a no-op that stays chainable.
        return self
class MockKwEditor(object):
    """Records attribute accesses and calls so tests can verify delegation.

    Usage: ``mock.some_method.is_to_be_called()`` registers an expectation,
    calling ``mock.some_method()`` records the call, and
    ``mock.some_method.has_been_called()`` checks that the expected and the
    actually-called names match.
    """
    _expect = None
    _called = None

    def __getattr__(self, name):
        # Remember which attribute was touched last; stay chainable.
        self._active_item = name
        return self

    def __call__(self, *args, **kwargs):
        # Record that the most recently accessed attribute was invoked.
        self._called = self._active_item

    def is_to_be_called(self):
        self._expect = self._active_item

    def has_been_called(self):
        expected = self._expect
        return self._active_item == expected and expected == self._called
class MacroEditorTest(unittest.TestCase):
    """Verify that TestCaseEditor forwards editing commands to its keyword editor."""

    def setUp(self):
        # Build an editor whose controller/plugin dependencies are inert mocks.
        controller = TestCaseController(IncredibleMock(), IncredibleMock())
        plugin = FakePlugin({}, controller)
        self.tc_editor = TestCaseEditor(
            plugin, wx.Frame(None), controller, None)

    def test_delegation_to_kw_editor(self):
        # Each (editor method, expected kweditor method) pair must delegate 1:1.
        for method, kw_method in \
                [('save', 'save'),
                 ('undo', 'OnUndo'),
                 ('redo', 'OnRedo'),
                 ('cut', 'OnCut'),
                 ('copy', 'OnCopy'),
                 ('paste', 'OnPaste'),
                 ('insert', 'OnInsert'),
                 ('insert_rows', 'OnInsertRows'),
                 ('delete_rows', 'OnDeleteRows'),
                 ('delete', 'OnDelete'),
                 ('comment', 'OnCommentRows'),
                 ('uncomment', 'OnUncommentRows'),
                 ('show_content_assist', 'show_content_assist')]:
            # Swap in a fresh recording mock for every pair.
            kw_mock = MockKwEditor()
            self.tc_editor.kweditor = kw_mock
            getattr(kw_mock, kw_method).is_to_be_called()
            getattr(self.tc_editor, method)()
            assert_true(getattr(kw_mock, kw_method).has_been_called(),
                        'Should have called "%s" when calling "%s"' %
                        (kw_method, method))
| StarcoderdataPython |
337271 | <reponame>lesserwhirls/scipy-cwt<filename>scipy/sparse/linalg/eigen/__init__.py
"Sparse eigenvalue solvers"
from info import __doc__
from arpack import *
from lobpcg import *
__all__ = filter(lambda s:not s.startswith('_'),dir())
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| StarcoderdataPython |
9769716 | from __future__ import absolute_import
import json
import os
import unittest
from functools import partial
import cwltool.pack
from cwltool.main import print_pack as print_pack
import cwltool.workflow
from cwltool.load_tool import fetch_document, validate_document
from cwltool.main import makeRelative
from cwltool.pathmapper import adjustDirObjs, adjustFileObjs
from .util import get_data
class TestPack(unittest.TestCase):
    """Tests for cwltool's workflow packing (flattening into one document)."""

    def test_pack(self):
        # Packed-document diffs are long; show them in full on failure.
        self.maxDiff = None

        document_loader, workflowobj, uri = fetch_document(
            get_data("tests/wf/revsort.cwl"))
        document_loader, avsc_names, processobj, metadata, uri = validate_document(
            document_loader, workflowobj, uri)
        packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
        with open(get_data("tests/wf/expect_packed.cwl")) as f:
            expect_packed = json.load(f)
        # Rewrite absolute File/Directory paths relative to tests/wf so the
        # comparison is location-independent.
        adjustFileObjs(packed, partial(makeRelative,
                                       os.path.abspath(get_data("tests/wf"))))
        adjustDirObjs(packed, partial(makeRelative,
                                      os.path.abspath(get_data("tests/wf"))))
        # $schemas must be present, but its exact content is not compared.
        self.assertIn("$schemas", packed)
        del packed["$schemas"]
        del expect_packed["$schemas"]
        self.assertEqual(expect_packed, packed)

    def test_pack_missing_cwlVersion(self):
        """Test to ensure the generated pack output is not missing
        the `cwlVersion` in case of single tool workflow and single step workflow"""
        # Since diff is longer than 3174 characters
        self.maxDiff = None

        # Testing single tool workflow
        document_loader, workflowobj, uri = fetch_document(
            get_data("tests/wf/hello_single_tool.cwl"))
        document_loader, avsc_names, processobj, metadata, uri = validate_document(
            document_loader, workflowobj, uri)
        # generate pack output dict
        packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
        self.assertEqual('v1.0', packed["cwlVersion"])

        # Testing single step workflow
        document_loader, workflowobj, uri = fetch_document(
            get_data("tests/wf/hello-workflow.cwl"))
        document_loader, avsc_names, processobj, metadata, uri = validate_document(
            document_loader, workflowobj, uri)
        # generate pack output dict
        packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
        self.assertEqual('v1.0', packed["cwlVersion"])
| StarcoderdataPython |
1897726 | <gh_stars>0
from io import StringIO
class Diagnostics_HTML():
    """Builds (and caches) the HTML markup for the diagnostics page."""

    def __init__(self, settings, device, page_elements):
        # settings/device are stored for parity with sibling page classes;
        # only page_elements is consulted when rendering.
        self.config = settings
        self.device = device
        self.diagnostics_html = None  # cached rendered page
        self.page_elements = page_elements

    def get_diagnostics_html(self, base_url, force_update=False):
        """Return the diagnostics page HTML, rendering it on first use or
        when force_update is True.

        NOTE(review): ``base_url`` is currently unused -- confirm whether the
        button links should be prefixed with it.
        """
        if not self.diagnostics_html or force_update:
            fakefile = StringIO()
            page_elements = self.page_elements.get()
            for line in page_elements["top"]:
                fakefile.write(line + "\n")
            # Label/target pairs for the diagnostic action buttons.
            button_list = [
                ["Force Channel Update", "chanscan"],
                ["debug", "debug.json"],
                ["device.xml", "device.xml"],
                ["discover.json", "discover.json"],
                ["lineup.json", "lineup.json"],
                ["lineup_status.json", "lineup_status.json"],
                ["cluster.json", "cluster.json"]
                ]
            for button_label, button_path in button_list:
                fakefile.write("<div style=\"text-align: center;\">\n")
                # Fixed: the <button> element was closed with a stray </a>
                # (leftover anchor markup), producing invalid HTML.
                fakefile.write(" <p><button onclick=\"OpenLink('%s')\">%s</button></p>\n" % (button_path, button_label))
                fakefile.write("</div>\n")
            fakefile.write("\n")
            for line in page_elements["end"]:
                fakefile.write(line + "\n")
            self.diagnostics_html = fakefile.getvalue()
        return self.diagnostics_html
| StarcoderdataPython |
61623 | <reponame>rombie/contrail-test
'''
Generate Policy test scenarios based on user input.
Policy test cases can be built based on this topology.
'''
import copy
from random import choice
class PolicyTestBasicConfig_1():
    """Builds a basic policy test topology: N policies, N VNs (each with one
    policy attached and one /24 subnet), and N VMs (one per VN), plus M
    deny-rules per policy.

    NOTE(review): Python 2 source (``print`` statements below).
    """
    def __init__(self):
        self.vmc_list = []      # generated VM names
        self.vnet_list = []     # generated VN names
        self.policy_list = []   # generated policy names
        self.vn_subnets = {}    # VN name -> list of subnets
        self.vn_policy = {}     # VN name -> list of attached policy names
        self.vn_of_cn = {}
        self.vm_of_cn = {}
        self.vn_of_vm = {}      # VM name -> VN it is launched in
        begin_oct = 10          # first octet of generated subnets (10.1.1.0/24, 11.1.1.0/24, ...)
        base_net = '.1.1.0/24'
        numEntity = 1           # how many of each entity type to generate
        numRules = 4            # rules per policy
        # For a given no., generate n policies, VN & VM's, attach policies to VN's
        # & launch VM instances in the VN's.
        for type in ['policy', 'vnet', 'vmc']:
            for i in range(numEntity):
                net = str(begin_oct) + base_net
                name = type + str(i)
                # NOTE(review): the two string assignments below are
                # immediately overwritten by empty lists -- apparently
                # vestigial naming code.
                policy_list_name = 'policy_list_' + name
                vnet_list_name = 'vnet_list_' + name
                policy_list_name = []
                vnet_list_name = []
                if type == 'policy':
                    self.policy_list.append(name)
                elif type == 'vnet':
                    self.vnet_list.append(name)
                    vnet_list_name.append(net)
                    self.vn_subnets[name] = vnet_list_name
                    begin_oct += 1
                    policy_list_name.append(self.policy_list[i])
                    self.vn_policy[name] = policy_list_name
                elif type == 'vmc':
                    self.vmc_list.append(name)
                    self.vn_of_vm[name] = self.vnet_list[i]
        print self.policy_list, self.vmc_list, self.vnet_list, self.vn_subnets, \
            self.vn_policy, self.vn_of_vm
        # Generate m different rules for each policy
        self.rules = {}
        for j in range(len(self.policy_list)):
            # Pick a random protocol (TCP, UDP, ICMP or any) per policy.
            proto_opts = [6, 17, 1, 'any']
            proto = choice(proto_opts)
            self.rules['policy' + str(j)] = []
            for i in range(numRules):
                # Each rule denies intra-VN traffic on src port i.
                rule_base = {'direction': '>', 'protocol': proto,
                             'source_network': self.vnet_list[j],
                             'src_ports': [i, i], 'dest_network': self.vnet_list[j],
                             'dst_ports': 'any', 'simple_action': 'deny'}
                self.rules['policy' + str(j)].append(rule_base)
        # There can be multple policies for a VN, build the list of policies by
        # VN
        self.policy_vn = {}
        for policy in self.policy_list:
            self.policy_vn[policy] = []
            for vn in self.vnet_list:
                if policy in self.vn_policy[vn]:
                    self.policy_vn[policy].append(vn)
                    # print "added vn %s to list for policy %s" %(vn, policy)
            # print "completed parsing vn %s policy list" %(vn)
            # print "completed building vn list for policy %s, list is %s" %(policy, self.policy_vn[policy])
    # end __init__
# end class
# When run directly, build the topology once as a smoke check.
if __name__ == '__main__':
    PolicyTestBasicConfig_1()
| StarcoderdataPython |
9746080 | """Unit tests for hello_world.py."""
from hello_world.hello_world import hello
def test_hellow_world():
    """The greeting must be the word "Hello" followed by the given name."""
    for name in ("Frank", "Sam"):
        assert hello(name) == "Hello " + name
| StarcoderdataPython |
7957 | #!/usr/bin/env python
#
#The MIT License (MIT)
#
# Copyright (c) 2015 Bit9 + Carbon Black
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -----------------------------------------------------------------------------
# Extension regmod watcher and grabber
#
# This script listens to the CB messaging bus for registry modification events,
# and when a modification is seen that matches a regular expression from a file
# of registry path regular expressions, it goes and grabs the registry value
# using CB Live Response.
#
# You need to make sure rabbitmq is enabled in cb.conf, and you might need to
# open a firewall rule for port 5004. You also will need to enable regmod
# in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed
# here, you'll have to do service cb-enterprise restart.
#
# TODO: More error handling, more performance improvements
#
# last updated 2016-01-23 by <NAME> <EMAIL> (<EMAIL>)
#
import re
import Queue
import sys
from threading import Thread
import time
import traceback
try:
from cbapi.legacy.util.cli_helpers import main_helper
from cbapi.legacy.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.legacy.util.sensor_events_pb2 as cpb
except ImportError:
from cbapi.util.cli_helpers import main_helper
from cbapi.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.util.sensor_events_pb2 as cpb
# NOTE(review): Python 2 source throughout (print statements, Queue module).
class RegistryModWatcherAndValueGrabber(MessageSubscriberAndLiveResponseActor):
    """
    This class subscribes to messages from the CB messaging bus,
    looking for regmod events. For each regmod event, it checks
    to see if the the registry path matches one of our regexes.
    If it does, it goes and grabs it.
    """
    def __init__(self, cb_server_url, cb_ext_api, username, password, regmod_regexes, verbose):
        # regmod_regexes: pre-compiled regexes matched against registry paths.
        self.regmod_regexes = regmod_regexes
        self.verbose = verbose
        MessageSubscriberAndLiveResponseActor.__init__(self,
                                                       cb_server_url,
                                                       cb_ext_api,
                                                       username,
                                                       password,
                                                       "ingress.event.regmod")
        # Threading so that message queue arrives do not block waiting for live response
        self.queue = Queue.Queue()
        self.go = True
        self.worker_thread = Thread(target=self._worker_thread_loop)
        self.worker_thread.start()

    def on_stop(self):
        # Signal the worker to finish, give it a moment, then stop the base actor.
        self.go = False
        self.worker_thread.join(timeout=2)
        MessageSubscriberAndLiveResponseActor.on_stop(self)

    def consume_message(self, channel, method_frame, header_frame, body):
        """Bus callback: filter regmod events and enqueue matching paths."""
        if "application/protobuf" != header_frame.content_type:
            return
        try:
            # NOTE -- this is not very efficient in PYTHON, and should
            # use a C parser to make this much, much faster.
            # http://yz.mit.edu/wp/fast-native-c-protocol-buffers-from-python/
            x = cpb.CbEventMsg()
            x.ParseFromString(body)
            if not x.regmod or x.regmod.action != 2:
                # Check for MODIFICATION event because we will usually get
                # a creation event and a modification event, and might as
                # well go with the one that says data has actually been written.
                return
            regmod_path = None
            if x.regmod.utf8_regpath:
                if self.verbose:
                    print "Event arrived: |%s|" % x.regmod.utf8_regpath
                # First matching regex wins.
                for regmod_regex in self.regmod_regexes:
                    if regmod_regex.match(x.regmod.utf8_regpath):
                        regmod_path = x.regmod.utf8_regpath
                        break
            if regmod_path:
                # Normalize kernel-style registry prefixes to hive shorthand.
                regmod_path = regmod_path.replace("\\registry\\machine\\", "HKLM\\")
                regmod_path = regmod_path.replace("\\registry\\user\\", "HKEY_USERS\\")
                regmod_path = regmod_path.strip()
                # TODO -- more cleanup here potentially?
                self.queue.put((x, regmod_path))
        except:
            traceback.print_exc()

    def _worker_thread_loop(self):
        """Drain the queue and fetch each registry value via Live Response."""
        while self.go:
            try:
                try:
                    # Short timeout so the loop can notice self.go changing.
                    (x, regmod_path) = self.queue.get(timeout=0.5)
                except Queue.Empty:
                    continue
                # TODO -- could comment this out if you want CSV data to feed into something
                print "--> Attempting for %s" % regmod_path
                # Go Grab it if we think we have something!
                sensor_id = x.env.endpoint.SensorId
                hostname = x.env.endpoint.SensorHostName
                # TODO -- this could use some concurrency and work queues because we could wait a while for
                # each of these to get established and retrieve the value
                # Establish our CBLR session if necessary!
                lrh = self._create_lr_session_if_necessary(sensor_id)
                data = lrh.get_registry_value(regmod_path)
                # Emit one CSV line per grabbed value.
                print "%s,%s,%d,%s,%s,%s" % ( time.asctime(),
                                              hostname,
                                              sensor_id,
                                              x.header.process_path,
                                              regmod_path,
                                              data.get('value_data', "") if data else "<UNKNOWN>")
                # TODO -- could *do something* here, like if it is for autoruns keys then go check the signature status
                # of the binary at the path pointed to, and see who wrote it out, etc
            except:
                traceback.print_exc()
def main(cb, args):
    """Load the regex file, then run the watcher until Ctrl-C.

    Parameters:
        cb: Carbon Black API client handle.
        args: parsed CLI options (username, password, regpaths_file, verbose).
    """
    username = args.get("username")
    password = args.get("password")
    regpaths_file = args.get("regpaths_file")
    verbose = args.get("verbose", False)
    if verbose:
        # maybe you want to print out all the regpaths we're using?
        print "Regpaths file:", regpaths_file
    # Python 2 built-in file(); one regex per non-empty line.
    f = file(regpaths_file, 'rb')
    regpaths_data = f.read()
    f.close()
    regmod_regexes = []
    for line in regpaths_data.split('\n'):
        line = line.strip()
        if len(line) == 0:
            continue
        regmod_regexes.append(re.compile(line))
    listener = RegistryModWatcherAndValueGrabber(args.get('server_url'), cb, username, password, regmod_regexes, verbose)
    try:
        if verbose:
            print "Registry Mod Watcher and Grabber -- started. Watching for:", regpaths_data
        else:
            print "Registry Mod Watcher and Grabber -- started. Watching for %d regexes" % len(regmod_regexes)
        # Blocks, consuming bus messages, until interrupted.
        listener.process()
    except KeyboardInterrupt:
        print >> sys.stderr, "Caught Ctrl-C"
        listener.stop()
    print "Registry Mod Watcher and Grabber -- stopped."
if __name__ == "__main__":
## YOU CAN USE data/autoruns_regexes.txt to test ##
required_args =[("-i", "--username", "store", None, "username", "CB messaging username"),
("-p", "--password", "store", None, "password", "CB messaging password"),
("-r", "--regpaths_file", "store", None, "regpaths_file", "File of newline delimited regexes for regpaths")]
optional_args = [("-v", "--verbose", "store_true", False, "verbose", "Enable verbose output")]
main_helper("Subscribe to message bus events and for each registry modification that matches one of our supplied regexes, go retrieve value.",
main,
custom_required=required_args,
custom_optional=optional_args)
| StarcoderdataPython |
9616265 | <reponame>mzweig/LogicalLens
from setuptools import find_packages, setup
# Package metadata for the logical-lens distribution.
setup(
    name='logical-lens',
    version='0.0.1',
    # Fixed: the two implicitly-concatenated fragments were missing a
    # separating space ("...specifications toembed domain...").
    description='Python library for using parametric specifications to '
                'embed domain specific knowledge in machine learning.',
    url='https://github.com/mvcisback/LogicalLens',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    install_requires=[
        'attrs',
        'funcy',
        'monotone-bipartition',
        'numpy',
    ],
    packages=find_packages(),
)
| StarcoderdataPython |
6683365 | """ Terrain package """
from .terrain import Terrain
from .wms import WMS
from .srtm import SRTM
from .threedep import ThreeDEP
# Public names re-exported from the terrain package.
__all__ = [
    "Terrain",
    "WMS",
    "SRTM",
    "ThreeDEP"
]
| StarcoderdataPython |
46792 | """This imports all the lib package classes"""
from gateway import Gateway
| StarcoderdataPython |
8076355 | # Generated by Django 3.0.3 on 2020-07-04 20:10
from django.db import migrations, models
# Auto-generated Django migration: adds a ``gender`` column to ``customer``.
class Migration(migrations.Migration):
    # Must run after the previous accounts migration.
    dependencies = [
        ('accounts', '0003_auto_20200704_0236'),
    ]
    operations = [
        # Existing rows receive the default value 'male'.
        migrations.AddField(
            model_name='customer',
            name='gender',
            field=models.CharField(default='male', max_length=100),
        ),
    ]
| StarcoderdataPython |
4910549 | # -*- coding: utf-8 -*-
#
# djangoplicity-contacts
# Copyright (c) 2007-2011, European Southern Observatory (ESO)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the European Southern Observatory nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESO ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE
#
"""
Helper classes for iterating over data in tabular data located in CSV/Excel files.
An important restriction for all tabular data files are that they must have
* a header row and,
* unique column names.
Usage example::
# First construct an importer.
>>> importer = CSVImporter( filename='/path/to/csvfile.csv' )
>>> importer = ExcelImporter( filename='/path/to/excelfile.xls', sheet=1 )
# Iterate over rows in tabular data
>>> for row in importer:
... for c in importer.keys(): # Iterator over each column
... print row[c]
# Extract column and iterate over all values
>>> column_values = importer['SomeColumnName']
>>> for val in column_values:
... print val
# Get number of rows in tabular data
>>> number_of_rows = len(importer)
# Check if column exists in importer
>>> test = 'SomeColumnName' in importer
# Get all column names in importer
>>> header = importer.keys()
# Get a list of all column values
>>> header = importer.values()
#
>>> for column,values in importer.items()
... print column
... for v in values:
... print v
"""
import codecs
import csv
import xlrd
# ============
# Base classes
# ============
class Importer( object ):
    """
    Abstract base class for all importers over tabular data.

    Subclasses populate ``self.cols`` (column name -> column index) and
    override ``__len__``, ``__getitem__`` and ``__iter__``.
    """
    def __init__( self, *args, **kwargs ):
        # Mapping of column name to column index; filled in by subclasses.
        self.cols = {}

    def __iter__( self ):
        return ImportIterator()

    def __len__( self ):
        # The base importer holds no rows.
        return 0

    def __getitem__( self, value ):
        # The base importer has no columns to extract.
        raise KeyError

    def __contains__( self, value ):
        """
        Test if column name exists in the tabular data.
        """
        return value in self.cols

    def keys( self ):
        """
        Return all column names.
        """
        return self.cols.keys()

    def items( self ):
        # (column name, column values) pairs for every column.
        return [ ( name, self[name] ) for name in self.keys() ]

    def values( self ):
        # Column value lists, in the same order as keys().
        return [ self[name] for name in self.keys() ]
class ImportIterator( object ):
"""
Abstract base class for all import iterators.
"""
def __iter__( self ):
return self
def next( self ):
raise StopIteration
# ==============
# Excel Importer
# ==============
class ExcelImporter( Importer ):
    """
    Importer for Excel files.

    Defaults:
        sheet = 0
    """
    def __init__( self, filename=None, sheet=0 ):
        """
        Initialize importer by opening the Excel file and
        reading out a specific sheet.
        """
        self.sheet = xlrd.open_workbook( filename ).sheet_by_index( sheet )
        # Build the column-name -> column-index map from the header row (row 0).
        i = 0
        self._header = []
        self.cols = {}
        for c in self.sheet.row_values( 0 ):
            # Python 2 basestring: strip whitespace from textual headers only.
            if isinstance(c, basestring):
                c = c.strip()
            self.cols[c] = i
            self._header.append( ( c, None ) )
            i += 1

    def header( self ):
        """
        Return the Excel header for this file. This can be used as input to
        ExcelExporter.
        """
        import copy
        # Shallow copy so callers cannot mutate our cached header.
        return copy.copy( self._header )

    def __len__( self ):
        """
        Return the number of rows in the excel file.
        """
        # Minus one to exclude the header row.
        return self.sheet.nrows - 1

    def __getitem__( self, value ):
        """
        Return all values for a specific column
        """
        # Slice off the header cell.
        return self.sheet.col_values( self.cols[value] )[1:]

    def row( self, rowidx ):
        """
        Return a specific row in the table.
        """
        # Shift by one to skip the header row.
        rowidx = rowidx + 1
        data = {}
        for colname, idx in self.cols.items():
            data[colname] = self.sheet.cell( rowx=rowidx, colx=idx ).value
        return data

    def __iter__( self ):
        return ExcelImportIterator( self )
class ExcelImportIterator( ImportIterator ):
    """
    Iterates over the rows of an ExcelImporter, yielding one dict per row.
    """
    def __init__( self, excelimporter ):
        self.excelimporter = excelimporter
        # Advanced before each read, so the first row returned is index 0.
        self.rowidx = -1

    def next( self ):
        self.rowidx += 1
        if self.rowidx < len( self.excelimporter ):
            return self.excelimporter.row( self.rowidx )
        raise StopIteration
# ==============
# CSV Importer
# ==============
class CSVImporter( Importer ):
    """
    Importer for CSV files.

    Defaults:
        encoding='utf-8'
        dialect=csv.excel
    """
    def __init__( self, filename=None, **kwargs ):
        """
        Initialise importer by opening the CSV file, reading the header row
        and loading all remaining rows into memory.
        """
        f = open( filename, 'r' )
        self.csvreader = _UnicodeReader( f, **kwargs )
        # Parse header
        i = 0
        self.cols = {}
        header = self.csvreader.next()
        for c in header:
            # Python 2 basestring: strip whitespace from textual headers only.
            if isinstance(c, basestring):
                c = c.strip()
            self.cols[c] = i
            i += 1
        # Build dictionary of tabular data
        self._rows = []
        for r in self.csvreader:
            data = {}
            for c, i in self.cols.items():
                try:
                    data[c] = r[i]
                except IndexError:
                    # Short rows yield None for their missing columns.
                    data[c] = None
            self._rows.append( data )

    def __len__( self ):
        """
        Return the number of rows in the CSV file.
        """
        return len( self._rows )

    def __getitem__( self, value ):
        """
        Return all values for a specific column
        """
        column = []
        for r in self._rows:
            column.append( r[value] )
        return column

    def row( self, rowidx ):
        """
        Return a specific row in the table.
        """
        return self._rows[rowidx]

    def __iter__( self ):
        return self._rows.__iter__()
class _UTF8Recoder:
    """
    Iterator that reads an encoded stream and re-encodes the input to UTF-8,
    so the Python 2 csv module (which is bytes-only) can consume it.
    """
    def __init__( self, f, encoding ):
        self.reader = codecs.getreader( encoding )( f )

    def __iter__(self):
        return self

    def next( self ):
        # Decode one line with the source encoding, hand it on as UTF-8 bytes.
        return self.reader.next().encode( "utf-8" )
class _UnicodeReader( object ):
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding. Rows are yielded as lists of
    unicode strings (Python 2).
    """
    def __init__( self, f, dialect=csv.excel, encoding="utf-8", **kwds ):
        # Recode the input stream to UTF-8 so csv.reader can parse it.
        f = _UTF8Recoder( f, encoding )
        self.reader = csv.reader( f, dialect=dialect, **kwds )

    def next(self):
        row = self.reader.next()
        # Decode each UTF-8 cell back to a unicode object.
        return [unicode( s, "utf-8" ) for s in row]

    def __iter__(self):
        return self
| StarcoderdataPython |
3369343 | #!/bin/python3
"""
Script to perform hyperparameter optimization on a reservoir computer with the specified options.
Run as:
python3 opt_then_test.py SYSTEM MAP_INITIAL PREDICTION_TYPE METHOD [Results directory] [options...]
### Script arguments ###
Choose SYSTEM from ["lorenz", "rossler", "thomas", "softrobot"]
Choose MAP_INITIAL from ["random", "activ_f", "relax"]
Choose PREDICTION_TYPE from ["continue", "random"]
Choose METHOD from ["standard", "augmented"]
Additional options:
"--test" - run with testing values.
"--dashboard" - enable the sherpa dashboard. Not supported on Windows.
"--parallel=<profile name>" - use parallel processing, on all accessible nodes. Uses the controller with the given profile name. Requires ipyparallel and dill packages to be installed.
"""
import sys
from datetime import datetime
#Check for sufficient arguments before importing anything
if __name__ == "__main__":
    #Extract the additional options from sys.argv
    options = {item for item in sys.argv[1:] if item[:2]=="--"}
    # Positional arguments with the "--" options filtered out
    argv = [item for item in sys.argv if item not in options]
    # Re-map options to {name: value}; value is "" for flag-style options
    options = {item.split('=')[0]:'='.join(item.split('=')[1:]) for item in options}
    if len(sys.argv) < 5:
        print(__doc__)
        exit()
    SYSTEM = argv[1]
    MAP_INITIAL = argv[2]
    PREDICTION_TYPE = argv[3]
    METHOD = argv[4]
    TIMESTAMP = "{:%y%m%d%H%M%S}".format(datetime.now())
    if len(argv) > 5:
        results_directory = argv[5]
    else:
        results_directory = None
else:
    # Imported as a module (e.g. on ipyparallel engines): the experiment is
    # configured later via _set_experiment().
    SYSTEM = None
    MAP_INITIAL = None
    PREDICTION_TYPE = None
    METHOD = None
    options = dict()
    argv = sys.argv
EXPERIMENT = (SYSTEM, PREDICTION_TYPE, METHOD)
PARALLEL = ("--parallel" in options.keys())
import sherpa
import pickle as pkl
import numpy as np
import rescomp as rc
from scipy.io import loadmat
from os import mkdir
### Constants
#Load from the relevant .py file
if "--test" in options.keys():
    from parameters.ott_test import *
else:
    from parameters.ott_params import *
RES_DEFAULTS["map_initial"] = MAP_INITIAL
if PARALLEL:
    import ipyparallel as ipp
    # Populated in the __main__ block once the ipyparallel client connects.
    dview = None
    node_count = 0
    p_profile = options['--parallel']
### Function definitions
# These parameters are used as a prior for the bayesian optimization.
# Decent parameters for each chaotic system are stored in rc.SYSTEMS
# A good prior for the softrobot system is:
# ROBO_PRIOR = {
# "res_sz":1000,
# "activ_f": lambda x: 1/(1+np.exp(-1*x)), "sparse_res":True, "uniform_weights":True,
# "signal_dim":6,
# "max_weight":2,
# "min_weight":0,
# "batchsize":2000,
# "drive_dim":6,
# 'delta': 0.3736117214,
# 'gamma': 18.66636932,
# 'mean_degree': 1.7242465519999999, 'ridge_alpha': 1.268554237,
# 'sigma': 0.3125062064,
# 'spect_rad': 0.8922393143999999, 'map_initial': "activ_f"
# }
# Basically change loadprior function to produce the parameters given above
# in a format that sherpa can read
def _set_experiment(*args):
    """
    A helper method to make it easier to set which experiment is being used if this file is imported.

    args: (SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD) strings, in that order.
    Mutates the module-level experiment globals and RES_DEFAULTS in place.
    """
    global SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD, EXPERIMENT
    SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD = args
    EXPERIMENT = (SYSTEM, PREDICTION_TYPE, METHOD)
    RES_DEFAULTS["map_initial"] = MAP_INITIAL
def loadprior(system, paramnames):
    """Load best parameters from random searches (Computed previously).
    Parameters not included are set to a default value found in the parameters files.
    Parameters:
        system (string): name of the system type being used
        paramnames (list of strings): name of parameters to keep
    Returns:
        priorprms (List of dictionaries): sets of hyperparameters known to be good"""
    #As far as I can tell, the sherpa function we're giving this to
    # wants a list of hyperparameter dictionaries, or a pandas.Dataframe
    # object of unknown formatting
    def _clean_prior(prior):
        """Removes unneeded parameters and adds all needed parameters"""
        # Defaults first so the stored prior values win on key collisions.
        prior = {**PRIOR_DEFAULTS, **prior}
        prior = {key:prior[key] for key in prior if key in paramnames}
        return prior
    try:
        with open(DATADIR + f"{system}_prior.pkl", "rb") as file:
            priorprms = pkl.load(file)
            if type(priorprms) is dict:
                #Clean and wrap in a list
                return [_clean_prior(priorprms)]
            elif type(priorprms) is list:
                #Clean each item in the list
                priorprms = [_clean_prior(prms)for prms in priorprms]
                return priorprms
            else:
                # Unrecognized payload type: warn and fall through to the
                # empty-prior return below.
                print(f"Warning: no correctly-formatted prior data found in {system}_prior.pkl", file=sys.stderr)
    except FileNotFoundError as e:
        print(e)
        print("Using empty prior instead.")
    #Return an empty list if we failed to load anything
    return []
def load_robo(filename):
    """Load a soft-robot dataset from a .mat file.

    Returns (t, q, pref): 1-D time array, state matrix, and pressure
    reference (drive) matrix.
    """
    contents = loadmat(DATADIR + filename)
    # 't' is stored as a 1xN matrix; take the single row as a 1-D array.
    return contents['t'][0], contents['q'], contents["pref"]
def random_slice(*args, axis=0):
    """ Take a random slice of an arbitrary number of arrays from the same index

    Parameters:
        As (ndarrays): Arbitrary number of arrays with the same size along the given axis
        slicesize (int): Size of random slice; must be no larger than the size
            of the arrays along the given axis
    Keyword Parameters:
        axis (int): Axis to slice. Can be 0 or 1.
    Returns
        slices (tuple): A tuple of slices, one from each array (previously a
            generator, although the docstring promised a tuple)
    Raises
        ValueError: if axis is not 0 or 1 (previously this path crashed with
            an UnboundLocalError)
    """
    *As, slicesize = args
    # NOTE(review): the slice start is drawn from len() of the *first* array,
    # i.e. its axis-0 size, matching the original behavior; callers pass a
    # 1-D time array first, for which this is correct for either axis.
    start = np.random.randint(0, high=len(As[0]) - slicesize + 1)
    end = start + slicesize
    if axis == 0:
        return tuple(A[start:end] for A in As)
    if axis == 1:
        return tuple(A[:, start:end] for A in As)
    raise ValueError("axis must be 0 or 1, got {!r}".format(axis))
def robo_train_test_split(timesteps=25000, trainper=0.66, test="continue"):
    """Split robot data into training and test chunks.

    Parameters:
        timesteps (int): number of consecutive timesteps sliced from the data
        trainper (float): fraction of the slice used for training
        test (str): "continue" tests on the tail of the training slice;
            "random" tests on a random slice of the held-out small dataset
    Returns:
        tr, (Utr, Dtr), (ts, Dts), Uts: train/test times, states and drives
    """
    global BIG_ROBO_DATA_LOADED, SMALL_ROBO_DATA_LOADED
    # BUG FIX: inside a function, dir() lists only *local* names, so the old
    # check `'BIG_ROBO_DATA_LOADED' not in dir()` was always True and both
    # .mat files were reloaded on every call. Check module globals instead.
    if 'BIG_ROBO_DATA_LOADED' not in globals():
        BIG_ROBO_DATA_LOADED = load_robo(BIG_ROBO_DATA)
        SMALL_ROBO_DATA_LOADED = load_robo(SMALL_ROBO_DATA)
    t, U, D = BIG_ROBO_DATA_LOADED
    t, U, D = random_slice(t, U, D, timesteps)
    split_idx = int(np.floor(len(t) * trainper))
    tr, ts = t[:split_idx], t[split_idx:]
    Utr, Uts = U[:split_idx, :], U[split_idx:, :]
    Dtr, Dts = D[:split_idx, :], D[split_idx:, :]
    if test == "random":
        t, U, D = SMALL_ROBO_DATA_LOADED
        # Make sure the slice isn't larger than the small dataset
        test_timesteps = int(np.floor(min(timesteps, len(t)) * trainper))
        ts, Uts, Dts = random_slice(t, U, D, test_timesteps)
    return tr, (Utr, Dtr), (ts, Dts), Uts
def chaos_train_test_split(system, duration=10, trainper=0.66, dt=0.01, test="continue"):
    """Generate train and test orbits for a chaotic system.

    "continue" splits a single orbit; "random" generates two independent
    orbits whose durations add up to `duration`.
    """
    if test != "random":
        return rc.train_test_orbit(system, duration=duration, trainper=trainper, dt=dt)
    train_duration = trainper * duration
    tr, Utr = rc.orbit(system, duration=train_duration, trim=True)
    ts, Uts = rc.orbit(system, duration=duration - train_duration, trim=True)
    return tr, Utr, ts, Uts
def train_test_data(system, trainper=0.66, test="continue"):
    """Load train/test data for the named system (chaotic or soft robot)."""
    if system != "softrobot":
        return chaos_train_test_split(system, duration=DURATION[system], trainper=trainper, dt=DT[system], test=test)
    return robo_train_test_split(timesteps=SOFT_ROBO_TIMESTEPS, trainper=trainper, test=test)
def nrmse(true, pred):
    """ Normalized root mean square error. (A metric for measuring difference in orbits)
    Parameters:
        Two mxn arrays. Axis zero is assumed to be the time axis (i.e. there are m time steps)
    Returns:
        err (ndarray): Error at each time value. 1D array with m entries
    """
    # Scale each state dimension by its standard deviation over time, then
    # take the per-timestep Euclidean norm of the scaled error.
    scaled = (true - pred) / np.std(true, axis=0)
    return np.sqrt(np.sum(scaled ** 2, axis=1))
def valid_prediction_index(err, tol):
    """First index i where err[i] > tol. err is assumed to be 1D and tol is a
    float. If err is never greater than tol, then len(err) is returned."""
    exceeded = np.flatnonzero(err > tol)
    if exceeded.size:
        return exceeded[0]
    return len(err)
def trained_rcomp(system, tr, Utr, resprms, methodprms):
    """ Returns a reservoir computer trained with the given data and parameters
    Parameters:
        system (str): Name of the system
        tr (ndarray): 1D array of m equally spaced time values
        Utr (ndarray): mxn 2D array of training signal states
        resprms (dict): Reservoir computer hyperparameters
        methodprms (dict): Training method parameters
    Returns:
        rcomp (ResComp): Trained reservoir computer
    """
    is_robot = system == "softrobot"
    model = rc.DrivenResComp(**resprms) if is_robot else rc.ResComp(**resprms)
    if is_robot:
        # Driven reservoir: Utr is a (states, drive) pair that is unpacked.
        model.train(tr, *Utr, **methodprms)
    else:
        model.train(tr, Utr, **methodprms)
    return model
def rcomp_prediction(system, rcomp, predargs, init_cond):
    """ Make a prediction with the given system
    Parameters:
        system (str): Name of the system to predict
        rcomp (ResComp): Trained reservoir computer
        predargs (variable length arguments): Passed directly into rcomp.predict
        init_cond (dict): Keyword args passed rcomp.predict
    Returns:
        pre (ndarray): Reservoir computer prediction
    """
    if system == "softrobot":
        # Driven systems take (times, drive) as separate positional arguments.
        return rcomp.predict(*predargs, **init_cond)
    return rcomp.predict(predargs, **init_cond)
def make_initial(pred_type, rcomp, Uts):
    """ Create initial condition for the type of prediction. Either continue
    from the reservoir's current node state or supply a state-space point.
    """
    if pred_type == "continue":
        # Continue evolving the reservoir nodes from their current state.
        return {"r0": rcomp.r0}
    # Otherwise hand over the test trajectory's first state; the reservoir
    # maps it to a node initial condition itself.
    return {"u0": Uts[0]}
def build_params(opt_prms, combine=False, system=None):
    """ Extract training method parameters and augment reservoir parameters with defaults.
    Parameters
    ----------
    opt_prms (dict): Dictionary of parameters from the optimizer
    combine (bool): default False; whether to return all parameters as a single dictionary
    system (string): default None; the system to use. If None, takes the value of the global variable SYSTEM
    Returns
    -------
    A single merged dict when combine is True, otherwise the pair
    (resprms, methodprms) split according to METHOD_PRMS.
    """
    if system is None:
        system = SYSTEM
    if combine:
        if system == "softrobot":
            return {**RES_DEFAULTS, **opt_prms, **ROBO_DEFAULTS}
        else:
            return {**RES_DEFAULTS, **opt_prms}
    resprms = {}
    methodprms = {}
    # Any key listed in METHOD_PRMS belongs to the training method; the rest
    # are reservoir hyperparameters.
    for k in opt_prms.keys():
        if k in METHOD_PRMS:
            methodprms[k] = opt_prms[k]
        else:
            resprms[k] = opt_prms[k]
    resprms = {**RES_DEFAULTS, **resprms}
    if system == "softrobot":
        resprms = {**resprms, **ROBO_DEFAULTS} # Updates signal_dim and adds drive_dim
    return resprms, methodprms
def vpt(*args, **kwargs):
    """ Compute the valid prediction time for a set of parameters
    Parameters:
    -----------
    system (str): The name of the system from which to generate training data.
        One of: `["lorenz", "rossler", "thomas", "softrobot"]`
    pred_type: Predict continuation of training trajectory or predict evolution
        of a random initial condition. One of: `["continue", "random"]`
    method: Training methodology. One of `["standard", "aumented"]`
    The keyword arguments should be parameters from the optimizer (`trial.parameters`),
    parsed by `build_params`.
    Returns:
    -------
    vptime (float): Time in seconds that the reservoir computer was able to predict the
        evolution of the given system with in a fixed tolerance (`VPTOL`) of error.
    """
    system, pred_type, method = args
    # Build train and test data. Soft robot data includes driving signal in Utr and ts.
    tr, Utr, ts, Uts = train_test_data(system, trainper=TRAINPER, test=pred_type)
    # Filter and augment parameters, then build and train a reservoir computer
    resprms, methodprms = build_params(kwargs)
    rcomp = trained_rcomp(system, tr, Utr, resprms, methodprms)
    # Create prediction initial condition and then predict
    init_cond = make_initial(pred_type, rcomp, Uts)
    pre = rcomp_prediction(system, rcomp, ts, init_cond)
    # Compute error and deduce valid prediction time
    vptime = get_vptime(system, ts, Uts, pre)
    return vptime
def mean_vpt(*args, **kwargs):
    """ Average valid prediction time across OPT_VPT_REPS repetitions. Handles parallel processing. """
    if PARALLEL:
        # Each engine runs ceil(OPT_VPT_REPS / node_count) repetitions; the
        # mean is taken over the number actually run (>= OPT_VPT_REPS).
        loop_ct = int(np.ceil(OPT_VPT_REPS / node_count))
        vpt_results = dview.apply_sync(lambda ct, *a, **k: [vpt(*a,**k) for _ in range(ct)], loop_ct, *args, **kwargs)
        return np.sum(vpt_results) / (loop_ct * node_count)
    else:
        tot_vpt = 0
        for i in range(OPT_VPT_REPS):
            tot_vpt += vpt(*args, **kwargs)
        return tot_vpt/OPT_VPT_REPS
def get_vptime(system, ts, Uts, pre):
    """
    Valid prediction time for a specific instance.

    Computes the NRMSE between the true and predicted orbits and returns the
    elapsed time until the error first exceeds the module tolerance VPTTOL
    (0. if the very first step already exceeds it).
    """
    err = nrmse(Uts, pre)
    idx = valid_prediction_index(err, VPTTOL)
    if idx == 0:
        vptime = 0.
    else:
        if system == "softrobot":
            # Soft robot test data is a (times, drive) pair; index the times.
            vptime = ts[0][idx-1] - ts[0][0]
        else:
            vptime = ts[idx-1] - ts[0]
    #if "--test" in options.keys():
    #    print(vptime)
    return vptime
def meanlyap(rcomp, pre, r0, ts, pert_size=1e-6, system=None):
    """ Average lyapunov exponent across LYAP_REPS repititions.

    Perturbs the reservoir initial condition r0 by a random vector of size
    pert_size, re-predicts, and estimates the exponent from the divergence
    of the two trajectories while they remain close.
    """
    if system is None:
        system = SYSTEM
    if system == "softrobot":
        ts, D = ts
    lam = 0
    for i in range(LYAP_REPS):
        delta0 = np.random.randn(r0.shape[0]) * pert_size
        if system == "softrobot":
            predelta = rcomp.predict(ts, D, r0=r0+delta0)
        else:
            predelta = rcomp.predict(ts, r0=r0+delta0)
        # NOTE(review): the loop variable `i` is deliberately overwritten here
        # with the accuracy duration; the loop only counts repetitions.
        i = rc.accduration(pre, predelta)
        lam += rc.lyapunov(ts[:i], pre[:i, :], predelta[:i, :], delta0)
    return lam / LYAP_REPS
def test_all(system, optimized_hyperprms):
    """
    Tests a set of optimized hyperparameters for continue and random predictions and derivative fit, as well as Lyapunov exponent.
    Returns, in order:
        Continue vptime
        Random vptime
        Lyapunov exponent
        Continue deriv fit
        Random deriv fit
    The derivative fits will be None if system=='softrobot'.
    """
    results = [None]*5
    tr, Utr, ts, Uts = train_test_data(system, trainper=TRAINPER, test="continue")
    resprms, methodprms = build_params(optimized_hyperprms, system=system)
    rcomp = trained_rcomp(system, tr, Utr, resprms, methodprms)
    ## Continued Prediction
    init_cond = make_initial("continue", rcomp, Uts)
    pre = rcomp_prediction(system, rcomp, ts, init_cond)
    # Compute error and deduce valid prediction time
    vptime = get_vptime(system, ts, Uts, pre)
    results[0] = vptime
    ## Continued Derivative fit
    if system != "softrobot":
        err = rc.system_fit_error(ts, pre, system)
        trueerr = rc.system_fit_error(ts, Uts, system)
        results[3] = (trueerr, err)
    ## Random Prediction (reuses the reservoir trained above)
    tr, Utr, ts, Uts = train_test_data(system, trainper=TRAINPER, test="random")
    init_cond = make_initial("random", rcomp, Uts)
    pre = rcomp_prediction(system, rcomp, ts, init_cond)
    vptime = get_vptime(system, ts, Uts, pre)
    results[1] = vptime
    ## Random Derivative fit
    if system != "softrobot":
        err = rc.system_fit_error(ts, pre, system)
        trueerr = rc.system_fit_error(ts, Uts, system)
        results[4] = (trueerr, err)
    ## Lyapunov Exponent Estimation (based on the random-prediction setup)
    if "r0" in init_cond.keys():
        r0 = init_cond["r0"]
    else:
        if system == "softrobot":
            r0 = rcomp.initial_condition(init_cond["u0"], ts[1][0,:])
        else:
            r0 = rcomp.initial_condition(init_cond["u0"])
    results[2] = meanlyap(rcomp, pre, r0, ts, system=system)
    return tuple(results)
if __name__ == "__main__":
    # Entry point: optimize hyperparameters with sherpa, then test the best
    # set and pickle the results.
    if "--test" in options.keys():
        print("Running in test mode")
    if PARALLEL:
        #Set up things for multithreading
        client = ipp.Client(profile=p_profile)
        dview = client[:]
        dview.use_dill()
        dview.block = True
        node_count = len(client.ids)
        print(f"Using multithreading; running on {node_count} engines.")
        dview.execute('from opt_then_test import *')
        dview.apply(_set_experiment,SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD)
    #Find the data directory if none was given as an argument
    if results_directory is None:
        results_directory = "_".join((SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD,TIMESTAMP))
        if "--test" in options.keys():
            results_directory = "TEST-" + results_directory
        results_directory = DATADIR + SYSTEM + "/" + results_directory
    #Make sure the data directory exists
    try:
        mkdir(results_directory)
    except FileExistsError:
        pass
    ### Optimize hyperparameters
    param_names = RES_OPT_PRMS
    parameters = [
        sherpa.Continuous(name='gamma', range=[0.1, 25]),
        sherpa.Continuous(name='sigma', range=[0.01, 5.0]),
        sherpa.Continuous(name='spect_rad', range=[0.1, 25]),
        sherpa.Continuous(name='ridge_alpha', range=[1e-8, 2], scale='log'),
        sherpa.Continuous(name='mean_degree', range=[0.1, 5]),
    ]
    augmentedprms = [
        sherpa.Continuous(name='window', range=[10*DT[SYSTEM], 1000*DT[SYSTEM]]),
        sherpa.Ordinal(name='overlap', range=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95])
        #Alternative:
        #sherpa.Continuous(name='overlap', range=[0.0, 0.95])
    ]
    roboprms = [
        sherpa.Continuous(name='delta', range=[0.01, 5.0]),
    ]
    if METHOD == "augmented":
        parameters += augmentedprms
        param_names += METHOD_PRMS
    if SYSTEM == "softrobot":
        parameters += roboprms
        param_names += ROBO_OPT_PRMS
        #Load robot data once and share it with the parallel engines
        BIG_ROBO_DATA_LOADED = load_robo(BIG_ROBO_DATA)
        SMALL_ROBO_DATA_LOADED = load_robo(SMALL_ROBO_DATA)
        if PARALLEL:
            dview.push({'BIG_ROBO_DATA_LOADED':BIG_ROBO_DATA_LOADED,'SMALL_ROBO_DATA_LOADED':SMALL_ROBO_DATA_LOADED})
    # Bayesian hyper parameter optimization
    priorprms = loadprior(SYSTEM, param_names)
    algorithm = sherpa.algorithms.GPyOpt(max_num_trials=OPT_NTRIALS, initial_data_points=priorprms)
    # The sherpa dashboard is unsupported on Windows and opt-in elsewhere.
    disable_dashboard = (sys.platform in ['cygwin', 'win32']) or ("--dashboard" not in options)
    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         disable_dashboard=disable_dashboard,
                         lower_is_better=False)
    for trial in study:
        try:
            exp_vpt = mean_vpt(*EXPERIMENT, **build_params(trial.parameters, combine=True))
        except Exception as e:
            print("Error encountered.")
            print("Current experiment:", SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD)
            print("Parameter set:", build_params(trial.parameters, combine=True))
            raise e
        study.add_observation(trial=trial,
                              objective=exp_vpt)
        study.finalize(trial)
        study.save(results_directory) # Need separate directories for each method etc
    ### Choose the best hyper parameters
    optimized_hyperprms = study.get_best_result()
    # Trim to only have the actual parameters
    optimized_hyperprms = {key:optimized_hyperprms[key] for key in param_names}
    print("Optimization ran successfully")
    ### Test the training method
    results = {name:[] for name in ["continue", "random", "cont_deriv_fit", "rand_deriv_fit", "lyapunov"]}
    results["experiment"] = (SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD)
    results["opt_parameters"] = optimized_hyperprms
    results["is_test"] = ("--test" in options.keys())
    if PARALLEL:
        #Run test_all() in parallel
        loop_ct = int(np.ceil(NSAVED_ORBITS / node_count))
        test_results = dview.apply_sync(lambda s,p,c:[test_all(s,p) for _ in range(c)], SYSTEM, optimized_hyperprms, loop_ct)
        #Collect the results
        for rlist in test_results:
            for cont_vpt, rand_vpt, lyap, cont_df, rand_df in rlist:
                results["continue"].append(cont_vpt)
                results["random"].append(rand_vpt)
                results["lyapunov"].append(lyap)
                if SYSTEM != 'softrobot':
                    results["cont_deriv_fit"].append(cont_df)
                    results["rand_deriv_fit"].append(rand_df)
    else:
        for k in range(NSAVED_ORBITS):
            # BUG FIX: test_all takes (system, params); the system argument was
            # missing here, so the serial branch crashed with a TypeError.
            cont_vpt, rand_vpt, lyap, cont_df, rand_df = test_all(SYSTEM, optimized_hyperprms)
            results["continue"].append(cont_vpt)
            results["random"].append(rand_vpt)
            results["lyapunov"].append(lyap)
            if SYSTEM != 'softrobot':
                results["cont_deriv_fit"].append(cont_df)
                results["rand_deriv_fit"].append(rand_df)
    # Save results dictionary with a unique name.
    results_filename = "-".join((SYSTEM, MAP_INITIAL, PREDICTION_TYPE, METHOD, TIMESTAMP)) + ".pkl"
    if "--test" in options.keys():
        results_filename = "TEST-" + results_filename
    with open(results_directory + "/" + results_filename, 'wb') as file:
        pkl.dump(results, file)
    print("Testing ran successfully")
    print(f"Results written to {results_directory}/{results_filename}.")
| StarcoderdataPython |
9738677 | <reponame>Galland/kivy
'''
Widget animation
================
This example demonstrates creating and applying a multi-part animation to
a button widget. You should see a button labelled 'plop' that will move with
an animation when clicked.
'''
import kivy
kivy.require('1.0.7')
from kivy.animation import Animation
from kivy.app import App
from kivy.uix.button import Button
class TestApp(App):
    """Demo app: one button that runs a multi-part animation when pressed."""

    def animate(self, instance):
        """Build a sequential/parallel animation and start it on *instance*."""
        # '+=' appends a sequential step, '&=' runs a step in parallel.
        anim = Animation(pos=(100, 100), t='out_bounce')
        anim += Animation(pos=(200, 100), t='out_bounce')
        anim &= Animation(size=(500, 500))
        anim += Animation(size=(100, 50))
        # The button's default 'click' color animation is unaffected.
        anim.start(instance)

    def build(self):
        """Return the root widget: a button wired to animate() on press."""
        return Button(size_hint=(None, None), text='plop',
                      on_press=self.animate)
if __name__ == '__main__':
    # Launch the demo application.
    TestApp().run()
| StarcoderdataPython |
11352153 | <reponame>Funtimes-Smarts/Python-import-Smart<gh_stars>0
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
import six
# Numeric codes for the Smartsheet object-value types.
DATE = 1
DATETIME = 2
ABSTRACT_DATETIME = 3
CONTACT = 4
DURATION = 5
PREDECESSOR_LIST = 6
NUMBER = 7
BOOLEAN = 8
STRING = 9
# Names of the complex (non-primitive) object types accepted by the API.
OBJECT_VALUE = {
    'object_type': [
        'DATE',
        'DATETIME',
        'ABSTRACT_DATETIME',
        'CONTACT',
        'DURATION',
        'PREDECESSOR_LIST']}
# Bidirectional lookup tables between numeric codes and type names.
_typeToName = {
    DATE: 'DATE',
    DATETIME: 'DATETIME',
    ABSTRACT_DATETIME: 'ABSTRACT_DATETIME',
    CONTACT: 'CONTACT',
    DURATION: 'DURATION',
    PREDECESSOR_LIST: 'PREDECESSOR_LIST',
}
_nameToType = {
    'DATE': DATE,
    'DATETIME': DATETIME,
    'ABSTRACT_DATETIME': ABSTRACT_DATETIME,
    'CONTACT': CONTACT,
    'DURATION': DURATION,
    'PREDECESSOR_LIST': PREDECESSOR_LIST,
}
def enum_object_value_type(object_type=None):
    """Return the numeric code for an object-type name, or None if unknown."""
    return _nameToType.get(object_type)
class ObjectValue(object):
    """Smartsheet ObjectValue data model."""

    def __init__(self, props=None, base_obj=None):
        """Initialize the ObjectValue model.

        props: optional dict from a raw API response (snake_case or
            camelCase keys accepted).
        base_obj: optional parent object kept for SDK bookkeeping.
        """
        self._base = None
        if base_obj is not None:
            self._base = base_obj
        # Numeric type code (see module-level constants), or None.
        self._object_type = None
        if props:
            # account for alternate variable names from raw API response
            if 'object_type' in props:
                self.object_type = props['object_type']
            if 'objectType' in props:
                self.object_type = props['objectType']
        self.__initialized = True

    @property
    def object_type(self):
        # Numeric object-type code (or None if never set).
        return self._object_type

    @object_type.setter
    def object_type(self, value):
        # Accept either a type-name string or a numeric code; unknown names
        # map to None via the lookup table.
        if isinstance(value, six.string_types):
            self._object_type = _nameToType.get(value)
        else:
            self._object_type = value

    def to_dict(self, op_id=None, method=None):
        # op_id/method are unused here; kept so all SDK models share the same
        # serialization signature.
        obj = {
            'objectType': _typeToName.get(self._object_type)}
        return obj

    def to_json(self):
        # Pretty-printed JSON form of the serialized model.
        return json.dumps(self.to_dict(), indent=2)

    def __str__(self):
        return json.dumps(self.to_dict())
10285 | <reponame>jlsmirandela/Curso_Python
# Print the first ten terms of an arithmetic progression (PA).
print('-+-' * 10)
print(' <NAME> PA')
print('+-+' * 10)
termo = int(input('Insira o primeiro termo - '))
razao = int(input('Insira a razão - '))
for _ in range(10):
    print(termo, ' → ', end=' ')
    termo += razao
print('FIM')
| StarcoderdataPython |
6438906 | """Testing file path evaluation when using import_tasks / include_tasks."""
import textwrap
from pathlib import Path
from typing import Dict
import pytest
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
LAYOUT_IMPORTS: Dict[str, str] = {
"main.yml": textwrap.dedent(
"""\
---
- hosts: target
gather_facts: false
tasks:
- name: from main import task 1
import_tasks: tasks/task_1.yml
"""
),
"tasks/task_1.yml": textwrap.dedent(
"""\
---
- name: from task 1 import task 2
import_tasks: tasks/task_2.yml
"""
),
"tasks/task_2.yml": textwrap.dedent(
"""\
---
- name: from task 2 import subtask 1
import_tasks: tasks/subtasks/subtask_1.yml
"""
),
"tasks/subtasks/subtask_1.yml": textwrap.dedent(
"""\
---
- name: from subtask 1 import subtask 2
import_tasks: tasks/subtasks/subtask_2.yml
"""
),
"tasks/subtasks/subtask_2.yml": textwrap.dedent(
"""\
---
- name: from subtask 2 do something
debug:
msg: |
Something...
"""
),
}
LAYOUT_INCLUDES: Dict[str, str] = {
"main.yml": textwrap.dedent(
"""\
---
- hosts: target
gather_facts: false
tasks:
- name: from main import task 1
include_tasks: tasks/task_1.yml
"""
),
"tasks/task_1.yml": textwrap.dedent(
"""\
---
- name: from task 1 import task 2
include_tasks: tasks/task_2.yml
"""
),
"tasks/task_2.yml": textwrap.dedent(
"""\
---
- name: from task 2 import subtask 1
include_tasks: tasks/subtasks/subtask_1.yml
"""
),
"tasks/subtasks/subtask_1.yml": textwrap.dedent(
"""\
---
- name: from subtask 1 import subtask 2
include_tasks: tasks/subtasks/subtask_2.yml
"""
),
"tasks/subtasks/subtask_2.yml": textwrap.dedent(
"""\
---
- name: from subtask 2 do something
debug:
msg: |
Something...
"""
),
}
@pytest.mark.parametrize(
    "ansible_project_layout",
    (
        pytest.param(LAYOUT_IMPORTS, id="using only import_tasks"),
        pytest.param(LAYOUT_INCLUDES, id="using only include_tasks"),
    ),
)
@pytest.mark.xfail(
    reason="https://github.com/ansible-community/ansible-lint/issues/1446"
)
def test_file_path_evaluation(
    tmp_path: Path,
    default_rules_collection: RulesCollection,
    ansible_project_layout: Dict[str, str],
) -> None:
    """Test file path evaluation when using import_tasks / include_tasks in the project.
    Usage of import_tasks / include_tasks may introduce false positive load-failure due
    to incorrect file path evaluation.
    """
    # Materialize the in-memory layout under a temporary project directory.
    for file_path, file_content in ansible_project_layout.items():
        full_path = tmp_path / file_path
        full_path.parent.mkdir(parents=True, exist_ok=True)
        full_path.write_text(file_content)
    # A clean project should yield no lint matches (xfail until the linked
    # upstream issue is fixed).
    result = Runner(str(tmp_path), rules=default_rules_collection).run()
    assert not result
| StarcoderdataPython |
8108975 | <reponame>SuviVappula/tilavarauspalvelu-core<gh_stars>0
# Generated by Django 3.1.13 on 2021-10-11 07:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds nullable translated verbose_name
    # fields (en/fi/sv) to the three role-choice models. Edit with care; keep
    # in sync with the model translation configuration.

    dependencies = [
        ('permissions', '0010_unit_permissions'),
    ]

    operations = [
        migrations.AddField(
            model_name='generalrolechoice',
            name='verbose_name_en',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='generalrolechoice',
            name='verbose_name_fi',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='generalrolechoice',
            name='verbose_name_sv',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='servicesectorrolechoice',
            name='verbose_name_en',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='servicesectorrolechoice',
            name='verbose_name_fi',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='servicesectorrolechoice',
            name='verbose_name_sv',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='unitrolechoice',
            name='verbose_name_en',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='unitrolechoice',
            name='verbose_name_fi',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
        migrations.AddField(
            model_name='unitrolechoice',
            name='verbose_name_sv',
            field=models.CharField(max_length=255, null=True, verbose_name='Verbose name'),
        ),
    ]
| StarcoderdataPython |
6515920 | import os.path
import sys
import math
# Command-line arguments: input file (or directory), target platform, output path.
infile = sys.argv[1]
platform = sys.argv[2]
outfile = sys.argv[3]
# Two-character prefix of the current input file's name; set per file in main().
trackKey = ""
def get_inputs(infile):
    """Return the list of input paths: *infile* itself when it is a file,
    otherwise every entry of the directory *infile*."""
    if not os.path.isfile(infile):
        return [os.path.join(infile, name) for name in os.listdir(infile)]
    return [infile]
delayMemory = {}
def main():
    """Parse each input track file and write the converted output.

    Input format: "<section>:" headers followed by "_name payload" lines;
    ';' starts a comment. Mutates the module globals trackKey/delayMemory
    per input file.
    """
    global trackKey
    global delayMemory
    with open(outfile, 'w') as out:
        inputs = get_inputs(infile)
        for inputfile in inputs:
            data = {}
            delayMemory = {}
            # Track key is the first two characters of the input file name.
            trackKey = os.path.split(inputfile)[-1][:2]
            print(f"reading {inputfile}")
            with open(inputfile, 'r') as f:
                lines = [x.strip() for x in f.readlines()]
            section = None
            for line in lines:
                line = line.strip()
                if not len(line) or line.startswith(';'):
                    continue
                if ':' in line:
                    # A "name:" line switches the current section.
                    section = line.replace(':', '').strip()
                    continue
                isSection = section == 'sections'
                isDef = section != 'track'
                if line.startswith('_'):
                    parts = [x for x in line.split(' ') if len(x)]
                    if not data.get(section, None):
                        data[section] = {} if isDef else []
                    if isDef:
                        payload = parts[1:]
                        if isSection:
                            # Sections hold comma-separated groups of tokens.
                            payload = ' '.join(parts[1:]).split(',')
                            payload = [x.split(' ') for x in payload]
                        data[section][parts[0]] = payload
                    else:
                        data[section].append(parts)
            if platform == 'nes':
                output_nes(data, out)
# NES CPU clock rates (Hz) per video standard; used to derive APU timer values.
NES_CPU_CLK = {
    'PAL': 1662607,
    'NTSC': 1789773
}
# Reference pitch in Hz for MIDI note 69 (A4).
TUNING = 440
def midi_freq(midinote):
    """Frequency in Hz of a MIDI note number (12-TET, A4 = TUNING Hz at 69)."""
    return TUNING * 2 ** ((midinote - 69) / 12)
def midi_to_tval(midinote, triangle=False, standard='NTSC'):
    """APU timer value for a MIDI note; the triangle channel divides the CPU
    clock by 32 instead of 16."""
    coeff = 32 if triangle else 16
    return round(NES_CPU_CLK[standard]/(coeff*midi_freq(midinote))-1)
# Sentinel opcodes embedded in the emitted byte stream.
REF_CODE = 0xf0
END_CODE = 0xff
SECTION_SPLIT = 0xfe
TRACK_TYPE = 0xfc
TEMPO_COMMAND = 0xfb
# Channel/track type ids (indices into DATA_LENS).
NOISE = 0x1
SQUARE = 0x2
TRIANGLE = 0x3
PERC = 0x4
SQUARE_2 = 0x5
# Bytes emitted per note, indexed by track type (index 0 unused).
DATA_LENS = [0, 3, 4, 3, 1, 4]
def to_hex(x):
    """Format an int as a '$'-prefixed two-digit hex literal; strings pass
    through unchanged (they are already assembler expressions).

    Uses isinstance rather than an exact type() comparison — idiomatic, and
    also correct for str subclasses.
    """
    return x if isinstance(x, str) else f'${x:02x}'
def get_track_type(s):
    """Map a track name to its channel type constant by substring matching."""
    if 'bass' in s:
        return TRIANGLE
    if any(tag in s for tag in ('bd', 'dr', 'clap')):
        return PERC
    if 'string' in s:
        return SQUARE_2
    return SQUARE
def get_delay_key(k, i):
    """Key for delayMemory: track name concatenated with the tick index."""
    return '{}{}'.format(k, i)
def get_vd(volume, constVolFlag, haltFlag, dutyCycle):
    """Pack the volume/duty byte: volume | const<<4 | halt<<5 | duty<<6."""
    packed = volume
    packed |= constVolFlag << 4
    packed |= haltFlag << 5
    packed |= dutyCycle << 6
    return packed
def get_hi(tval, notelen):
    """Pack the timer-high byte: top bits of tval plus notelen shifted left 3."""
    return (notelen << 3) | ((tval >> 8) & 0xff)
def get_divisor(k):
    """Split a 'name/div' track key into (name, div); the divisor defaults
    to 16 when no '/' is present."""
    pieces = k.split('/')
    if len(pieces) == 1:
        return (k, 16)
    return (pieces[0], int(pieces[1], 10))
# Mutable converter state: current volume scale and the last note played per
# track (both updated by handle_item / output_section).
volumeScale = 1.0
latest = {}
def handle_item(dout, k, value, arr, i):
    """Append the byte encoding of one track item to `dout`.

    dout: output byte list (mutated in place)
    k: track key, e.g. "lead/8" (name plus optional divisor)
    value: token — 'vX' volume scale, '.' rest, '_ref[*n]' clip reference,
        or a MIDI note number (with optional trailing 's' on percussion)
    arr, i: the containing token list and this token's index
    Mutates the module globals volumeScale, latest and delayMemory.
    """
    global volumeScale
    global delayMemory
    isTri = get_track_type(k) == TRIANGLE
    isSqu = get_track_type(k) == SQUARE or get_track_type(k) == SQUARE_2
    isNoise = get_track_type(k) == NOISE
    isString = 'string' in k
    # Echo/delay notes are only scheduled for lead and string tracks.
    addDelay = (isString or 'lead' in k) and not isTri
    if value.startswith('v'):
        # 'vX' sets the volume scale for subsequent notes; emits no bytes.
        volumeScale = float(value.split('v')[-1])
        return
    if value == '.':
        # Rest: either play a previously scheduled delay/echo note or pad
        # with zero bytes of the track's note width.
        dn = delayMemory.get(get_delay_key(k, i), None)
        if addDelay and dn and ((isString and dn['note'] == latest[k]) or (not isString)):
            if dn['type'] == SQUARE or dn['type'] == SQUARE_2:
                dout += [dn['vd'], dn['swp'], dn['lo'], dn['hi']]
            else:
                dout += [dn['lc'], dn['lo'], dn['hi']]
        else:
            dout += ([0] * DATA_LENS[get_track_type(k)])
    elif '_' in value:
        # Clip reference '_name[*times]': emit REF_CODE plus address
        # placeholders resolved by the assembler.
        parts = value.split('*')
        ref = trackKey + parts[0][1:]
        times = 1 if len(parts) == 1 else int(parts[1], 10)
        dout += [REF_CODE, f'<{ref}', f'>{ref}', times]
    else:
        tval = 0
        intValue = 0
        try:
            intValue = int(value, 10)
            tval = midi_to_tval(intValue, isTri)
            latest[k] = intValue
        except ValueError:
            print('skipping ' + value)
        lo = tval & 0xff
        notelen = 0b10000 # TODO calc proper length
        hi = get_hi(tval, notelen)
        lc = 0
        swp = 0
        dutyCycle = 0x2
        haltFlag = 0
        constVolFlag = 0x1
        volume = 0xf
        if isSqu:
            if isString:
                dutyCycle = 0x1
                constVolFlag = 1 if volumeScale < 1.0 else 0
            vd = get_vd(round(volume * volumeScale), constVolFlag, haltFlag, dutyCycle)
            if isString:
                hi = get_hi(tval, 0b00001)
            dout += [vd, swp, lo, hi]
        elif isTri:
            lc = 0x7f
            dout += [lc, lo, hi]
        elif isNoise:
            lc = 0x1a
            lo = 0x5
            hi = 0
            dout += [lc, lo, hi]
        else: # PERC
            # Percussion emits a single byte; a trailing 's' sets the top bit.
            hi = 0
            if 's' in value:
                hi = 0b10000000
                value = value[:-1]
            hi = hi | int(value, 10)
            dout += [hi]
        if addDelay:
            # Schedule quieter echo copies of this note at later tick
            # offsets, scaled by the track's divisor.
            volumes = [0x6, 0x4, 0x3] if isString else [0x4, 0x2]
            delays = [4, 8, 12] if isString else [3, 4]
            (_,divisor) = get_divisor(k)
            delays = [int(x * (divisor / 16)) for x in delays]
            for j, delayTicks in enumerate(delays):
                # if isString:
                #     dutyCycle += 1
                #     if dutyCycle == 4:
                #         dutyCycle = 0
                constVolFlag = 0x1
                vd = get_vd(math.ceil(volumes[j]), constVolFlag, haltFlag, dutyCycle)
                lc = 0x14
                dIndex = get_delay_key(k, i+delayTicks)
                # First-scheduled echo wins; never overwrite an existing one.
                if not delayMemory.get(dIndex, None):
                    delayMemory[dIndex] = {
                        'type': get_track_type(k),
                        'vd': vd,
                        'lo': lo,
                        'hi': hi,
                        'lc': lc,
                        'swp': swp,
                        'note': intValue
                    }
def explode_repeats(v):
    """Expand '.*N' rest shorthand tokens into N separate '.' entries."""
    expanded = []
    for item in v:
        if type(item) is str and item.startswith('.*'):
            expanded.extend(['.'] * int(item[2:], 10))
        else:
            expanded.append(item)
    return expanded
def output_section(out, k, v, isClip=False, prefixData=None):
    """Append one assembly label + data line for a clip/section/track.

    out: list of output assembly lines (mutated)
    k: track key; the label strips '_' and any '/divisor' suffix
    v: list of tokens, or list of token lists (each sub-list becomes a
        SECTION_SPLIT-delimited segment)
    isClip: prefix the data with a TRACK_TYPE header byte pair
    prefixData: optional bytes emitted before the token data (e.g. tempo)
    """
    global volumeScale
    (cleanKey, _) = get_divisor(k)
    out.append(f"{trackKey + cleanKey.replace('_','')}:")
    v = explode_repeats(v)
    dout = []
    if prefixData:
        dout += prefixData
    # Volume scale resets at the start of every section/segment.
    volumeScale = 1.0
    for j, value in enumerate(v):
        if type(value) == list:
            volumeScale = 1.0
            for i, x in enumerate(value):
                handle_item(dout, k, x, value, i)
            dout += [SECTION_SPLIT]
        else:
            handle_item(dout, k, value, v, j)
    dout += [END_CODE]
    if isClip:
        # Header byte: divisor in the top 5 bits, track type in the low 3.
        (_,divisor) = get_divisor(k)
        dout = [TRACK_TYPE, (divisor << 3) | get_track_type(k)] + dout
    out.append(f"\tdb {','.join([to_hex(x) for x in dout])}")
# Scale factor converting 1/tempo into the NTSC tempo byte.
TEMPO_COEFF_NTSC = 3200
def output_nes(data, f):
    """Emit all clips, sections and track entries of `data` as NES assembly
    `db` lines to the open file `f`."""
    out = []
    for k, v in data['clips'].items():
        output_section(out, k, v, True)
    for k, v in data['sections'].items():
        output_section(out, k, v)
    for v in data['track']:
        # The track stream starts with a tempo command derived from _tempo.
        output_section(out, 'track', v, False,
                       [f"${TEMPO_COMMAND:02x}", f"${round(1/int(data['meta']['_tempo'][0], 10)*TEMPO_COEFF_NTSC):02x}"])
    f.writelines([x + '\n' for x in out])
if __name__ == "__main__":
    # Entry point: convert the input file(s) and write the assembly output.
    main()
| StarcoderdataPython |
9633358 | <gh_stars>0
""" Advent of code 2017 day 3/1 """
import unittest
from code import solution, start_point, count_y, side_pos
class MyTest(unittest.TestCase):
    """Unit tests for the day's puzzle solution.

    Fixed: `assertEquals` is a deprecated alias of `assertEqual` (raises
    DeprecationWarning); all calls now use the canonical name.
    """

    def test_start(self):
        """ Test start point """
        self.assertEqual(start_point(52), (50, 5))
        self.assertEqual(start_point(61), (50, 5))
        self.assertEqual(start_point(31), (26, 4))
        self.assertEqual(start_point(47), (26, 4))
        self.assertEqual(start_point(19), (10, 3))
        self.assertEqual(start_point(3), (2, 2))
        self.assertEqual(start_point(1), (1, 1))

    def test_y(self):
        """ Test y value """
        self.assertEqual(count_y(1, 4), 1)
        self.assertEqual(count_y(0, 4), 2)
        self.assertEqual(count_y(4, 4), 2)
        self.assertEqual(count_y(4, 5), 1)
        self.assertEqual(count_y(0, 5), 3)
        self.assertEqual(count_y(1, 2), 1)
        self.assertEqual(count_y(3, 3), 2)
        self.assertEqual(count_y(2, 3), 1)
        self.assertEqual(count_y(1, 3), 0)
        self.assertEqual(count_y(0, 3), 1)

    def test_side(self):
        """ Test side position """
        self.assertEqual(side_pos(4, 2, 2), 0)
        self.assertEqual(side_pos(21, 10, 3), 3)
        self.assertEqual(side_pos(45, 26, 4), 1)
        self.assertEqual(side_pos(69, 50, 5), 3)
        self.assertEqual(side_pos(65, 50, 5), 7)
        self.assertEqual(side_pos(50, 50, 5), 0)
        self.assertEqual(side_pos(21, 10, 3), 3)

    def test_match(self):
        """ The basic test cases """
        self.assertEqual(solution(12), 3)
        self.assertEqual(solution(23), 2)
        self.assertEqual(solution(1024), 31)
        self.assertEqual(solution(1), 0)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
1746516 | <filename>tests/test_api.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `mlbench_core.api` package."""
import datetime
import pytest
from mlbench_core.api import ApiClient
@pytest.fixture
def kubernetes_api_client_node_port(mocker):
    """Mock CoreV1Api: one NodePort service (node_port 12345) on a node
    whose single address is an ExternalIP of 1.1.1.1."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "NodePort"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)
    ]
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__iter__.return_value = [
        mocker.MagicMock(type="ExternalIP", address="1.1.1.1")
    ]
    return mock_client
@pytest.fixture
def kubernetes_api_client_node_port_internal(mocker):
    """Mock CoreV1Api: same NodePort setup as above, but the node only
    exposes an InternalIP address."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "NodePort"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__iter__.return_value = [
        mocker.MagicMock(node_port=12345, port=80)
    ]
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__len__.return_value = (
        1
    )
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__len__.return_value = (
        1
    )
    mock_client.return_value.read_node.return_value.status.addresses.__iter__.return_value = [
        mocker.MagicMock(type="InternalIP", address="1.1.1.1")
    ]
    return mock_client
@pytest.fixture
def kubernetes_api_client_clusterip(mocker):
    """Mock CoreV1Api: one ClusterIP service at 1.1.1.1:12345."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "ClusterIP"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.cluster_ip = (
        "1.1.1.1"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__getitem__.return_value.port = (
        12345
    )
    return mock_client
@pytest.fixture
def kubernetes_api_client_loadbalancer(mocker):
    """Mock CoreV1Api: one LoadBalancer service with ingress IP 1.1.1.1
    on port 12345."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.type = (
        "LoadBalancer"
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.spec.ports.__getitem__.return_value.port = (
        12345
    )
    mock_client.return_value.list_service_for_all_namespaces.return_value.items.__getitem__.return_value.status.load_balancer.ingress.ip = (
        "1.1.1.1"
    )
    return mock_client
@pytest.fixture
def kubernetes_api_client_incluster(mocker):
    """Mock CoreV1Api for in-cluster use: a single pod with pod IP 1.1.1.1."""
    mock_client = mocker.patch("kubernetes.client.CoreV1Api")
    mock_client.return_value.list_namespaced_pod.return_value.items.__len__.return_value = (
        1
    )
    mock_client.return_value.list_namespaced_pod.return_value.items.__getitem__.return_value.status.pod_ip = (
        "1.1.1.1"
    )
    return mock_client
def test_instantiation(mocker, kubernetes_api_client_node_port):
    """ApiClient resolves its endpoint from a NodePort service (ExternalIP)."""
    mocker.patch("kubernetes.config.load_kube_config")
    with ApiClient(in_cluster=False) as client:
        assert client is not None
        assert client.endpoint == "http://1.1.1.1:12345/api/"


def test_instantiation_nodeport_internal(
    mocker, kubernetes_api_client_node_port_internal
):
    """NodePort resolution falls back to the node's InternalIP."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"


def test_instantiation_url():
    """An explicit url bypasses kubernetes service discovery."""
    client = ApiClient(url="1.1.1.1:12345")
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"


def test_instantiation_incluster(mocker, kubernetes_api_client_incluster):
    """In-cluster mode uses the pod IP on port 80."""
    mocker.patch("kubernetes.config.load_incluster_config")
    client = ApiClient(in_cluster=True)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:80/api/"


def test_instantiation_clusterip(mocker, kubernetes_api_client_clusterip):
    """ClusterIP services resolve to their cluster IP and port."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"


def test_instantiation_loadbalancer(mocker, kubernetes_api_client_loadbalancer):
    """LoadBalancer services resolve to their ingress IP and port."""
    mocker.patch("kubernetes.config.load_kube_config")
    client = ApiClient(in_cluster=False)
    assert client is not None
    assert client.endpoint == "http://1.1.1.1:12345/api/"
def test_get_all_metrics(mocker, kubernetes_api_client_node_port):
    """get_all_metrics returns the future produced by the thread pool."""
    mocker.patch("kubernetes.config.load_kube_config")
    # Patch the executor so the returned future resolves to a known value.
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_all_metrics()
    assert result is not None
    assert result.result().json() == "a"


def test_get_run_metrics(mocker, kubernetes_api_client_node_port):
    """get_run_metrics forwards since/summarize and returns the future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_run_metrics("1", since=datetime.datetime.now(), summarize=100)
    assert result is not None
    assert result.result().json() == "a"


def test_get_pod_metrics(mocker, kubernetes_api_client_node_port):
    """get_pod_metrics returns a future with the API's JSON payload."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_pod_metrics(
        "rel-mlbench-worker-0", since=datetime.datetime.now(), summarize=100
    )
    assert result is not None
    assert result.result().json() == "a"


def test_post_metrics(mocker, kubernetes_api_client_node_port):
    """post_metric submits a value and returns the resulting future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.post_metric("1", "loss", 10.0, cumulative=False)
    assert result is not None
    assert result.result().json() == "a"


def test_get_runs(mocker, kubernetes_api_client_node_port):
    """get_runs returns a future with the list of runs."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_runs()
    assert result is not None
    assert result.result().json() == "a"


def test_get_run(mocker, kubernetes_api_client_node_port):
    """get_run returns a future with a single run's data."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_run("1")
    assert result is not None
    assert result.result().json() == "a"
def test_create_run_official(mocker, kubernetes_api_client_node_port):
    """create_run with an official benchmark image returns a future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        image="PyTorch Cifar-10 ResNet-20",
    )
    assert result is not None
    assert result.result().json() == "a"


def test_create_run_custom(mocker, kubernetes_api_client_node_port):
    """create_run with a custom image name/command returns a future."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.create_run(
        "test_run",
        5,
        num_cpus=4.1,
        max_bandwidth=10000,
        custom_image_name="localhost:5000/mlbench_worker:latest",
        custom_image_command="/.openmpi/bin/mpirun /app/main.py",
        run_all_nodes=False,
    )
    assert result is not None
    assert result.result().json() == "a"


def test_get_worker_pods(mocker, kubernetes_api_client_node_port):
    """get_worker_pods returns a future with the worker pod listing."""
    mocker.patch("kubernetes.config.load_kube_config")
    rg = mocker.patch("concurrent.futures.ThreadPoolExecutor")
    rg.return_value.submit.return_value.result.return_value.json.return_value = "a"
    client = ApiClient(in_cluster=False)
    result = client.get_worker_pods()
    assert result is not None
    assert result.result().json() == "a"
| StarcoderdataPython |
9737516 | <filename>psana/psana/pyalgos/generic/NDArrGenerators.py<gh_stars>0
####!/usr/bin/env python
#------------------------------
"""
:py:class:`NDArrGenerators` wrapping methods for numpy random array generators
==============================================================================
Usage::
import psana.pyalgos.generic.NDArrGenerators as ag
# Methods
nda = ag.random_standard(shape=(40,60), mu=200, sigma=25, dtype=np.float)
nda = ag.random_exponential(shape=(40,60), a0=100, dtype=np.float)
nda = ag.random_one(shape=(40,60), dtype=np.float)
nda = ag.random_256(shape=(40,60), dtype=np.uint8)
nda = ag.random_xffffffff(shape=(40,60), dtype=np.uint32, add=0xff000000)
nda = ag.aranged_array(shape=(40,60), dtype=np.uint32)
ag.print_ndarr(nda, name='', first=0, last=5)
nda = ag.ring_intensity(r, r0, sigma)
ag.add_ring(arr2d, amp=100, row=4.3, col=5.8, rad=100, sigma=3)
peaks = ag.add_random_peaks(arr2d, npeaks=10, amean=100, arms=50, wmean=2, wrms=0.1)
See:
- :py:class:`graphics`
- :py:class:`NDArrUtils`
- :py:class:`NDArrGenerators`
- `numpy.random.rand <https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.rand.html>`_.
- `matplotlib <https://matplotlib.org/contents.html>`_.
This software was developed for the LCLS project.
If you use all or part of it, please give an appropriate acknowledgment.
Modified for LCLS2 on 2015-01-26 by <NAME>
"""
#-----------------------------
import numpy as np
import math
#from psana.pyalgos.generic.NDArrUtils import shape_as_2d, shape_as_3d, reshape_to_2d, reshape_to_3d
#from psana.pyalgos.generic.NDArrUtils import size_from_shape
#from psana.pyalgos.generic.NDArrUtils import print_ndarr
#-----------------------------
def set_random_state(seed=1234567) :
    """Seed numpy's global RNG for reproducible array generation.

    seed : {None, int, array_like} Can be any integer between 0 and 2**32 - 1
    """
    #return np.random.RandomState(seed)
    np.random.seed(seed)
#-----------------------------
def random_standard(shape=(40, 60), mu=200, sigma=25, dtype=float):
    """Return an array of the requested shape and type drawn from a
    normal distribution with mean *mu* and standard deviation *sigma*."""
    sample = np.random.standard_normal(size=shape)
    return (mu + sigma * sample).astype(dtype)
#-----------------------------
def random_exponential(shape=(40, 60), a0=100, dtype=float):
    """Return an array of the requested shape and type drawn from an
    exponential distribution scaled by *a0*."""
    sample = np.random.standard_exponential(size=shape)
    return (a0 * sample).astype(dtype)
#-----------------------------
def random_one(shape=(40, 60), dtype=float):
    """Return an array of the requested shape and type filled with
    uniform random numbers in the range [0, 1)."""
    return np.random.random(shape).astype(dtype)

#-----------------------------
# Backward-compatible alias.
random_1 = random_one
#-----------------------------
def random_256(shape=(40, 60), dtype=np.uint8):
    """Return an array of the requested shape and type filled with
    uniform random values in the range [0, 255]."""
    values = np.random.random(shape) * 255
    return values.astype(dtype)
#-----------------------------
def random_xffffffff(shape=(40, 60), dtype=np.uint32, add=0xff000000):
    """Return an array of the requested shape and type filled with random
    values in [0, 0xffffff], offset by *add* (default sets the 0xff000000
    bits used as an alpha mask)."""
    base = np.random.random(shape) * 0xffffff
    return (base + add).astype(dtype)
#------------------------------
def size_from_shape(shape):
    """Return the total number of elements implied by the shape sequence
    (the product of its dimensions; 1 for an empty shape)."""
    total = 1
    for dim in shape:
        total *= dim
    return total
#-----------------------------
def aranged_array(shape=(40, 60), dtype=np.uint32):
    """Return an array of the requested shape and type filled with
    ascending integers 0, 1, 2, ..."""
    # Inline the element count (product of dimensions) instead of calling
    # the size_from_shape helper.
    total = 1
    for dim in shape:
        total *= dim
    return np.arange(total, dtype=dtype).reshape(shape)
#-----------------------------
def ring_intensity(r, r0, sigma):
    """Return a ring intensity distribution: Gaussian(r - r0, sigma)
    evaluated element-wise on *r*.

    Parameters
    ----------
    r : np.array - radius value for each pixel
    r0 : float - radius of the ring
    sigma : float - width of the ring
    """
    z = (r - r0) / (math.sqrt(2) * sigma)
    return np.exp(-(z * z))
#-----------------------------
def add_ring(arr2d, amp=100, row=4.3, col=5.8, rad=100, sigma=3) :
    """Adds peak Gaussian-shaped peak intensity to numpy array arr2d (in place)

    Parameters
    ----------
    arr2d : np.array - 2-d numpy array
    amp : float - ring intensity
    row : float - ring center row
    col : float - ring center col
    rad : float - ring mean radius
    sigma : float - width of the peak
    """
    nsigma = 5
    # Only touch a bounding box of +/- (rad + nsigma*sigma) around the
    # center, clipped to the array bounds.
    rmin = max(int(math.floor(row - rad - nsigma*sigma)), 0)
    cmin = max(int(math.floor(col - rad - nsigma*sigma)), 0)
    rmax = min(int(math.floor(row + rad + nsigma*sigma)), arr2d.shape[0])
    cmax = min(int(math.floor(col + rad + nsigma*sigma)), arr2d.shape[1])
    # Pixel coordinates relative to the (fractional) ring center.
    r = np.arange(rmin, rmax, 1, dtype = np.float32) - row
    c = np.arange(cmin, cmax, 1, dtype = np.float32) - col
    CG, RG = np.meshgrid(c, r)
    # Radial distance of each pixel in the patch from the center.
    R = np.sqrt(RG*RG+CG*CG)
    # Accumulate the amplitude-scaled ring profile into the patch.
    arr2d[rmin:rmax,cmin:cmax] += amp * ring_intensity(R, rad, sigma)
#-----------------------------
def add_random_peaks(arr2d, npeaks=10, amean=100, arms=50, wmean=2, wrms=0.1):
    """Add *npeaks* randomly placed Gaussian peaks to *arr2d* in place.

    Peak centers are uniform over the array; amplitudes are drawn from
    N(amean, arms) and widths from N(wmean, wrms).

    Returns
    -------
    list of (row, col, amplitude, sigma) tuples, one per generated peak.
    """
    shape = arr2d.shape
    rand_uni = random_1(shape=(2, npeaks))
    rows = rand_uni[0, :] * shape[0]
    cols = rand_uni[1, :] * shape[1]
    rand_std = random_standard(shape=(4, npeaks), mu=0, sigma=1)
    amps = amean + arms * rand_std[0, :]
    # BUG FIX: widths previously reused rand_std[0,:] (the amplitude noise),
    # making width perfectly correlated with amplitude; draw from an
    # independent row instead.
    sigmas = wmean + wrms * rand_std[1, :]
    # BUG FIX: the original returned the bare zip() iterator, which the loop
    # below had already exhausted, so callers always received an empty
    # sequence. Materialize it as a list first.
    peaks = list(zip(rows, cols, amps, sigmas))
    for r0, c0, a0, sigma in peaks:
        add_ring(arr2d, amp=a0, row=r0, col=c0, rad=0, sigma=sigma)
    return peaks
#-----------------------------
def cspad2x1_arr(dtype=np.float32):
    """Return a test array for a cspad 2x1 segment (185x388) with intensity
    varying linearly from the (0,0) corner: arr[i, j] == i + j."""
    rows, cols = 185, 388
    # Outer sum of the two index ranges gives arr[i, j] = i + j directly.
    ramp = np.arange(rows)[:, None] + np.arange(cols)[None, :]
    return ramp.astype(dtype)
def cspad_ndarr(n2x1=32, dtype=np.float32):
    """Return a test cspad array of shape (n2x1, 185, 388): *n2x1* identical
    2x1 segments, each varying linearly in intensity."""
    seg = cspad2x1_arr(dtype)
    # Replicate the single segment along a new leading axis.
    return np.tile(seg, (n2x1, 1, 1))
#-----------------------------
#-----------------------------
#-----------------------------
#-----------------------------
#-----------------------------
if __name__ == '__main__':
    # Smoke test: seed the RNG and print a summary of each generator.
    from psana.pyalgos.generic.NDArrUtils import print_ndarr
    set_random_state()
    print_ndarr(random_exponential(), 'random_exponential')
    print_ndarr(random_standard(), 'random_standard')
    print_ndarr(random_1(), 'random_1', last=10)
    print_ndarr(random_256(), 'random_256', last=10)
    print_ndarr(random_xffffffff(), 'random_xffffffff')
    print_ndarr(random_standard(), 'random_standard')
    print_ndarr(aranged_array(), 'aranged_array')
    #print_ndarr(, '')
    print('Test is completed')
#-----------------------------
| StarcoderdataPython |
1926401 | <reponame>dreamergz/gltf-to-3d-tiles
from .gltf_to_tileset import gltf_to_tileset
| StarcoderdataPython |
6552359 | from . import FaceDetector
from . import DataProvider
from . import ImageProvider
| StarcoderdataPython |
1860471 | <reponame>antho214/RayTracing
from .matrix import *
from .ray import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as mpath
import matplotlib.transforms as transforms
class MatrixGroup(Matrix):
    """MatrixGroup: A group of Matrix(), allowing
    the combination of several elements to be treated as a
    whole, or treated explicitly as a sequence when needed.
    """

    def __init__(self, elements=None, label=""):
        # Start from the identity matrix; each append() updates A,B,C,D
        # to the combined transfer matrix of the whole group.
        super(MatrixGroup, self).__init__(1,0,0,1,label=label)

        self.elements = []

        if elements is not None:
            for element in elements:
                self.append(element)

        # Solely for performance reason: it is common to raytrace
        # groups of rays that are similar (to mimick intensities)
        # We keep the last ray and the last ray trace for optimization
        self._lastRayToBeTraced = None
        self._lastRayTrace = None

    def append(self, matrix):
        """ Add an element at the end of the path """
        lastElement = None
        if len(self.elements) != 0:
            lastElement = self.elements[-1]
            if lastElement.backIndex != matrix.frontIndex:
                # A refractive-index mismatch between consecutive elements
                # is reported but not fatal.
                print("Mismatch of indices between element {0} and appended {1}".format(lastElement, matrix))

        self.elements.append(matrix)
        # Recompute and cache the combined transfer matrix of the group.
        transferMatrix = self.transferMatrix()
        self.A = transferMatrix.A
        self.B = transferMatrix.B
        self.C = transferMatrix.C
        self.D = transferMatrix.D
        self.L = transferMatrix.L
        self.frontVertex = transferMatrix.frontVertex
        self.backVertex = transferMatrix.backVertex

    def ImagingPath(self):
        # Build an ImagingPath that shares this group's elements and label.
        return ImagingPath(elements=self.elements, label=self.label)

    def LaserPath(self):
        # Build a LaserPath that shares this group's elements and label.
        return LaserPath(elements=self.elements, label=self.label)

    def transferMatrix(self, upTo=float('+Inf')):
        """ The transfer matrix between front edge and distance=upTo

        If "upTo" falls inside an element of finite length, then
        it will request from that element a "partial" transfer matrix
        for a fraction of the length. It is up to the Matrix() or
        MatrixGroup() to define such partial transfer matrix when possible.
        Quite simply, Space() defines a partial matrix as Space(d=upTo).

        When using this transfer matrix, any information related to rays
        that have been blocked is lost: apertures are not part of the
        ray formalism. To find out if a ray has been blocked, you must
        use trace().
        """
        transferMatrix = Matrix(A=1, B=0, C=0, D=1)
        distance = upTo
        for element in self.elements:
            if element.L <= distance:
                # Element fits entirely before upTo: compose it whole.
                transferMatrix = element * transferMatrix
                distance -= element.L
            else:
                # upTo falls inside this element: compose a partial matrix
                # and stop.
                transferMatrix = element.transferMatrix(upTo=distance) * transferMatrix
                break

        return transferMatrix

    def transferMatrices(self):
        """ The list of Matrix() that corresponds to the propagation through
        this element (or group). For a Matrix(), it simply returns a list
        with a single element [self].
        For a MatrixGroup(), it returns the transferMatrices for
        each individual element and appends them to a list for this group."""

        transferMatrices = []
        for element in self.elements:
            elementTransferMatrices = element.transferMatrices()
            transferMatrices.extend(elementTransferMatrices)
        return transferMatrices

    def trace(self, inputRay):
        """Trace the input ray from first element until after the last element,
        indicating if the ray was blocked or not

        Returns a ray trace (i.e. [Ray()]) starting with inputRay, followed by
        the ray after each element. If an element is composed of sub-elements,
        the ray will also be traced in several steps. If any element blocks the
        ray, it will be indicated.
        """
        ray = inputRay
        # Reuse the cached trace when the same ray is traced twice in a row.
        if ray != self._lastRayToBeTraced:
            rayTrace = [ray]
            for element in self.elements:
                rayTraceInElement = element.trace(ray)
                rayTrace.extend(rayTraceInElement)
                ray = rayTraceInElement[-1]  # last
            self._lastRayToBeTraced = inputRay
            self._lastRayTrace = rayTrace
        else:
            rayTrace = self._lastRayTrace

        return rayTrace

    def hasFiniteApertureDiameter(self):
        """ True if ImagingPath has at least one element of finite diameter """
        for element in self.elements:
            if element.hasFiniteApertureDiameter():
                return True
        return False

    def largestDiameter(self):
        """ Largest finite diameter in all elements """
        maxDiameter = 0.0
        if self.hasFiniteApertureDiameter():
            for element in self.elements:
                diameter = element.largestDiameter()
                if diameter != float('+Inf') and diameter > maxDiameter:
                    maxDiameter = diameter
        else:
            # NOTE(review): assumes at least one element; an empty group
            # raises IndexError here — confirm intended.
            maxDiameter = self.elements[0].displayHalfHeight() * 2
        return maxDiameter

    def flipOrientation(self):
        """ Flip the orientation (forward-backward) of this group of elements.
        Each element is also flipped individually. """
        allElements = self.elements
        allElements.reverse()
        self.elements = []

        # Re-append so the cached combined matrix is rebuilt in flipped order.
        for element in allElements:
            element.flipOrientation()
            self.append(element)

        return self

    def drawAt(self, z, axes, showLabels=True):
        """ Draw each element of this group """
        for element in self.elements:
            element.drawAt(z, axes)
            element.drawAperture(z, axes)

            if showLabels:
                element.drawLabels(z, axes)
            z += element.L

    def drawPointsOfInterest(self, z, axes):
        """
        Labels of general points of interest are drawn below the
        axis, at 25% of the largest diameter.

        AS and FS are drawn at 110% of the largest diameter
        """
        labels = {}  # Gather labels at same z

        zElement = 0
        # For the group as a whole, then each element
        for pointOfInterest in self.pointsOfInterest(z=zElement):
            zStr = "{0:3.3f}".format(pointOfInterest['z'])
            label = pointOfInterest['label']
            if zStr in labels:
                # Merge labels that land on the same z position.
                labels[zStr] = labels[zStr] + ", " + label
            else:
                labels[zStr] = label

        # Points of interest for each element
        for element in self.elements:
            pointsOfInterest = element.pointsOfInterest(zElement)

            for pointOfInterest in pointsOfInterest:
                zStr = "{0:3.3f}".format(pointOfInterest['z'])
                label = pointOfInterest['label']
                if zStr in labels:
                    labels[zStr] = labels[zStr] + ", " + label
                else:
                    labels[zStr] = label
            zElement += element.L

        halfHeight = self.largestDiameter()/2
        for zStr, label in labels.items():
            z = float(zStr)
            axes.annotate(label, xy=(z, 0.0), xytext=(z, -halfHeight * 0.5),
                          xycoords='data', fontsize=12,
                          ha='center', va='bottom')
11210625 | # Importing modules
import argparse
import atexit
import happybase
import json
import logging
from kafka import KafkaConsumer
# Configuring logger
logger_format = '%(asctime)s - %(message)s'
logging.basicConfig(format = logger_format)
logger = logging.getLogger('data-storage-writer')
logger.setLevel(logging.DEBUG)

# Default parameters; overridden by the CLI arguments parsed in __main__.
topic_name = 'test'
kafka_broker = '127.0.0.1:9092'
data_table = 'test-table'
hbase_host = 'myhbase'
# Function to persist consumed message to hbase data table
def persist_data(data, hbase_connection, data_table):
    """
    helper method to persist consumed message to hbase table
    :param data: consumed data to be stored; a JSON string with
        'Symbol', 'LastTradePrice' and 'Timestamp' fields
    :param hbase_connection: instance of hbase connection
    :param data_table: hbase data table to store the data
    :return: None
    """
    try:
        logger.debug('Starting to persist data to hbase: %s' % data)
        parsed = json.loads(data)
        symbol = parsed.get('Symbol')
        price = float(parsed.get('LastTradePrice'))
        timestamp = parsed.get('Timestamp')

        table = hbase_connection.table(data_table)
        # Row key combines symbol and timestamp so repeated ticks for the
        # same symbol do not overwrite each other.
        row_key = "%s-%s" % (symbol, timestamp)
        logger.info('Storing values with row key %s' % row_key)
        table.put(row_key, {
            'family:symbol': str(symbol),
            'family:timestamp': str(timestamp),
            'family:price': str(price)
        })
        logger.info('Persisted data to hbase for symbol: %s, price: %f, timestamp: %s' %
            (symbol, price, timestamp)
        )
    except Exception as e:
        # Best-effort: a single malformed message is logged and skipped
        # rather than crashing the consumer loop.
        logger.error('Failed to persist data to hbase for %s' % str(e))
# Function to set up shutdown hook called before shutdown
def shutdown_hook(consumer, connection):
    """
    a shutdown hook to be called before the shutdown
    :param consumer: instance of a kafka consumer
    :param connection: instance of a hbase connection
    :return: None
    """
    try:
        logger.info('Closing Kafka consumer')
        consumer.close()
        logger.info('Kafka consumer closed')
        logger.info('Closing Hbase connection')
        connection.close()
        logger.info('Hbase connection closed')
    except Exception as e:
        # Logger.warn is a deprecated alias; use warning() with lazy
        # %-style arguments instead of eager string formatting.
        logger.warning('Failed to close consumer/connection, caused by: %s', str(e))
    finally:
        logger.info('Exiting program')
# 'main method'
if __name__ == '__main__':
    # Setting up command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('topic_name', help = 'the kafka topic to push to')
    parser.add_argument('kafka_broker', help = 'the location of the kafka broker')
    parser.add_argument('data_table', help = 'the data table to use')
    parser.add_argument('hbase_host', help = 'the host name of hbase')

    # Parsing user input arguments
    args = parser.parse_args()
    topic_name = args.topic_name
    kafka_broker = args.kafka_broker
    data_table = args.data_table
    hbase_host = args.hbase_host

    # Initiating a simple kafka consumer
    consumer = KafkaConsumer(topic_name, bootstrap_servers = kafka_broker)

    # Initiating a hbase connection
    hbase_connection = happybase.Connection(hbase_host)

    # Creating the table if not already exists
    # NOTE(review): happybase.Connection.tables() returns bytes names —
    # confirm the str comparison here behaves as intended.
    if data_table not in hbase_connection.tables():
        hbase_connection.create_table(
            data_table,
            { 'family': dict() }
        )

    # Setting up proper shutdown hook
    atexit.register(shutdown_hook, consumer, hbase_connection)

    # Storing consumed message to hbase data table
    # Blocks forever, consuming messages until the process is interrupted.
    for msg in consumer:
        persist_data(msg.value, hbase_connection, data_table)
| StarcoderdataPython |
3357378 | from pathlib import Path
from argparse import Namespace
import numpy as np
import torch
from torch.utils.data import DataLoader
import mediapipe as mp
from model.estimator_2d import Estimator2D
from data.video_dataset import VideoDataset
from data.skeleton_helper import mediapipe2openpose, mediapipe2coco
from data.data_utils import suggest_metadata
class MediaPipe_Estimator2D(Estimator2D):
    """2D human pose estimator using MediaPipe

    Methods
    -------
    estimate(video)
        estimate the 2D pose coordinates in the given video file
    """

    BATCH_SIZE = 64

    def __init__(self, out_format='mediapipe', model_complexity=1, return_3D=False):
        """
        Parameters
        ----------
        out_format : str
            output pose topology used; can be 'mediapipe', 'coco' or 'openpose'
        model_complexity : int , optional
            complexity of the used MediaPipe Pose model (0-2) (default=1)
        return_3D : bool , optional
            return estimated keypoints directly as 3D using the depth estimate
            from MediaPipe Pose (default=False)

        Raises
        ------
        ValueError
            If an unknown pose output_format is specified
        """
        # BUG FIX: the original `if` statement was missing its trailing
        # colon, which made this module fail to import with a SyntaxError.
        if out_format not in ['mediapipe', 'coco', 'openpose']:
            raise ValueError('Unknown pose topolgy')

        self.out_format = out_format
        self.mp_pose = mp.solutions.pose
        self.model_complexity = model_complexity

    def _image_coordinates(self, X, w, h):
        """Reverse camera frame normalization: scale normalized (x, y)
        coordinates back to pixel coordinates for a w-by-h image."""
        assert X.shape[-1] == 2
        return X * [w, h]

    def estimate(self, video):
        """Estimate the 2D pose coordinates in the given video file

        Parameters
        ----------
        video : Video
            Video file packed into the data.Video class for convenience

        Returns
        -------
        dict
            2D coordinates like {'video': {'custom': [np.ndarray]}}
        dict
            metadata as used in VideoPose3D
        """
        with self.mp_pose.Pose(
                static_image_mode=False,
                #model_complexity=self.model_complexity,
                smooth_landmarks=True,) as pose:

            pose_2d = []
            for frame in video:
                result = pose.process(frame)
                if result.pose_landmarks is not None:
                    pose_2d.append([[p.x, p.y] for p in result.pose_landmarks.landmark])
                else:
                    # No detection in this frame: emit zeros for all 33
                    # MediaPipe landmarks to keep frame indices aligned.
                    pose_2d.append([[0, 0] for _ in range(33)])

            pose_2d = np.vstack(pose_2d).reshape(-1, 33, 2)

            # Convert to the requested output topology.
            if self.out_format == 'coco':
                pose_2d = mediapipe2coco(pose_2d)
            elif self.out_format == 'openpose':
                pose_2d = mediapipe2openpose(pose_2d)

            # Scale normalized coordinates to pixel coordinates.
            pose_2d = self._image_coordinates(pose_2d, *video.size)

            # create VideoPose3D-compatible metadata and keypoint structure
            metadata = suggest_metadata(self.out_format)
            video_name = 'video'
            video_meta = {'w': video.size[0], 'h': video.size[1], 'fps': video.fps}
            metadata['video_metadata'] = {video_name: video_meta}
            keypoints = {video_name: {'custom': [pose_2d]}}

        return keypoints, metadata
| StarcoderdataPython |
6623944 | from __future__ import unicode_literals
# Single-source package version string.
__version__ = "6.27.18"
| StarcoderdataPython |
11347369 | ## https://medium.com/swlh/real-time-object-detection-deployment-using-tensorflow-keras-and-aws-ec2-instance-1c1937c001d9
import sys, os
import io
from fastapi import FastAPI, UploadFile, File
from starlette.requests import Request
from pydantic import BaseModel
import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox
import numpy as np
app = FastAPI()
class ImageType(BaseModel):
    """Pydantic request model carrying an image URL.

    NOTE(review): not referenced by any route in this file — possibly unused.
    """
    url: str
@app.get("/")
def home():
return "Home"
@app.post("/predict/")
def prediction(request: Request,
file: bytes = File(...)):
if request.method == "POST":
image_stream = io.BytesIO(file)
image_stream.seek(0)
file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
bbox, label, conf = cv.detect_common_objects(frame)
output_image = draw_bbox(frame, bbox, label, conf)
num_cars = label.count('car')
print('Number of cars in the image is '+ str(num_cars))
return {"num_cars":num_cars}
return "No post request found"
## gunicorn -w 4 -k uvicorn.workers.UvicornWorker myApp:app
| StarcoderdataPython |
6557852 | <filename>pikuli/uia/control_wrappers/list.py<gh_stars>0
# -*- coding: utf-8 -*-
from .uia_control import UIAControl
class List(UIAControl):
    ''' A list control composed of ListItem elements.
    (Translated from Russian: "Some list of ListItems.") '''
    CONTROL_TYPE = 'List'

    def list_items(self):
        # exact_level=1 restricts the search to immediate children only.
        return self.find_all(ControlType='ListItem', exact_level=1)
| StarcoderdataPython |
6471526 | """
SocksProxy implementation.
"""
# TODO implement SocksProxy
| StarcoderdataPython |
3243108 | """Generates capacity diagrams for the bottleneck"""
import csv

from matplotlib import pyplot as plt
from matplotlib import rc
import numpy as np
import os

# Use LaTeX text rendering and a large bold font for the figure.
rc('text', usetex=True)
font = {'weight': 'bold',
        'size': 18}
rc('font', **font)

# Read (inflow, outflow) sample pairs from the experiment data file,
# located two directories above this script.
inflows = []
outflows = []
path = os.path.dirname(os.path.abspath(__file__))
with open(path + '/../../data/inflows_outflows.csv', 'rt') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        inflows.append(float(row[0]))
        outflows.append(float(row[1]))

# Group the outflow samples by their (repeated) inflow value.
unique_inflows = sorted(list(set(inflows)))
sorted_outflows = {inflow: [] for inflow in unique_inflows}

for inflow, outlfow in zip(inflows, outflows):
    sorted_outflows[inflow].append(outlfow)

# Per-inflow statistics across repeated runs.
mean_outflows = np.asarray([np.mean(sorted_outflows[inflow])
                            for inflow in unique_inflows])
# NOTE(review): min/max are computed but never plotted — confirm if needed.
min_outflows = np.asarray([np.min(sorted_outflows[inflow])
                           for inflow in unique_inflows])
max_outflows = np.asarray([np.max(sorted_outflows[inflow])
                           for inflow in unique_inflows])
std_outflows = np.asarray([np.std(sorted_outflows[inflow])
                           for inflow in unique_inflows])

# Plot mean outflow with a +/- 1 standard deviation band.
plt.figure(figsize=(27, 9))
plt.plot(unique_inflows, mean_outflows, linewidth=2, color='orange')
plt.fill_between(unique_inflows, mean_outflows - std_outflows,
                 mean_outflows + std_outflows, alpha=0.25, color='orange')
plt.xlabel('Inflow' + r'$ \ \frac{vehs}{hour}$')
plt.ylabel('Outflow' + r'$ \ \frac{vehs}{hour}$')
plt.tick_params(labelsize=20)
plt.rcParams['xtick.minor.size'] = 20
plt.minorticks_on()
plt.show()
| StarcoderdataPython |
6592075 | <gh_stars>0
from .main import scrape
__all__ = ["scrape"]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.