max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
toontown/coghq/InGameEditorDCImports.py
|
TheFamiliarScoot/open-toontown
| 99
|
6626351
|
if __dev__:
from direct.directutil import DistributedLargeBlobSender
from . import DistributedInGameEditor
|
if __dev__:
from direct.directutil import DistributedLargeBlobSender
from . import DistributedInGameEditor
|
none
| 1
| 1.169687
| 1
|
|
livestyled/models/order.py
|
andrelopez/python-sdk
| 0
|
6626352
|
from livestyled.models.app import App
from livestyled.models.fulfilment_point import FulfilmentPoint
from livestyled.models.product import Product, ProductVariant
from livestyled.models.user import User
class OrderItem:
def __init__(
self,
id,
fulfilment_point,
quantity,
title,
subtitle,
image_url,
price,
total_price,
product,
product_variant=None,
):
self.id = id
self.quantity = quantity
self.title = title
self.subtitle = subtitle
self.image_url = image_url
self.price = price
self.total_price = total_price
if product_variant:
if isinstance(product_variant, ProductVariant):
self.product_variant = product_variant
elif isinstance(product_variant, dict):
self.product_variant = ProductVariant(**product_variant)
elif isinstance(product_variant, int):
self.product_variant = ProductVariant.placeholder(id=product_variant)
else:
self.product_variant = None
if fulfilment_point:
if isinstance(fulfilment_point, FulfilmentPoint):
self.fulfilment_point = fulfilment_point
elif isinstance(fulfilment_point, dict):
self.fulfilment_point = FulfilmentPoint(**fulfilment_point)
elif isinstance(fulfilment_point, int):
self.fulfilment_point = FulfilmentPoint.placeholder(id=fulfilment_point)
else:
self.fulfilment_point = None
if product:
if isinstance(product, Product):
self.product = product
elif isinstance(product, dict):
self.product = Product(**product)
elif isinstance(product, int):
self.product = Product.placeholder(id=product)
else:
self.product = None
class Order:
def __init__(
self,
id,
user,
status,
gross_amount,
discount,
net_amount,
order_amount,
order_number,
items,
updated_at,
created_at,
app,
collection_date,
collection_preference_type,
check_in_time,
estimated_at,
fulfilment_point,
external_id,
seat_info
):
self.id = id
self.status = status
self.gross_amount = gross_amount
self.discount = discount
self.net_amount = net_amount
self.order_amount = order_amount
self.updated_at = updated_at
self.created_at = created_at
self.order_number = order_number
self.collection_date = collection_date
self.collection_preference_type = collection_preference_type
self.check_in_time = check_in_time
self.estimated_at = estimated_at
self.external_id = external_id
self.seat_info = seat_info
if user:
if isinstance(user, User):
self.user = user
elif isinstance(user, dict):
self.user = User(**user)
elif isinstance(user, int):
self.user = User.placeholder(id=user)
else:
self.user = None
if items:
self.items = []
for item in items:
if isinstance(item, OrderItem):
self.items.append(item)
elif isinstance(item, dict):
self.items.append(OrderItem(**item))
else:
self.items = None
if app:
if isinstance(app, App):
self.app = app
elif isinstance(app, dict):
self.app = App(**app)
else:
self.app = None
if fulfilment_point:
if isinstance(fulfilment_point, FulfilmentPoint):
self.fulfilment_point = fulfilment_point
elif isinstance(fulfilment_point, dict):
self.fulfilment_point = FulfilmentPoint(**fulfilment_point)
elif isinstance(fulfilment_point, int):
self.fulfilment_point = FulfilmentPoint.placeholder(id=fulfilment_point)
else:
self.fulfilment_point = None
@classmethod
def placeholder(cls, id):
return cls(
id,
user=None,
status=None,
gross_amount=None,
discount=None,
net_amount=None,
order_amount=None,
order_number=None,
items=None,
updated_at=None,
created_at=None,
app=None,
collection_date=None,
collection_preference_type=None,
check_in_time=None,
estimated_at=None,
fulfilment_point=None,
external_id=None,
seat_info=None
)
|
from livestyled.models.app import App
from livestyled.models.fulfilment_point import FulfilmentPoint
from livestyled.models.product import Product, ProductVariant
from livestyled.models.user import User
class OrderItem:
def __init__(
self,
id,
fulfilment_point,
quantity,
title,
subtitle,
image_url,
price,
total_price,
product,
product_variant=None,
):
self.id = id
self.quantity = quantity
self.title = title
self.subtitle = subtitle
self.image_url = image_url
self.price = price
self.total_price = total_price
if product_variant:
if isinstance(product_variant, ProductVariant):
self.product_variant = product_variant
elif isinstance(product_variant, dict):
self.product_variant = ProductVariant(**product_variant)
elif isinstance(product_variant, int):
self.product_variant = ProductVariant.placeholder(id=product_variant)
else:
self.product_variant = None
if fulfilment_point:
if isinstance(fulfilment_point, FulfilmentPoint):
self.fulfilment_point = fulfilment_point
elif isinstance(fulfilment_point, dict):
self.fulfilment_point = FulfilmentPoint(**fulfilment_point)
elif isinstance(fulfilment_point, int):
self.fulfilment_point = FulfilmentPoint.placeholder(id=fulfilment_point)
else:
self.fulfilment_point = None
if product:
if isinstance(product, Product):
self.product = product
elif isinstance(product, dict):
self.product = Product(**product)
elif isinstance(product, int):
self.product = Product.placeholder(id=product)
else:
self.product = None
class Order:
def __init__(
self,
id,
user,
status,
gross_amount,
discount,
net_amount,
order_amount,
order_number,
items,
updated_at,
created_at,
app,
collection_date,
collection_preference_type,
check_in_time,
estimated_at,
fulfilment_point,
external_id,
seat_info
):
self.id = id
self.status = status
self.gross_amount = gross_amount
self.discount = discount
self.net_amount = net_amount
self.order_amount = order_amount
self.updated_at = updated_at
self.created_at = created_at
self.order_number = order_number
self.collection_date = collection_date
self.collection_preference_type = collection_preference_type
self.check_in_time = check_in_time
self.estimated_at = estimated_at
self.external_id = external_id
self.seat_info = seat_info
if user:
if isinstance(user, User):
self.user = user
elif isinstance(user, dict):
self.user = User(**user)
elif isinstance(user, int):
self.user = User.placeholder(id=user)
else:
self.user = None
if items:
self.items = []
for item in items:
if isinstance(item, OrderItem):
self.items.append(item)
elif isinstance(item, dict):
self.items.append(OrderItem(**item))
else:
self.items = None
if app:
if isinstance(app, App):
self.app = app
elif isinstance(app, dict):
self.app = App(**app)
else:
self.app = None
if fulfilment_point:
if isinstance(fulfilment_point, FulfilmentPoint):
self.fulfilment_point = fulfilment_point
elif isinstance(fulfilment_point, dict):
self.fulfilment_point = FulfilmentPoint(**fulfilment_point)
elif isinstance(fulfilment_point, int):
self.fulfilment_point = FulfilmentPoint.placeholder(id=fulfilment_point)
else:
self.fulfilment_point = None
@classmethod
def placeholder(cls, id):
return cls(
id,
user=None,
status=None,
gross_amount=None,
discount=None,
net_amount=None,
order_amount=None,
order_number=None,
items=None,
updated_at=None,
created_at=None,
app=None,
collection_date=None,
collection_preference_type=None,
check_in_time=None,
estimated_at=None,
fulfilment_point=None,
external_id=None,
seat_info=None
)
|
none
| 1
| 2.140191
| 2
|
|
transmute_core/frameworks/flask/handler.py
|
toumorokoshi/web-transmute
| 0
|
6626353
|
import sys
from functools import wraps
from flask import request, Response
from transmute_core import ParamExtractor, NoArgument
def create_routes_and_handler(transmute_func, context):
@wraps(transmute_func.raw_func)
def handler(*args, **kwargs):
exc, result = None, None
try:
args, kwargs = _param_instance.extract_params(
context,
transmute_func,
request.content_type,
)
result = transmute_func(*args, **kwargs)
except Exception as e:
exc = e
exc.__traceback__ = sys.exc_info()[2]
response = transmute_func.process_result(
context, result, exc, request.content_type
)
return Response(
response["body"],
status=response["code"],
mimetype=response["content-type"],
headers=response["headers"],
)
return (_convert_paths_to_flask(transmute_func.paths), handler)
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths
class ParamExtractorFlask(ParamExtractor):
def _get_framework_args(self):
return {}
@property
def body(self):
return request.get_data()
@staticmethod
def _query_argument(key, is_list):
if key not in request.args:
return NoArgument
if is_list:
return request.args.getlist(key)
else:
return request.args[key]
@staticmethod
def _header_argument(key):
return request.headers.get(key, NoArgument)
@staticmethod
def _path_argument(key):
return request.view_args.get(key, NoArgument)
_param_instance = ParamExtractorFlask()
|
import sys
from functools import wraps
from flask import request, Response
from transmute_core import ParamExtractor, NoArgument
def create_routes_and_handler(transmute_func, context):
@wraps(transmute_func.raw_func)
def handler(*args, **kwargs):
exc, result = None, None
try:
args, kwargs = _param_instance.extract_params(
context,
transmute_func,
request.content_type,
)
result = transmute_func(*args, **kwargs)
except Exception as e:
exc = e
exc.__traceback__ = sys.exc_info()[2]
response = transmute_func.process_result(
context, result, exc, request.content_type
)
return Response(
response["body"],
status=response["code"],
mimetype=response["content-type"],
headers=response["headers"],
)
return (_convert_paths_to_flask(transmute_func.paths), handler)
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths
class ParamExtractorFlask(ParamExtractor):
def _get_framework_args(self):
return {}
@property
def body(self):
return request.get_data()
@staticmethod
def _query_argument(key, is_list):
if key not in request.args:
return NoArgument
if is_list:
return request.args.getlist(key)
else:
return request.args[key]
@staticmethod
def _header_argument(key):
return request.headers.get(key, NoArgument)
@staticmethod
def _path_argument(key):
return request.view_args.get(key, NoArgument)
_param_instance = ParamExtractorFlask()
|
en
| 0.980867
|
flask has it's own route syntax, so we convert it.
| 2.479252
| 2
|
clare/clare/common/messaging/consumer/consumers.py
|
dnguyen0304/room-list-watcher
| 0
|
6626354
|
<reponame>dnguyen0304/room-list-watcher<gh_stars>0
# -*- coding: utf-8 -*-
import time
from . import exceptions
from . import interfaces
class Consumer(interfaces.IConsumer):
def __init__(self, fetcher, handler, filters=None):
"""
Parameters
----------
fetcher : typing.Type[clare.common.messaging.consumer.interfaces.IFetcher]
handler : typing.Type[clare.common.messaging.consumer.interfaces.IHandler]
filters : typing.Iterable[clare.common.messaging.interfaces.IFilter]
Defaults to list.
"""
self._fetcher = fetcher
self._handler = handler
self._filters = filters or list()
def consume(self, interval):
while True:
self._consume_once()
time.sleep(interval)
def _consume_once(self):
try:
message = self._fetcher.fetch()
except exceptions.FetchTimeout:
pass
else:
for filter_ in self._filters:
message = filter_.filter(message=message)
if message is None:
break
else:
self._handler.handle(message=message)
def __repr__(self):
repr_ = '{}(fetcher={}, handler={}, filters={})'
return repr_.format(self.__class__.__name__,
self._fetcher,
self._handler,
self._filters)
|
# -*- coding: utf-8 -*-
import time
from . import exceptions
from . import interfaces
class Consumer(interfaces.IConsumer):
def __init__(self, fetcher, handler, filters=None):
"""
Parameters
----------
fetcher : typing.Type[clare.common.messaging.consumer.interfaces.IFetcher]
handler : typing.Type[clare.common.messaging.consumer.interfaces.IHandler]
filters : typing.Iterable[clare.common.messaging.interfaces.IFilter]
Defaults to list.
"""
self._fetcher = fetcher
self._handler = handler
self._filters = filters or list()
def consume(self, interval):
while True:
self._consume_once()
time.sleep(interval)
def _consume_once(self):
try:
message = self._fetcher.fetch()
except exceptions.FetchTimeout:
pass
else:
for filter_ in self._filters:
message = filter_.filter(message=message)
if message is None:
break
else:
self._handler.handle(message=message)
def __repr__(self):
repr_ = '{}(fetcher={}, handler={}, filters={})'
return repr_.format(self.__class__.__name__,
self._fetcher,
self._handler,
self._filters)
|
en
| 0.200304
|
# -*- coding: utf-8 -*- Parameters ---------- fetcher : typing.Type[clare.common.messaging.consumer.interfaces.IFetcher] handler : typing.Type[clare.common.messaging.consumer.interfaces.IHandler] filters : typing.Iterable[clare.common.messaging.interfaces.IFilter] Defaults to list.
| 2.485753
| 2
|
breast_cancer_classifier.py
|
codedeamon/Breast-Cancer-Classifier
| 0
|
6626355
|
import numpy as np
import pandas as pd
import scipy as sp
from scipy import stats
import os
from sklearn import preprocessing
from sklearn import svm
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics.classification import _prf_divide
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.utils.fixes import np_version
from sklearn.utils.multiclass import unique_labels
from numpy import bincount
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.classification import _prf_divide
from sklearn.feature_selection import SelectKBest, f_classif
RANDOM_STATE = 14
def convert_class(item):
if item == "car":
return 1
elif item == "fad":
return 2
elif item == "mas":
return 3
elif item == "gla":
return 4
elif item == "con":
return 5
elif item == "adi":
return 6
def bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if np_version[:2] < (1, 6):
bincount = bincount
else:
bincount = np.bincount
def g_mean(y_true, y_pred, labels=None, correction=0.01):
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,assume_unique=True)])
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=None, minlength=len(labels))
else:
# Pathological case
true_sum = tp_sum = np.zeros(len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=None, minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
recall = _prf_divide(tp_sum, true_sum, "recall", "true", None, "recall")
recall[recall == 0] = correction
return sp.stats.mstats.gmean(recall)
'''
================================================================================================
Normalizing the data
================================================================================================
'''
# normalizeData function normalizes our data values
def normalizeData(filenameIn, filenameOut):
myInput = pd.read_excel(filenameIn, 1, converters = {'Class':convert_class})
#normalizing
myInput.ix[:, 2:] = myInput.ix[:, 2:].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
#myInput.to_excel(filenameOut, index=False)
return myInput
my_norm_dta = normalizeData("BreastTissue.xlsx", "normalized.xlsx")
# lets define our feature data and the target data
data = my_norm_dta.ix[:, 2:]
target = my_norm_dta.ix[:, 1]
# with KFold we will shuffle the data randomly and then split it into 5 folds
k_fold = KFold(n_splits=5, shuffle=True, random_state=RANDOM_STATE)
# here we make our scoring metric: geometric mean, which is defined above
scoring = make_scorer(g_mean)
#========================== 3 : linear SVM implementation ======================================
c_scores = []
max_score = 0
max_C = 1
# here we search for the best C value, using linear kernel
for i in range(1, 200, 5):
clf = svm.SVC(kernel='linear', C=i)
score = cross_val_score(clf, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of C = ", i, " is ", ms)
c_scores.append(ms)
if ms > max_score:
max_score = ms
max_C = i
print("scores are ", c_scores)
print("max score was ", max_score, " with C = ", max_C)
plt.figure(1)
plt.plot(range(1,200,5),c_scores)
plt.xlabel('C Values for SVM linear')
plt.ylabel('Geometric Mean Score')
plt.show()
# now lets search for the best gamma value
gamma_scores = []
max_score = 0
best_gamma = 0.5
gamma = 0.5
# here we search for the best gamma value, using rbf kernel
while gamma <= 10:
clf = svm.SVC(kernel='rbf', gamma=gamma, C=max_C)
score = cross_val_score(clf, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of gamma = ", gamma, " is ", ms)
gamma_scores.append(ms)
if ms > max_score:
max_score = ms
best_gamma = gamma
gamma += 0.5
print("scores are ", gamma_scores)
print("max score was ", max_score, " with gamma = ", best_gamma)
plt.figure(2)
plt.plot(np.arange(0,10, 0.5), gamma_scores)
plt.xlabel('Gamma Values for SVM RBF')
plt.ylabel('Geometric Mean Score')
plt.show()
# ======================= KNN Classifier =======================================================
k_n = 3
best_k = 3
max_k_score = 0
k_scores = []
while k_n < 16:
knn = KNeighborsClassifier(n_neighbors=k_n)
score = cross_val_score(knn, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of knn for k = ", k_n, " is ", ms)
k_scores.append(ms)
if ms > max_k_score:
max_k_score = ms
best_k = k_n
k_n += 1
print("knn mean scores are ", k_scores)
print("max score was ", max_k_score, " with k = ", best_k)
plt.figure(3)
plt.plot(range(3,16), k_scores)
plt.xlabel('K Values for KNN')
plt.ylabel('Mean Score')
plt.show()
#====================== Gaussian Naive Bayes Classifier =========================================
gnb = GaussianNB()
score = cross_val_score(gnb, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
print("the mean score of Naive Bayes is ", ms)
'''
=============================================================================================
Now let's implement Student t-test for each characteristic
=============================================================================================
'''
del my_norm_dta['Case #']
featureSelector = SelectKBest(f_classif, k=4)
Xtrunc = featureSelector.fit_transform(data, target)
print(Xtrunc)
k_n = 3
best_k = 3
max_k_score = 0
k_scores = []
while k_n < 16:
knn = KNeighborsClassifier(n_neighbors=k_n)
score = cross_val_score(knn, Xtrunc, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of knn for k = ", k_n, " is ", ms)
k_scores.append(ms)
if ms > max_k_score:
max_k_score = ms
best_k = k_n
k_n += 1
print("knn mean scores are ", k_scores)
print("max score was ", max_k_score, " with k = ", best_k)
plt.figure(4)
plt.plot(range(3,16), k_scores)
plt.xlabel('K Values for KNN')
plt.ylabel('Mean Score')
plt.show()
|
import numpy as np
import pandas as pd
import scipy as sp
from scipy import stats
import os
from sklearn import preprocessing
from sklearn import svm
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics.classification import _prf_divide
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.utils.fixes import np_version
from sklearn.utils.multiclass import unique_labels
from numpy import bincount
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics.classification import _prf_divide
from sklearn.feature_selection import SelectKBest, f_classif
RANDOM_STATE = 14
def convert_class(item):
if item == "car":
return 1
elif item == "fad":
return 2
elif item == "mas":
return 3
elif item == "gla":
return 4
elif item == "con":
return 5
elif item == "adi":
return 6
def bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if np_version[:2] < (1, 6):
bincount = bincount
else:
bincount = np.bincount
def g_mean(y_true, y_pred, labels=None, correction=0.01):
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,assume_unique=True)])
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=None, minlength=len(labels))
else:
# Pathological case
true_sum = tp_sum = np.zeros(len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=None, minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
recall = _prf_divide(tp_sum, true_sum, "recall", "true", None, "recall")
recall[recall == 0] = correction
return sp.stats.mstats.gmean(recall)
'''
================================================================================================
Normalizing the data
================================================================================================
'''
# normalizeData function normalizes our data values
def normalizeData(filenameIn, filenameOut):
myInput = pd.read_excel(filenameIn, 1, converters = {'Class':convert_class})
#normalizing
myInput.ix[:, 2:] = myInput.ix[:, 2:].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
#myInput.to_excel(filenameOut, index=False)
return myInput
my_norm_dta = normalizeData("BreastTissue.xlsx", "normalized.xlsx")
# lets define our feature data and the target data
data = my_norm_dta.ix[:, 2:]
target = my_norm_dta.ix[:, 1]
# with KFold we will shuffle the data randomly and then split it into 5 folds
k_fold = KFold(n_splits=5, shuffle=True, random_state=RANDOM_STATE)
# here we make our scoring metric: geometric mean, which is defined above
scoring = make_scorer(g_mean)
#========================== 3 : linear SVM implementation ======================================
c_scores = []
max_score = 0
max_C = 1
# here we search for the best C value, using linear kernel
for i in range(1, 200, 5):
clf = svm.SVC(kernel='linear', C=i)
score = cross_val_score(clf, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of C = ", i, " is ", ms)
c_scores.append(ms)
if ms > max_score:
max_score = ms
max_C = i
print("scores are ", c_scores)
print("max score was ", max_score, " with C = ", max_C)
plt.figure(1)
plt.plot(range(1,200,5),c_scores)
plt.xlabel('C Values for SVM linear')
plt.ylabel('Geometric Mean Score')
plt.show()
# now lets search for the best gamma value
gamma_scores = []
max_score = 0
best_gamma = 0.5
gamma = 0.5
# here we search for the best gamma value, using rbf kernel
while gamma <= 10:
clf = svm.SVC(kernel='rbf', gamma=gamma, C=max_C)
score = cross_val_score(clf, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of gamma = ", gamma, " is ", ms)
gamma_scores.append(ms)
if ms > max_score:
max_score = ms
best_gamma = gamma
gamma += 0.5
print("scores are ", gamma_scores)
print("max score was ", max_score, " with gamma = ", best_gamma)
plt.figure(2)
plt.plot(np.arange(0,10, 0.5), gamma_scores)
plt.xlabel('Gamma Values for SVM RBF')
plt.ylabel('Geometric Mean Score')
plt.show()
# ======================= KNN Classifier =======================================================
k_n = 3
best_k = 3
max_k_score = 0
k_scores = []
while k_n < 16:
knn = KNeighborsClassifier(n_neighbors=k_n)
score = cross_val_score(knn, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of knn for k = ", k_n, " is ", ms)
k_scores.append(ms)
if ms > max_k_score:
max_k_score = ms
best_k = k_n
k_n += 1
print("knn mean scores are ", k_scores)
print("max score was ", max_k_score, " with k = ", best_k)
plt.figure(3)
plt.plot(range(3,16), k_scores)
plt.xlabel('K Values for KNN')
plt.ylabel('Mean Score')
plt.show()
#====================== Gaussian Naive Bayes Classifier =========================================
gnb = GaussianNB()
score = cross_val_score(gnb, data, target, cv=k_fold, scoring=scoring)
ms = score.mean()
print("the mean score of Naive Bayes is ", ms)
'''
=============================================================================================
Now let's implement Student t-test for each characteristic
=============================================================================================
'''
del my_norm_dta['Case #']
featureSelector = SelectKBest(f_classif, k=4)
Xtrunc = featureSelector.fit_transform(data, target)
print(Xtrunc)
k_n = 3
best_k = 3
max_k_score = 0
k_scores = []
while k_n < 16:
knn = KNeighborsClassifier(n_neighbors=k_n)
score = cross_val_score(knn, Xtrunc, target, cv=k_fold, scoring=scoring)
ms = score.mean()
#print("the g_mean score of knn for k = ", k_n, " is ", ms)
k_scores.append(ms)
if ms > max_k_score:
max_k_score = ms
best_k = k_n
k_n += 1
print("knn mean scores are ", k_scores)
print("max score was ", max_k_score, " with k = ", best_k)
plt.figure(4)
plt.plot(range(3,16), k_scores)
plt.xlabel('K Values for KNN')
plt.ylabel('Mean Score')
plt.show()
|
en
| 0.583292
|
Replacing np.bincount in numpy < 1.6 to provide minlength. # labels are now from 0 to len(labels) - 1 -> use bincount # Pathological case # Retain only selected labels ================================================================================================ Normalizing the data ================================================================================================ # normalizeData function normalizes our data values #normalizing #myInput.to_excel(filenameOut, index=False) # lets define our feature data and the target data # with KFold we will shuffle the data randomly and then split it into 5 folds # here we make our scoring metric: geometric mean, which is defined above #========================== 3 : linear SVM implementation ====================================== # here we search for the best C value, using linear kernel #print("the g_mean score of C = ", i, " is ", ms) # now lets search for the best gamma value # here we search for the best gamma value, using rbf kernel #print("the g_mean score of gamma = ", gamma, " is ", ms) # ======================= KNN Classifier ======================================================= #print("the g_mean score of knn for k = ", k_n, " is ", ms) #====================== Gaussian Naive Bayes Classifier ========================================= ============================================================================================= Now let's implement Student t-test for each characteristic ============================================================================================= #'] #print("the g_mean score of knn for k = ", k_n, " is ", ms)
| 2.661043
| 3
|
client/p_lib/user.py
|
hazra1991/Message_server_
| 0
|
6626356
|
<filename>client/p_lib/user.py
try:
from queue import Queue
except:
from Queue import Queue
import pickle,random
class User:
def __init__(self,username,conn):
self.username = username
self.pending_message = Queue()
self.group_id = username + '@' + str(random.getrandbits(32))
self.conn_obj = conn
def add_message(self):
pass
def add_groupe(self):
pass
def del_group(self):
pass
def get_obj(obj):
print('entered object',obj)
return pickle.loads(obj)
def create_userobj(username,conn):
obj = User(username,conn)
return obj
def save_obj(obj):
return pickle.dumps(obj)
class Message:
def __init__(self,uname,msg_type,message,send_to):
self.username=uname
self.msg_type = msg_type
self.message = message
self.send_to = send_to
|
<filename>client/p_lib/user.py
try:
from queue import Queue
except:
from Queue import Queue
import pickle,random
class User:
def __init__(self,username,conn):
self.username = username
self.pending_message = Queue()
self.group_id = username + '@' + str(random.getrandbits(32))
self.conn_obj = conn
def add_message(self):
pass
def add_groupe(self):
pass
def del_group(self):
pass
def get_obj(obj):
print('entered object',obj)
return pickle.loads(obj)
def create_userobj(username,conn):
obj = User(username,conn)
return obj
def save_obj(obj):
return pickle.dumps(obj)
class Message:
def __init__(self,uname,msg_type,message,send_to):
self.username=uname
self.msg_type = msg_type
self.message = message
self.send_to = send_to
|
none
| 1
| 2.671201
| 3
|
|
Chatbot_Web/web_interface/ner/ie_interface.py
|
codeants2012/Chatbot_CN
| 6
|
6626357
|
<filename>Chatbot_Web/web_interface/ner/ie_interface.py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: ie_interface.py
Description : 信息抽取接口
Author : charl
date: 2018/11/15
-------------------------------------------------
Change Activity: 2018/11/15:
-------------------------------------------------
"""
from rest_framework.views import APIView
from dss.Serializer import serializer
from django.http import HttpResponse, HttpRequest
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def ner(request, format=None):
return Response({
'users': reverse('user-list', request=request, format=format),
'snippets': reverse('snippet-list', request=request, format=format)
})
|
<filename>Chatbot_Web/web_interface/ner/ie_interface.py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: ie_interface.py
Description : 信息抽取接口
Author : charl
date: 2018/11/15
-------------------------------------------------
Change Activity: 2018/11/15:
-------------------------------------------------
"""
from rest_framework.views import APIView
from dss.Serializer import serializer
from django.http import HttpResponse, HttpRequest
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def ner(request, format=None):
return Response({
'users': reverse('user-list', request=request, format=format),
'snippets': reverse('snippet-list', request=request, format=format)
})
|
en
| 0.196558
|
# -*- coding: utf-8 -*- ------------------------------------------------- File Name: ie_interface.py Description : 信息抽取接口 Author : charl date: 2018/11/15 ------------------------------------------------- Change Activity: 2018/11/15: -------------------------------------------------
| 2.357586
| 2
|
raiden_contracts/constants.py
|
konradkonrad/raiden-contracts
| 0
|
6626358
|
<filename>raiden_contracts/constants.py
from enum import Enum, IntEnum
from eth_utils import to_canonical_address
# Contract names
CONTRACT_ENDPOINT_REGISTRY = 'EndpointRegistry'
CONTRACT_HUMAN_STANDARD_TOKEN = 'HumanStandardToken'
CONTRACT_TOKEN_NETWORK_REGISTRY = 'TokenNetworkRegistry'
CONTRACT_TOKEN_NETWORK = 'TokenNetwork'
CONTRACT_SECRET_REGISTRY = 'SecretRegistry'
CONTRACT_CUSTOM_TOKEN = 'CustomToken'
CONTRACT_CUSTOM_TOKEN_NO_DECIMALS = 'CustomTokenNoDecimals'
CONTRACT_MONITORING_SERVICE = 'MonitoringService'
CONTRACT_RAIDEN_SERVICE_BUNDLE = 'RaidenServiceBundle'
# Deployed contract information
# Deployed to Ropsten revival on 2018-09-03 from
# raiden-contracts@fc1c79329a165c738fc55c3505cf801cc79872e4
ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS = '0xf2a175A52Bd3c815eD7500c765bA19652AB89B30'
ROPSTEN_ENDPOINT_REGISTRY_ADDRESS = '0xEEADDC1667B6EBc7784721B123a6F669B69Eb9bD'
ROPSTEN_SECRET_REGISTRY_ADDRESS = '0x16a25511A92C5ebfc6C30ad98F754e4c820c6822'
# Deployed to Ropsten revival on 2018-09-21 from
# raiden-contracts@bfb24fed3ebda2799e4d11ad1bb5a6de116bd12d
ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS = '0x6cC27CBF184B4177CD3c5D1a39a875aD07345eEb'
ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS = '0xcF47EDF0D951c862ED9825F47075c15BEAf5Db1B'
ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS = '0x8167a262Fa3Be92F05420675c3b409c64Be3d348'
# Network configurations
# Dict key under which each network config stores its default start block.
START_QUERY_BLOCK_KEY = 'DefaultStartBlock'
class ChainId(Enum):
    """EIP-155 chain ids of the Ethereum networks known to Raiden."""
    MAINNET = 1
    ROPSTEN = 3
    RINKEBY = 4
    KOVAN = 42
    SMOKETEST = 627
# Human-readable network names. These are module-level string constants and
# intentionally share names with the ChainId members above (no shadowing of
# the enum — the enum members live in ChainId's namespace).
MAINNET = 'mainnet'
ROPSTEN = 'ropsten'
RINKEBY = 'rinkeby'
KOVAN = 'kovan'
SMOKETEST = 'smoketest'
# ChainId -> human-readable name, plus the reverse lookup built from it.
ID_TO_NETWORKNAME = {
    ChainId.MAINNET: MAINNET,
    ChainId.ROPSTEN: ROPSTEN,
    ChainId.RINKEBY: RINKEBY,
    ChainId.KOVAN: KOVAN,
    ChainId.SMOKETEST: SMOKETEST,
}
# NOTE(review): the comprehension variable `id` shadows the builtin; left
# as-is here (doc-only pass).
NETWORKNAME_TO_ID = {
    name: id
    for id, name in ID_TO_NETWORKNAME.items()
}
class NetworkType(Enum):
    """Whether a configuration targets a production (MAIN) or TEST deployment."""
    MAIN = 1
    TEST = 2
# Per-chain deployment info: canonical contract addresses plus the earliest
# block worth querying for events (set just before contract deployment).
ID_TO_NETWORK_CONFIG = {
    ChainId.ROPSTEN: {
        NetworkType.TEST: {
            'network_type': NetworkType.TEST,
            'contract_addresses': {
                CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
                    ROPSTEN_ENDPOINT_REGISTRY_ADDRESS,
                ),
                CONTRACT_SECRET_REGISTRY: to_canonical_address(ROPSTEN_SECRET_REGISTRY_ADDRESS),
                CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
                    ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS,
                ),
            },
            # 924 blocks before token network registry deployment
            START_QUERY_BLOCK_KEY: 3604000,
        },
        NetworkType.MAIN: {
            'network_type': NetworkType.MAIN,
            'contract_addresses': {
                CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
                    ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS,
                ),
                CONTRACT_SECRET_REGISTRY: to_canonical_address(
                    ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS,
                ),
                CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
                    ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS,
                ),
            },
            # 153 blocks before token network registry deployment
            START_QUERY_BLOCK_KEY: 4084000,
        },
    },
}
# TokenNetworkRegistry
EVENT_TOKEN_NETWORK_CREATED = 'TokenNetworkCreated'
class ChannelEvent(str, Enum):
    """Solidity event names emitted by the TokenNetwork contract.

    Subclasses ``str`` so members compare equal to the raw event-name strings.
    """
    OPENED = 'ChannelOpened'
    DEPOSIT = 'ChannelNewDeposit'
    WITHDRAW = 'ChannelWithdraw'
    BALANCE_PROOF_UPDATED = 'NonClosingBalanceProofUpdated'
    CLOSED = 'ChannelClosed'
    SETTLED = 'ChannelSettled'
    UNLOCKED = 'ChannelUnlocked'
# SecretRegistry
EVENT_SECRET_REVEALED = 'SecretRevealed'
# EndpointRegistry
EVENT_ADDRESS_REGISTERED = 'AddressRegistered'
# Timeouts
# Settle-timeout bounds, in blocks (test vs. deployed contracts).
TEST_SETTLE_TIMEOUT_MIN = 5
TEST_SETTLE_TIMEOUT_MAX = 100000
DEPLOY_SETTLE_TIMEOUT_MIN = 500 # ~ 2 hours
DEPLOY_SETTLE_TIMEOUT_MAX = 555428 # ~ 3 months
class MessageTypeId(IntEnum):
    """Message-type discriminators included in signed message payloads."""
    BALANCE_PROOF = 1
    BALANCE_PROOF_UPDATE = 2
    WITHDRAW = 3
    COOPERATIVE_SETTLE = 4
class ChannelState(IntEnum):
    """Lifecycle states of a payment channel as stored on-chain."""
    NONEXISTENT = 0
    OPENED = 1
    CLOSED = 2
    SETTLED = 3
    REMOVED = 4
# Temporary deposit limits for the Red Eyes release in WEI
MAX_ETH_CHANNEL_PARTICIPANT = int(0.075 * 10**18)
MAX_ETH_TOKEN_NETWORK = int(250 * 10**18)
class ChannelInfoIndex(IntEnum):
    """Positions of fields in the channel-info tuple returned by the contract."""
    SETTLE_BLOCK = 0
    STATE = 1
class ParticipantInfoIndex(IntEnum):
    """Positions of fields in the participant-info tuple returned by the contract."""
    DEPOSIT = 0
    WITHDRAWN = 1
    IS_CLOSER = 2
    BALANCE_HASH = 3
    NONCE = 4
    LOCKSROOT = 5
    LOCKED_AMOUNT = 6
|
<filename>raiden_contracts/constants.py
from enum import Enum, IntEnum
from eth_utils import to_canonical_address
# Contract names
CONTRACT_ENDPOINT_REGISTRY = 'EndpointRegistry'
CONTRACT_HUMAN_STANDARD_TOKEN = 'HumanStandardToken'
CONTRACT_TOKEN_NETWORK_REGISTRY = 'TokenNetworkRegistry'
CONTRACT_TOKEN_NETWORK = 'TokenNetwork'
CONTRACT_SECRET_REGISTRY = 'SecretRegistry'
CONTRACT_CUSTOM_TOKEN = 'CustomToken'
CONTRACT_CUSTOM_TOKEN_NO_DECIMALS = 'CustomTokenNoDecimals'
CONTRACT_MONITORING_SERVICE = 'MonitoringService'
CONTRACT_RAIDEN_SERVICE_BUNDLE = 'RaidenServiceBundle'
# Deployed contract information
# Deployed to Ropsten revival on 2018-09-03 from
# raiden-contracts@fc1c79329a165c738fc55c3505cf801cc79872e4
ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS = '0xf2a175A52Bd3c815eD7500c765bA19652AB89B30'
ROPSTEN_ENDPOINT_REGISTRY_ADDRESS = '0xEEADDC1667B6EBc7784721B123a6F669B69Eb9bD'
ROPSTEN_SECRET_REGISTRY_ADDRESS = '0x16a25511A92C5ebfc6C30ad98F754e4c820c6822'
# Deployed to Ropsten revival on 2018-09-21 from
# raiden-contracts@bfb24fed3ebda2799e4d11ad1bb5a6de116bd12d
ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS = '0x6cC27CBF184B4177CD3c5D1a39a875aD07345eEb'
ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS = '0xcF47EDF0D951c862ED9825F47075c15BEAf5Db1B'
ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS = '0x8167a262Fa3Be92F05420675c3b409c64Be3d348'
# Network configurations
START_QUERY_BLOCK_KEY = 'DefaultStartBlock'
class ChainId(Enum):
MAINNET = 1
ROPSTEN = 3
RINKEBY = 4
KOVAN = 42
SMOKETEST = 627
MAINNET = 'mainnet'
ROPSTEN = 'ropsten'
RINKEBY = 'rinkeby'
KOVAN = 'kovan'
SMOKETEST = 'smoketest'
ID_TO_NETWORKNAME = {
ChainId.MAINNET: MAINNET,
ChainId.ROPSTEN: ROPSTEN,
ChainId.RINKEBY: RINKEBY,
ChainId.KOVAN: KOVAN,
ChainId.SMOKETEST: SMOKETEST,
}
NETWORKNAME_TO_ID = {
name: id
for id, name in ID_TO_NETWORKNAME.items()
}
class NetworkType(Enum):
MAIN = 1
TEST = 2
ID_TO_NETWORK_CONFIG = {
ChainId.ROPSTEN: {
NetworkType.TEST: {
'network_type': NetworkType.TEST,
'contract_addresses': {
CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
ROPSTEN_ENDPOINT_REGISTRY_ADDRESS,
),
CONTRACT_SECRET_REGISTRY: to_canonical_address(ROPSTEN_SECRET_REGISTRY_ADDRESS),
CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
ROPSTEN_TOKEN_NETWORK_REGISTRY_ADDRESS,
),
},
# 924 blocks before token network registry deployment
START_QUERY_BLOCK_KEY: 3604000,
},
NetworkType.MAIN: {
'network_type': NetworkType.MAIN,
'contract_addresses': {
CONTRACT_ENDPOINT_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_ENDPOINT_REGISTRY_ADDRESS,
),
CONTRACT_SECRET_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_SECRET_REGISTRY_ADDRESS,
),
CONTRACT_TOKEN_NETWORK_REGISTRY: to_canonical_address(
ROPSTEN_LIMITS_TOKEN_NETWORK_REGISTRY_ADDRESS,
),
},
# 153 blocks before token network registry deployment
START_QUERY_BLOCK_KEY: 4084000,
},
},
}
# TokenNetworkRegistry
EVENT_TOKEN_NETWORK_CREATED = 'TokenNetworkCreated'
class ChannelEvent(str, Enum):
OPENED = 'ChannelOpened'
DEPOSIT = 'ChannelNewDeposit'
WITHDRAW = 'ChannelWithdraw'
BALANCE_PROOF_UPDATED = 'NonClosingBalanceProofUpdated'
CLOSED = 'ChannelClosed'
SETTLED = 'ChannelSettled'
UNLOCKED = 'ChannelUnlocked'
# SecretRegistry
EVENT_SECRET_REVEALED = 'SecretRevealed'
# EndpointRegistry
EVENT_ADDRESS_REGISTERED = 'AddressRegistered'
# Timeouts
TEST_SETTLE_TIMEOUT_MIN = 5
TEST_SETTLE_TIMEOUT_MAX = 100000
DEPLOY_SETTLE_TIMEOUT_MIN = 500 # ~ 2 hours
DEPLOY_SETTLE_TIMEOUT_MAX = 555428 # ~ 3 months
class MessageTypeId(IntEnum):
BALANCE_PROOF = 1
BALANCE_PROOF_UPDATE = 2
WITHDRAW = 3
COOPERATIVE_SETTLE = 4
class ChannelState(IntEnum):
NONEXISTENT = 0
OPENED = 1
CLOSED = 2
SETTLED = 3
REMOVED = 4
# Temporary deposit limits for the Red Eyes release in WEI
MAX_ETH_CHANNEL_PARTICIPANT = int(0.075 * 10**18)
MAX_ETH_TOKEN_NETWORK = int(250 * 10**18)
class ChannelInfoIndex(IntEnum):
SETTLE_BLOCK = 0
STATE = 1
class ParticipantInfoIndex(IntEnum):
DEPOSIT = 0
WITHDRAWN = 1
IS_CLOSER = 2
BALANCE_HASH = 3
NONCE = 4
LOCKSROOT = 5
LOCKED_AMOUNT = 6
|
en
| 0.673394
|
# Contract names # Deployed contract information # Deployed to Ropsten revival on 2018-09-03 from # raiden-contracts@fc1c79329a165c738fc55c3505cf801cc79872e4 # Deployed to Ropsten revival on 2018-09-21 from # raiden-contracts@bfb24fed3ebda2799e4d11ad1bb5a6de116bd12d # Network configurations # 924 blocks before token network registry deployment # 153 blocks before token network registry deployment # TokenNetworkRegistry # SecretRegistry # EndpointRegistry # Timeouts # ~ 2 hours # ~ 3 months # Temporary deposit limits for the Red Eyes release in WEI
| 1.472933
| 1
|
apps/Vision/QRClassification/QRCodeClassificationTester.py
|
uvic-aero/onboard-computer
| 1
|
6626359
|
<filename>apps/Vision/QRClassification/QRCodeClassificationTester.py<gh_stars>1-10
from QRCodeClassification import QRCodeClassification
import cv2
import os
"""
simple test file for QRCodeClassification. takes an image as input, converts it to nparray subimages and then returns images as jpgs
"""
# Exercise QRCodeClassification end-to-end: split the input image into
# ndarray sub-images and dump each one to disk as a JPEG for inspection.
classifier = QRCodeClassification()
subimages = classifier.split_frames("0.jpg")
# Plain string path. The previous os.fsencode(...).decode() was an identity
# round-trip, and re-prefixing "./" produced the redundant "././..." path.
directory = "./nparray-to-image"
for count, subimage in enumerate(subimages):
    cv2.imwrite(f"{directory}/img_{count}.jpg", subimage)
|
<filename>apps/Vision/QRClassification/QRCodeClassificationTester.py<gh_stars>1-10
from QRCodeClassification import QRCodeClassification
import cv2
import os
"""
simple test file for QRCodeClassification. takes an image as input, converts it to nparray subimages and then returns images as jpgs
"""
tmp = QRCodeClassification()
subimages = tmp.split_frames("0.jpg")
directory = os.fsencode("./nparray-to-image").decode()
count = 0
for subimage in subimages:
cv2.imwrite(f"./{directory}/img_{count}.jpg", subimage)
count += 1
|
en
| 0.85168
|
simple test file for QRCodeClassification. takes an image as input, converts it to nparray subimages and then returns images as jpgs
| 2.921261
| 3
|
recommender/recommender/framework/tf2/layers/dense_to_sparsetensor.py
|
ericdoug-qi/RecommendationsInAction
| 0
|
6626360
|
# _*_ coding: utf-8 _*_
"""
-------------------------------------------------
File Name: dense_to_sparsetensor.py
Description :
Author : ericdoug
date:2021/3/20
-------------------------------------------------
Change Activity:
2021/3/20: created
-------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# sys packages
import os
# third packages
from tensorflow.keras.layers import Layer
import tensorflow as tf
# my packages
class DenseToSparseTensor(Layer):
    """Keras layer converting a dense tensor into a ``tf.SparseTensor``.

    Entries equal to ``mask_value`` are treated as padding and dropped;
    every other entry becomes an explicit value of the sparse output.

    Args:
        mask_value: scalar marking padded/missing positions (default -1).
        **kwargs: standard ``Layer`` keyword arguments (name, dtype, ...).
    """

    def __init__(self, mask_value=-1, **kwargs):
        # Bug fix: forward **kwargs to the base Layer — the original
        # accepted them but silently dropped name/dtype/trainable etc.
        super(DenseToSparseTensor, self).__init__(**kwargs)
        self.mask_value = mask_value

    def call(self, dense_tensor):
        """Return a SparseTensor holding all non-masked entries."""
        # Indices where the input differs from the mask value; the constant
        # is cast to the input dtype so the comparison is well-typed.
        idx = tf.where(
            tf.not_equal(dense_tensor,
                         tf.constant(self.mask_value, dtype=dense_tensor.dtype)))
        sparse_tensor = tf.SparseTensor(
            idx,
            tf.gather_nd(dense_tensor, idx),
            tf.shape(dense_tensor, out_type=tf.int64))
        return sparse_tensor

    def get_config(self):
        """Serialize the layer config so it survives model save/load."""
        config = super(DenseToSparseTensor, self).get_config()
        config.update({'mask_value': self.mask_value})
        return config
|
# _*_ coding: utf-8 _*_
"""
-------------------------------------------------
File Name: dense_to_sparsetensor.py
Description :
Author : ericdoug
date:2021/3/20
-------------------------------------------------
Change Activity:
2021/3/20: created
-------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# sys packages
import os
# third packages
from tensorflow.keras.layers import Layer
import tensorflow as tf
# my packages
class DenseToSparseTensor(Layer):
def __init__(self, mask_value=-1, **kwargs):
super(DenseToSparseTensor, self).__init__()
self.mask_value = mask_value
def call(self, dense_tensor):
idx = tf.where(tf.not_equal(dense_tensor, tf.constant(self.mask_value, dtype=dense_tensor.dtype)))
sparse_tensor = tf.SparseTensor(idx, tf.gather_nd(dense_tensor, idx), tf.shape(dense_tensor, out_type=tf.int64))
return sparse_tensor
def get_config(self):
config = super(DenseToSparseTensor, self).get_config()
config.update({'mask_value': self.mask_value})
return config
|
en
| 0.357547
|
# _*_ coding: utf-8 _*_ ------------------------------------------------- File Name: dense_to_sparsetensor.py Description : Author : ericdoug date:2021/3/20 ------------------------------------------------- Change Activity: 2021/3/20: created ------------------------------------------------- # sys packages # third packages # my packages
| 2.239562
| 2
|
rest-service/manager_rest/rest/resources_v1/nodes.py
|
TS-at-WS/cloudify-manager
| 0
|
6626361
|
<reponame>TS-at-WS/cloudify-manager
#########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
import collections
import collections.abc

from flask import request
from flask_restful.reqparse import Argument
from flask_restful_swagger import swagger

from manager_rest import manager_exceptions
from manager_rest.resource_manager import ResourceManager
from manager_rest.rest.rest_decorators import marshal_with
from manager_rest.rest.rest_utils import (
    get_args_and_verify_arguments,
    get_json_and_verify_params,
)
from manager_rest.security import SecuredResource
from manager_rest.security.authorization import authorize
from manager_rest.storage import (
    get_storage_manager,
    models,
    get_node
)
class Nodes(SecuredResource):
    """REST resource exposing the list of deployment nodes."""

    @swagger.operation(
        responseClass='List[{0}]'.format(models.Node.__name__),
        nickname="listNodes",
        notes="Returns nodes list according to the provided query parameters.",
        parameters=[{'name': 'deployment_id',
                     'description': 'Deployment id',
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'query'}]
    )
    @authorize('node_list')
    @marshal_with(models.Node)
    def get(self, _include=None, **kwargs):
        """
        List nodes
        """
        params = get_args_and_verify_arguments(
            [Argument('deployment_id', required=False),
             Argument('node_id', required=False)]
        )
        deployment_id = params.get('deployment_id')
        node_id = params.get('node_id')

        # Exact lookup when both identifiers are supplied; a miss yields [].
        if deployment_id and node_id:
            try:
                return [get_node(deployment_id, node_id)]
            except manager_exceptions.NotFoundError:
                return []

        # Otherwise list all nodes, optionally filtered by deployment.
        filters = ResourceManager.create_filters_dict(
            deployment_id=deployment_id)
        return get_storage_manager().list(
            models.Node,
            filters=filters,
            include=_include
        ).items
class NodeInstances(SecuredResource):
    """REST resource exposing the list of node instances."""

    @swagger.operation(
        responseClass='List[{0}]'.format(models.NodeInstance.__name__),
        nickname="listNodeInstances",
        notes="Returns node instances list according to the provided query"
              " parameters.",
        parameters=[{'name': 'deployment_id',
                     'description': 'Deployment id',
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'query'},
                    {'name': 'node_name',
                     'description': 'node name',
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'query'}]
    )
    @authorize('node_instance_list')
    @marshal_with(models.NodeInstance)
    def get(self, _include=None, **kwargs):
        """
        List node instances
        """
        params = get_args_and_verify_arguments(
            [Argument('deployment_id', required=False),
             Argument('node_name', required=False)]
        )
        # NOTE: the query parameter is called ``node_name`` but maps onto
        # the storage filter key ``node_id``.
        filters = ResourceManager.create_filters_dict(
            deployment_id=params.get('deployment_id'),
            node_id=params.get('node_name'))
        listing = get_storage_manager().list(
            models.NodeInstance,
            filters=filters,
            include=_include
        )
        return listing.items
class NodeInstancesId(SecuredResource):
    """REST resource for reading and updating a single node instance."""

    @swagger.operation(
        responseClass=models.Node,
        nickname="getNodeInstance",
        notes="Returns node state/runtime properties "
              "according to the provided query parameters.",
        parameters=[{'name': 'node_id',
                     'description': 'Node Id',
                     'required': True,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'path'},
                    {'name': 'state_and_runtime_properties',
                     'description': 'Specifies whether to return state and '
                                    'runtime properties',
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'boolean',
                     'defaultValue': True,
                     'paramType': 'query'}]
    )
    @authorize('node_instance_get')
    @marshal_with(models.NodeInstance)
    def get(self, node_instance_id, _include=None, **kwargs):
        """
        Get node instance by id
        """
        return get_storage_manager().get(
            models.NodeInstance,
            node_instance_id,
            include=_include
        )

    @swagger.operation(
        responseClass=models.NodeInstance,
        nickname="patchNodeState",
        notes="Update node instance. Expecting the request body to "
              "be a dictionary containing 'version' which is used for "
              "optimistic locking during the update, and optionally "
              "'runtime_properties' (dictionary) and/or 'state' (string) "
              "properties",
        parameters=[{'name': 'node_instance_id',
                     'description': 'Node instance identifier',
                     'required': True,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'path'},
                    {'name': 'version',
                     'description': 'used for optimistic locking during '
                                    'update',
                     'required': True,
                     'allowMultiple': False,
                     'dataType': 'int',
                     'paramType': 'body'},
                    {'name': 'runtime_properties',
                     'description': 'a dictionary of runtime properties. If '
                                    'omitted, the runtime properties wont be '
                                    'updated',
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'dict',
                     'paramType': 'body'},
                    {'name': 'state',
                     'description': "the new node's state. If omitted, "
                                    "the state wont be updated",
                     'required': False,
                     'allowMultiple': False,
                     'dataType': 'string',
                     'paramType': 'body'}],
        consumes=["application/json"]
    )
    @authorize('node_instance_update')
    @marshal_with(models.NodeInstance)
    def patch(self, node_instance_id, **kwargs):
        """Update node instance by id."""
        request_dict = get_json_and_verify_params(
            {'version': {'type': int}}
        )
        # Bug fix: collections.Mapping was removed in Python 3.10 — the ABC
        # lives in collections.abc.
        if not isinstance(request.json, collections.abc.Mapping):
            raise manager_exceptions.BadParametersError(
                'Request body is expected to be a map containing a "version" '
                'field and optionally "runtimeProperties" and/or "state" '
                'fields')
        # Added for backwards compatibility with older client versions that
        # had version=0 by default
        version = request_dict['version'] or 1
        # Row-level lock so the version check and the update are atomic with
        # respect to concurrent PATCHes.
        instance = get_storage_manager().get(
            models.NodeInstance,
            node_instance_id,
            locking=True
        )
        if instance.version > version:
            raise manager_exceptions.ConflictError(
                'Node instance update conflict [current version={0}, '
                'update version={1}]'.format(instance.version, version)
            )
        # Only update if new values were included in the request
        instance.runtime_properties = request_dict.get(
            'runtime_properties',
            instance.runtime_properties
        )
        instance.state = request_dict.get('state', instance.state)
        return get_storage_manager().update(instance)
|
#########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
import collections
from flask import request
from flask_restful.reqparse import Argument
from flask_restful_swagger import swagger
from manager_rest import manager_exceptions
from manager_rest.resource_manager import ResourceManager
from manager_rest.rest.rest_decorators import marshal_with
from manager_rest.rest.rest_utils import (
get_args_and_verify_arguments,
get_json_and_verify_params,
)
from manager_rest.security import SecuredResource
from manager_rest.security.authorization import authorize
from manager_rest.storage import (
get_storage_manager,
models,
get_node
)
class Nodes(SecuredResource):
@swagger.operation(
responseClass='List[{0}]'.format(models.Node.__name__),
nickname="listNodes",
notes="Returns nodes list according to the provided query parameters.",
parameters=[{'name': 'deployment_id',
'description': 'Deployment id',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'}]
)
@authorize('node_list')
@marshal_with(models.Node)
def get(self, _include=None, **kwargs):
"""
List nodes
"""
args = get_args_and_verify_arguments(
[Argument('deployment_id', required=False),
Argument('node_id', required=False)]
)
deployment_id = args.get('deployment_id')
node_id = args.get('node_id')
if deployment_id and node_id:
try:
nodes = [get_node(deployment_id, node_id)]
except manager_exceptions.NotFoundError:
nodes = []
else:
deployment_id_filter = ResourceManager.create_filters_dict(
deployment_id=deployment_id)
nodes = get_storage_manager().list(
models.Node,
filters=deployment_id_filter,
include=_include
).items
return nodes
class NodeInstances(SecuredResource):
@swagger.operation(
responseClass='List[{0}]'.format(models.NodeInstance.__name__),
nickname="listNodeInstances",
notes="Returns node instances list according to the provided query"
" parameters.",
parameters=[{'name': 'deployment_id',
'description': 'Deployment id',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'},
{'name': 'node_name',
'description': 'node name',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'}]
)
@authorize('node_instance_list')
@marshal_with(models.NodeInstance)
def get(self, _include=None, **kwargs):
"""
List node instances
"""
args = get_args_and_verify_arguments(
[Argument('deployment_id', required=False),
Argument('node_name', required=False)]
)
deployment_id = args.get('deployment_id')
node_id = args.get('node_name')
params_filter = ResourceManager.create_filters_dict(
deployment_id=deployment_id, node_id=node_id)
return get_storage_manager().list(
models.NodeInstance,
filters=params_filter,
include=_include
).items
class NodeInstancesId(SecuredResource):
@swagger.operation(
responseClass=models.Node,
nickname="getNodeInstance",
notes="Returns node state/runtime properties "
"according to the provided query parameters.",
parameters=[{'name': 'node_id',
'description': 'Node Id',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'path'},
{'name': 'state_and_runtime_properties',
'description': 'Specifies whether to return state and '
'runtime properties',
'required': False,
'allowMultiple': False,
'dataType': 'boolean',
'defaultValue': True,
'paramType': 'query'}]
)
@authorize('node_instance_get')
@marshal_with(models.NodeInstance)
def get(self, node_instance_id, _include=None, **kwargs):
"""
Get node instance by id
"""
return get_storage_manager().get(
models.NodeInstance,
node_instance_id,
include=_include
)
@swagger.operation(
responseClass=models.NodeInstance,
nickname="patchNodeState",
notes="Update node instance. Expecting the request body to "
"be a dictionary containing 'version' which is used for "
"optimistic locking during the update, and optionally "
"'runtime_properties' (dictionary) and/or 'state' (string) "
"properties",
parameters=[{'name': 'node_instance_id',
'description': 'Node instance identifier',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'path'},
{'name': 'version',
'description': 'used for optimistic locking during '
'update',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'body'},
{'name': 'runtime_properties',
'description': 'a dictionary of runtime properties. If '
'omitted, the runtime properties wont be '
'updated',
'required': False,
'allowMultiple': False,
'dataType': 'dict',
'paramType': 'body'},
{'name': 'state',
'description': "the new node's state. If omitted, "
"the state wont be updated",
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'body'}],
consumes=["application/json"]
)
@authorize('node_instance_update')
@marshal_with(models.NodeInstance)
def patch(self, node_instance_id, **kwargs):
"""Update node instance by id."""
request_dict = get_json_and_verify_params(
{'version': {'type': int}}
)
if not isinstance(request.json, collections.Mapping):
raise manager_exceptions.BadParametersError(
'Request body is expected to be a map containing a "version" '
'field and optionally "runtimeProperties" and/or "state" '
'fields')
# Added for backwards compatibility with older client versions that
# had version=0 by default
version = request_dict['version'] or 1
instance = get_storage_manager().get(
models.NodeInstance,
node_instance_id,
locking=True
)
if instance.version > version:
raise manager_exceptions.ConflictError(
'Node instance update conflict [current version={0}, '
'update version={1}]'.format(instance.version, version)
)
# Only update if new values were included in the request
instance.runtime_properties = request_dict.get(
'runtime_properties',
instance.runtime_properties
)
instance.state = request_dict.get('state', instance.state)
return get_storage_manager().update(instance)
|
en
| 0.881125
|
######### # Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. # List nodes List node instances Get node instance by id Update node instance by id. # Added for backwards compatibility with older client versions that # had version=0 by default # Only update if new values were included in the request
| 1.86395
| 2
|
codebase/data/utils.py
|
ShihengDuan/multiple_forcing
| 1
|
6626362
|
<filename>codebase/data/utils.py
"""
This file is part of the accompanying code to our manuscript:
<NAME>., <NAME>., <NAME>., and <NAME>.: A note on leveraging synergy in multiple meteorological
datasets with deep learning for rainfall-runoff modeling, Hydrol. Earth Syst. Sci. Discuss.,
https://doi.org/10.5194/hess-2020-221, in review, 2020.
You should have received a copy of the Apache-2.0 license along with the code. If not,
see <https://opensource.org/licenses/Apache-2.0>
"""
from pathlib import Path, PosixPath
from typing import List, Tuple
import numpy as np
import pandas as pd
from numba import njit
def load_camels_attributes(data_dir: PosixPath, basins: List = None) -> pd.DataFrame:
    """Load the CAMELS US catchment attributes into a single DataFrame.

    Args:
        data_dir: CAMELS root directory containing ``camels_attributes_v2.0``.
        basins: optional list of gauge-id strings; when given, all other
            basins are dropped from the result.

    Returns:
        DataFrame indexed by ``gauge_id`` with one column per attribute;
        the ``huc_02`` column is renamed ``huc`` and zero-padded to 2 digits.

    Raises:
        RuntimeError: if the attribute folder does not exist.
    """
    # Fix the mutable-default-argument smell (`basins=[]`); behavior for
    # callers that omit the argument is unchanged.
    if basins is None:
        basins = []
    attributes_path = Path(data_dir) / 'camels_attributes_v2.0'
    if not attributes_path.exists():
        raise RuntimeError(f"Attribute folder not found at {attributes_path}")
    txt_files = attributes_path.glob('camels_*.txt')
    # Read-in attributes into one big dataframe
    dfs = []
    for txt_file in txt_files:
        df_temp = pd.read_csv(txt_file, sep=';', header=0, dtype={'gauge_id': str})
        dfs.append(df_temp.set_index('gauge_id'))
    df = pd.concat(dfs, axis=1)
    # convert huc column to double digit strings
    df['huc'] = df['huc_02'].apply(lambda x: str(x).zfill(2))
    df = df.drop('huc_02', axis=1)
    if basins:
        # drop rows of basins not contained in the passed list
        drop_basins = [b for b in df.index if b not in basins]
        df = df.drop(drop_basins, axis=0)
    return df
@njit
def reshape_data(x_d: np.ndarray, y: np.ndarray, seq_length: int,
                 x_s: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray,]:
    """Slide a window of length ``seq_length`` over the time axis.

    Turns per-timestep arrays into overlapping training samples: output
    sample i covers timesteps [i, i + seq_length). Compiled with numba.

    Args:
        x_d: dynamic inputs, shape (num_samples, num_features).
        y: targets, shape (num_samples, num_targets).
        seq_length: look-back window length.
        x_s: optional static inputs, shape (num_samples, num_static);
            only the window's last timestep is kept per sample.

    Returns:
        (x_d_new, x_s_new, y_new); x_s_new is None when x_s is None.
    """
    num_samples, num_features = x_d.shape
    num_targets = y.shape[-1]
    # One output sample for every full window that fits into the series.
    x_d_new = np.zeros((num_samples - seq_length + 1, seq_length, num_features))
    y_new = np.zeros((num_samples - seq_length + 1, seq_length, num_targets))
    if x_s is not None:
        x_s_new = np.zeros((num_samples - seq_length + 1, x_s.shape[-1]))
    else:
        x_s_new = None
    for i in range(0, x_d_new.shape[0]):
        x_d_new[i, :, :] = x_d[i:i + seq_length, :]
        y_new[i, :, :] = y[i:i + seq_length, :]
        if x_s is not None:
            # Static features of the window's final timestep represent it.
            x_s_new[i, :] = x_s[i + seq_length - 1, :]
    return x_d_new, x_s_new, y_new
def load_forcings(data_dir: PosixPath, basin: str, forcings: str) -> Tuple[pd.DataFrame, int]:
    """Load the meteorological forcings of one CAMELS US basin.

    Args:
        data_dir: CAMELS root directory.
        basin: gauge id; matched against the first 8 characters of the
            forcing file name.
        forcings: forcing-product sub-folder name (e.g. 'daymet').

    Returns:
        Tuple of (forcings DataFrame indexed by date, catchment area as read
        from line 3 of the file header — presumably m²; consistent with the
        conversion in ``load_discharge``, confirm against the data docs).

    Raises:
        OSError: if the forcing folder does not exist.
        FileNotFoundError: if no forcing file matches ``basin``.
    """
    forcing_path = data_dir / 'basin_mean_forcing' / forcings
    if not forcing_path.is_dir():
        raise OSError(f"{forcing_path} does not exist")
    files = list(forcing_path.glob('**/*_forcing_leap.txt'))
    matches = [f for f in files if f.name[:8] == basin]
    if not matches:
        # Bug fix: the original interpolated the (empty) match list into the
        # message; report the searched folder instead.
        raise FileNotFoundError(f'No file for Basin {basin} at {forcing_path}')
    file_path = matches[0]
    # Raw string for the regex separator (avoids the invalid-escape warning).
    df = pd.read_csv(file_path, sep=r'\s+', header=3)
    dates = (df.Year.map(str) + "/" + df.Mnth.map(str) + "/" + df.Day.map(str))
    df.index = pd.to_datetime(dates, format="%Y/%m/%d")
    # load area from header
    with open(file_path, 'r') as fp:
        content = fp.readlines()
    area = int(content[2])
    return df, area
def load_discharge(data_dir: PosixPath, basin: str, area: int) -> pd.Series:
    """Load observed discharge of a CAMELS US basin, normalized to mm/day.

    Args:
        data_dir: CAMELS root directory.
        basin: gauge id; matched against the first 8 characters of the
            streamflow file name.
        area: catchment area used for normalization (m² per the conversion
            below — confirm against the forcing-file header it comes from).

    Returns:
        Series of daily discharge [mm/day] indexed by date.

    Raises:
        FileNotFoundError: if no streamflow file matches ``basin``.
    """
    discharge_path = data_dir / 'usgs_streamflow'
    files = list(discharge_path.glob('**/*_streamflow_qc.txt'))
    matches = [f for f in files if f.name[:8] == basin]
    if not matches:
        # Bug fix: the original interpolated the (empty) match list into the
        # message; report the searched folder instead.
        raise FileNotFoundError(f'No file for Basin {basin} at {discharge_path}')
    file_path = matches[0]
    col_names = ['basin', 'Year', 'Mnth', 'Day', 'QObs', 'flag']
    # Raw string for the regex separator (avoids the invalid-escape warning).
    df = pd.read_csv(file_path, sep=r'\s+', header=None, names=col_names)
    dates = (df.Year.map(str) + "/" + df.Mnth.map(str) + "/" + df.Day.map(str))
    df.index = pd.to_datetime(dates, format="%Y/%m/%d")
    # normalize discharge from cubic feet per second to mm per day:
    # 1 cfs = 28,316,846.592 mm^3/s, times 86400 s/day, over the area in mm^2.
    df.QObs = 28316846.592 * df.QObs * 86400 / (area * 10**6)
    return df.QObs
def get_camels_scaler(data_dir: PosixPath, basins: List, attributes: List):
    """Return (mean, std) of the selected static attributes over ``basins``.

    Only columns listed in ``attributes`` contribute to the statistics.
    """
    attrs = load_camels_attributes(data_dir=data_dir, basins=basins)
    unwanted = [col for col in attrs.columns if col not in attributes]
    selected = attrs.drop(unwanted, axis=1)
    return selected.mean(), selected.std()
def load_basin_file(basin_file: PosixPath) -> List:
    """Read a basin-list file and return the stripped gauge ids, in file order."""
    with basin_file.open('r') as fp:
        return [line.strip() for line in fp]
def attributes_sanity_check(data_dir: PosixPath, dataset: str, basins: list, attribute_list: list):
    """Fail fast if any selected static attribute is degenerate.

    An attribute whose standard deviation over ``basins`` is zero or NaN
    would produce NaNs during feature normalization, so it is reported and
    the run aborted.

    Args:
        data_dir: dataset root directory.
        dataset: dataset identifier; only "camels_us" is supported.
        basins: gauge ids to include.
        attribute_list: attribute (column) names to check.

    Raises:
        ValueError: for an unsupported ``dataset``.
        RuntimeError: listing the degenerate attributes, if any.
    """
    if dataset == "camels_us":
        df = load_camels_attributes(data_dir, basins)
    else:
        # Bug fix: an unknown dataset previously fell through and crashed
        # with a NameError on `df`; make the unsupported input explicit.
        raise ValueError(f"Unknown dataset {dataset}")
    drop_cols = [c for c in df.columns if c not in attribute_list]
    df = df.drop(drop_cols, axis=1)
    # Compute the per-column std once (the original recomputed it 3 times).
    stds = df.std()
    # Bug fix: Series.iteritems() was removed in pandas 2.0 — use .items().
    attributes = [k for k, v in stds.items() if (v == 0) or (np.isnan(v))]
    if attributes:
        msg = [
            "The following attributes have a std of zero or NaN, which results in NaN's ",
            "when normalizing the features. Remove the attributes from the attribute feature list ",
            "and restart the run. \n", f"Attributes: {attributes}"
        ]
        raise RuntimeError("".join(msg))
|
<filename>codebase/data/utils.py
"""
This file is part of the accompanying code to our manuscript:
<NAME>., <NAME>., <NAME>., and <NAME>.: A note on leveraging synergy in multiple meteorological
datasets with deep learning for rainfall-runoff modeling, Hydrol. Earth Syst. Sci. Discuss.,
https://doi.org/10.5194/hess-2020-221, in review, 2020.
You should have received a copy of the Apache-2.0 license along with the code. If not,
see <https://opensource.org/licenses/Apache-2.0>
"""
from pathlib import Path, PosixPath
from typing import List, Tuple
import numpy as np
import pandas as pd
from numba import njit
def load_camels_attributes(data_dir: PosixPath, basins: List = []) -> pd.DataFrame:
    """Load all CAMELS catchment attributes into a single DataFrame.

    Reads every ``camels_*.txt`` file below ``camels_attributes_v2.0``,
    concatenates them column-wise (indexed by gauge id), converts the HUC
    code to a zero-padded two-digit string and, if `basins` is non-empty,
    restricts the rows to those gauge ids.

    Raises:
        RuntimeError: If the attribute folder does not exist.
    """
    attributes_path = Path(data_dir) / 'camels_attributes_v2.0'
    if not attributes_path.exists():
        raise RuntimeError(f"Attribute folder not found at {attributes_path}")

    # one DataFrame per attribute file, all indexed by the gauge id column
    frames = [
        pd.read_csv(txt_file, sep=';', header=0, dtype={'gauge_id': str}).set_index('gauge_id')
        for txt_file in attributes_path.glob('camels_*.txt')
    ]
    df = pd.concat(frames, axis=1)

    # replace the numeric huc_02 column by a zero-padded two-digit string
    df['huc'] = df['huc_02'].apply(lambda x: str(x).zfill(2))
    df = df.drop('huc_02', axis=1)

    if basins:
        # keep only the requested basins
        df = df.drop([b for b in df.index if b not in basins], axis=0)
    return df
@njit
def reshape_data(x_d: np.ndarray, y: np.ndarray, seq_length: int,
                 x_s: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray,]:
    """Reshape time series into overlapping windows of length `seq_length`.

    Compiled with numba's ``@njit``, hence the explicit loops instead of
    vectorized/fancy indexing.

    Args:
        x_d: Dynamic inputs of shape (num_samples, num_features).
        y: Targets of shape (num_samples, num_targets).
        seq_length: Number of time steps per sample window.
        x_s: Optional static inputs of shape (num_samples, num_static); for
            each window the values of its last time step are taken.

    Returns:
        Tuple ``(x_d_new, x_s_new, y_new)`` where ``x_d_new`` has shape
        (num_samples - seq_length + 1, seq_length, num_features), ``y_new``
        the analogous target shape, and ``x_s_new`` is None if `x_s` is None.
    """
    num_samples, num_features = x_d.shape
    num_targets = y.shape[-1]
    # one window per possible window start position
    x_d_new = np.zeros((num_samples - seq_length + 1, seq_length, num_features))
    y_new = np.zeros((num_samples - seq_length + 1, seq_length, num_targets))
    if x_s is not None:
        x_s_new = np.zeros((num_samples - seq_length + 1, x_s.shape[-1]))
    else:
        x_s_new = None
    for i in range(0, x_d_new.shape[0]):
        # window i covers time steps [i, i + seq_length)
        x_d_new[i, :, :] = x_d[i:i + seq_length, :]
        y_new[i, :, :] = y[i:i + seq_length, :]
        if x_s is not None:
            # static inputs sampled at the last time step of the window
            x_s_new[i, :] = x_s[i + seq_length - 1, :]
    return x_d_new, x_s_new, y_new
def load_forcings(data_dir: PosixPath, basin: str, forcings: str) -> Tuple[pd.DataFrame, int]:
    """Load the meteorological forcing data of a single basin.

    Args:
        data_dir: Root of the CAMELS data set directory.
        basin: 8-digit USGS gauge id of the basin.
        forcings: Name of the forcing product (sub-folder of
            ``basin_mean_forcing``).

    Returns:
        Tuple of the forcing time series (date-indexed DataFrame) and the
        catchment area read from the file header (presumably m^2, matching
        the normalization in ``load_discharge`` -- TODO confirm).

    Raises:
        OSError: If the forcing folder does not exist.
        FileNotFoundError: If no forcing file exists for the given basin.
    """
    forcing_path = data_dir / 'basin_mean_forcing' / forcings
    if not forcing_path.is_dir():
        raise OSError(f"{forcing_path} does not exist")

    # the first 8 characters of the file name are the basin id
    files = list(forcing_path.glob('**/*_forcing_leap.txt'))
    file_path = [f for f in files if f.name[:8] == basin]
    if file_path:
        file_path = file_path[0]
    else:
        # report the searched folder; the original message interpolated
        # `file_path`, which is an empty list at this point
        raise FileNotFoundError(f'No file for Basin {basin} at {forcing_path}')

    # raw string avoids the invalid '\s' escape-sequence warning
    df = pd.read_csv(file_path, sep=r'\s+', header=3)
    dates = (df.Year.map(str) + "/" + df.Mnth.map(str) + "/" + df.Day.map(str))
    df.index = pd.to_datetime(dates, format="%Y/%m/%d")

    # the catchment area is stored in the third line of the file header
    with open(file_path, 'r') as fp:
        content = fp.readlines()
    area = int(content[2])

    return df, area
def load_discharge(data_dir: PosixPath, basin: str, area: int) -> pd.Series:
    """Load the observed discharge of a single basin, normalized to mm/day.

    Args:
        data_dir: Root of the CAMELS data set directory.
        basin: 8-digit USGS gauge id of the basin.
        area: Catchment area used for normalization (presumably m^2, as
            returned by ``load_forcings`` -- TODO confirm).

    Returns:
        Date-indexed series of discharge values in mm/day.

    Raises:
        FileNotFoundError: If no streamflow file exists for the basin.
    """
    discharge_path = data_dir / 'usgs_streamflow'
    files = list(discharge_path.glob('**/*_streamflow_qc.txt'))
    # the first 8 characters of the file name are the basin id
    file_path = [f for f in files if f.name[:8] == basin]
    if file_path:
        file_path = file_path[0]
    else:
        # report the searched folder; the original message interpolated
        # `file_path`, which is an empty list at this point
        raise FileNotFoundError(f'No file for Basin {basin} at {discharge_path}')

    col_names = ['basin', 'Year', 'Mnth', 'Day', 'QObs', 'flag']
    # raw string avoids the invalid '\s' escape-sequence warning
    df = pd.read_csv(file_path, sep=r'\s+', header=None, names=col_names)
    dates = (df.Year.map(str) + "/" + df.Mnth.map(str) + "/" + df.Day.map(str))
    df.index = pd.to_datetime(dates, format="%Y/%m/%d")

    # normalize discharge from cubic feet per second to mm per day
    # (1 ft^3 = 28316846.592 mm^3)
    df.QObs = 28316846.592 * df.QObs * 86400 / (area * 10**6)

    return df.QObs
def get_camels_scaler(data_dir: PosixPath, basins: List, attributes: List):
    """Compute mean and std of the selected catchment attributes.

    Loads the attribute table for `basins`, keeps only the columns named in
    `attributes` and returns their column-wise mean and standard deviation.
    """
    table = load_camels_attributes(data_dir=data_dir, basins=basins)
    table = table.drop([c for c in table.columns if c not in attributes], axis=1)
    return table.mean(), table.std()
def load_basin_file(basin_file: PosixPath) -> List:
    """Return the list of basin ids contained in `basin_file`, one per line."""
    with basin_file.open('r') as fp:
        lines = fp.readlines()
    return [entry.strip() for entry in lines]
def attributes_sanity_check(data_dir: PosixPath, dataset: str, basins: list, attribute_list: list):
    """Check that no selected attribute has a zero or NaN standard deviation.

    Attributes with std of 0 or NaN would produce NaNs when the features are
    normalized, so such attributes must be removed from the configuration.

    Raises:
        RuntimeError: If at least one attribute in `attribute_list` has a
            standard deviation of zero or NaN over the selected basins.
    """
    if dataset == "camels_us":
        df = load_camels_attributes(data_dir, basins)
    # NOTE(review): for any other `dataset` value `df` stays unbound and the
    # code below raises NameError -- confirm only "camels_us" is passed here.
    drop_cols = [c for c in df.columns if c not in attribute_list]
    df = df.drop(drop_cols, axis=1)
    attributes = []
    # compute the stds once instead of three times
    stds = df.std()
    if any(stds == 0.0) or any(stds.isnull()):
        # `Series.iteritems()` was removed in pandas 2.0; `items()` is the
        # supported spelling and behaves identically.
        for k, v in stds.items():
            if (v == 0) or (np.isnan(v)):
                attributes.append(k)
    if attributes:
        msg = [
            "The following attributes have a std of zero or NaN, which results in NaN's ",
            "when normalizing the features. Remove the attributes from the attribute feature list ",
            "and restart the run. \n", f"Attributes: {attributes}"
        ]
        raise RuntimeError("".join(msg))
|
en
| 0.800654
|
This file is part of the accompanying code to our manuscript: <NAME>., <NAME>., <NAME>., and <NAME>.: A note on leveraging synergy in multiple meteorological datasets with deep learning for rainfall-runoff modeling, Hydrol. Earth Syst. Sci. Discuss., https://doi.org/10.5194/hess-2020-221, in review, 2020. You should have received a copy of the Apache-2.0 license along with the code. If not, see <https://opensource.org/licenses/Apache-2.0> # Read-in attributes into one big dataframe # convert huc column to double digit strings # drop rows of basins not contained in the passed list # load area from header # normalize discharge from cubic feed per second to mm per day
| 2.270267
| 2
|
src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/protos/input_reader_pb2.py
|
makistsantekidis/opendr
| 217
|
6626363
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/input_reader.proto
import sys
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
target_pb2 as second_dot_protos_dot_target__pb2,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
preprocess_pb2 as second_dot_protos_dot_preprocess__pb2,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
sampler_pb2 as second_dot_protos_dot_sampler__pb2,
)
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="second/protos/input_reader.proto",
package="second.protos",
syntax="proto3",
serialized_options=None,
serialized_pb=_b(
'\n second/protos/input_reader.proto\x12\rsecond.protos\x1a\x1asecond/proto' +
's/target.proto\x1a\x1esecond/pr' +
'otos/preprocess.proto\x1a\x1bsecond/protos/sampler.proto"\xc7\x07\n\x0bInputR' +
'eader\x12\x18\n\x10record_file_pa' +
'th\x18\x01 \x01(\t\x12\x13\n\x0b\x63lass_names\x18\x02 \x03(\t\x12\x12\n\nbatch' +
'_size\x18\x03 \x01(\r\x12\x16\n\x0emax_n' +
'um_epochs\x18\x04 \x01(\r\x12\x15\n\rprefetch_size\x18\x05 \x01(\r\x12\x1c\n\x14max_n' +
'umber_of_voxels\x18\x06 \x01(\r\x12\x36\n\x0ftarge' +
't_assigner\x18\x07 \x01(\x0b\x32\x1d.second.protos.TargetAssigner\x12\x17\n\x0fkitti' +
'_info_path\x18\x08 \x01(\t\x12\x17\n\x0fkitti_ro' +
'ot_path\x18\t \x01(\t\x12\x16\n\x0eshuffle_points\x18\n \x01(\x08\x12*\n"groundtr' +
'uth_localization_noise_std\x18\x0b \x03(\x02\x12*\n"groundt' +
'ruth_rotation_uniform_noise\x18\x0c \x03(\x02\x12%\n\x1dglobal_rotation_uniform_' +
'noise\x18\r \x03(\x02\x12$\n\x1cglobal_scaling_uniform_n' +
'oise\x18\x0e \x03(\x02\x12\x1f\n\x17remove_unknown_examples\x18\x0f \x01(\x08\x12\x13\n\x0bnu' +
'm_workers\x18\x10 \x01(\r\x12\x1d\n\x15\x61nch' +
'or_area_threshold\x18\x11 \x01(\x02\x12"\n\x1aremove_points_after_sample\x18\x12 \x01(\x08\x12*\n"g' +
'roundtruth_points_drop_percenta' +
'ge\x18\x13 \x01(\x02\x12(\n groundtruth_drop_max_keep_points\x18\x14 \x01(\r\x12\x1a\n\x12remov' +
'e_environment\x18\x15 \x01(\x08\x12\x1a\n\x12u' +
'nlabeled_training\x18\x16 \x01(\x08\x12/\n\'global_random_rotation_range_per_obj' +
'ect\x18\x17 \x03(\x02\x12\x45\n\x13\x64\x61tabase_prep' +
'_steps\x18\x18 \x03(\x0b\x32(.second.protos.DatabasePreproces' +
'singStep\x12\x30\n\x10\x64\x61tabase_sampler\x18\x19 \x01(\x0b\x32\x16.sec' +
'ond.protos.Sampler\x12\x14\n\x0cuse_group_id\x18\x1a \x01(\x08\x12:\n\x1aunla' +
'beled_database_sampler\x18\x1b \x01(\x0b\x32\x16.second.p' +
'rotos.Samplerb\x06proto3'
),
dependencies=[
second_dot_protos_dot_target__pb2.DESCRIPTOR,
second_dot_protos_dot_preprocess__pb2.DESCRIPTOR,
second_dot_protos_dot_sampler__pb2.DESCRIPTOR,
],
)
_INPUTREADER = _descriptor.Descriptor(
name="InputReader",
full_name="second.protos.InputReader",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="record_file_path",
full_name="second.protos.InputReader.record_file_path",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="class_names",
full_name="second.protos.InputReader.class_names",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="second.protos.InputReader.batch_size",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_num_epochs",
full_name="second.protos.InputReader.max_num_epochs",
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="prefetch_size",
full_name="second.protos.InputReader.prefetch_size",
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_number_of_voxels",
full_name="second.protos.InputReader.max_number_of_voxels",
index=5,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="target_assigner",
full_name="second.protos.InputReader.target_assigner",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kitti_info_path",
full_name="second.protos.InputReader.kitti_info_path",
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kitti_root_path",
full_name="second.protos.InputReader.kitti_root_path",
index=8,
number=9,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="shuffle_points",
full_name="second.protos.InputReader.shuffle_points",
index=9,
number=10,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_localization_noise_std",
full_name="second.protos.InputReader.groundtruth_localization_noise_std",
index=10,
number=11,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_rotation_uniform_noise",
full_name="second.protos.InputReader.groundtruth_rotation_uniform_noise",
index=11,
number=12,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_rotation_uniform_noise",
full_name="second.protos.InputReader.global_rotation_uniform_noise",
index=12,
number=13,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_scaling_uniform_noise",
full_name="second.protos.InputReader.global_scaling_uniform_noise",
index=13,
number=14,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_unknown_examples",
full_name="second.protos.InputReader.remove_unknown_examples",
index=14,
number=15,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_workers",
full_name="second.protos.InputReader.num_workers",
index=15,
number=16,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="anchor_area_threshold",
full_name="second.protos.InputReader.anchor_area_threshold",
index=16,
number=17,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_points_after_sample",
full_name="second.protos.InputReader.remove_points_after_sample",
index=17,
number=18,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_points_drop_percentage",
full_name="second.protos.InputReader.groundtruth_points_drop_percentage",
index=18,
number=19,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_drop_max_keep_points",
full_name="second.protos.InputReader.groundtruth_drop_max_keep_points",
index=19,
number=20,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_environment",
full_name="second.protos.InputReader.remove_environment",
index=20,
number=21,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="unlabeled_training",
full_name="second.protos.InputReader.unlabeled_training",
index=21,
number=22,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_random_rotation_range_per_object",
full_name="second.protos.InputReader.global_random_rotation_range_per_object",
index=22,
number=23,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="database_prep_steps",
full_name="second.protos.InputReader.database_prep_steps",
index=23,
number=24,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="database_sampler",
full_name="second.protos.InputReader.database_sampler",
index=24,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="use_group_id",
full_name="second.protos.InputReader.use_group_id",
index=25,
number=26,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="unlabeled_database_sampler",
full_name="second.protos.InputReader.unlabeled_database_sampler",
index=26,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=141,
serialized_end=1108,
)
_INPUTREADER.fields_by_name[
"target_assigner"
].message_type = second_dot_protos_dot_target__pb2._TARGETASSIGNER
_INPUTREADER.fields_by_name[
"database_prep_steps"
].message_type = second_dot_protos_dot_preprocess__pb2._DATABASEPREPROCESSINGSTEP
_INPUTREADER.fields_by_name[
"database_sampler"
].message_type = second_dot_protos_dot_sampler__pb2._SAMPLER
_INPUTREADER.fields_by_name[
"unlabeled_database_sampler"
].message_type = second_dot_protos_dot_sampler__pb2._SAMPLER
DESCRIPTOR.message_types_by_name["InputReader"] = _INPUTREADER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InputReader = _reflection.GeneratedProtocolMessageType(
"InputReader",
(_message.Message,),
dict(
DESCRIPTOR=_INPUTREADER,
__module__="second.protos.input_reader_pb2"
# @@protoc_insertion_point(class_scope:second.protos.InputReader)
),
)
_sym_db.RegisterMessage(InputReader)
# @@protoc_insertion_point(module_scope)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/input_reader.proto
import sys
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
target_pb2 as second_dot_protos_dot_target__pb2,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
preprocess_pb2 as second_dot_protos_dot_preprocess__pb2,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.protos import (
sampler_pb2 as second_dot_protos_dot_sampler__pb2,
)
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="second/protos/input_reader.proto",
package="second.protos",
syntax="proto3",
serialized_options=None,
serialized_pb=_b(
'\n second/protos/input_reader.proto\x12\rsecond.protos\x1a\x1asecond/proto' +
's/target.proto\x1a\x1esecond/pr' +
'otos/preprocess.proto\x1a\x1bsecond/protos/sampler.proto"\xc7\x07\n\x0bInputR' +
'eader\x12\x18\n\x10record_file_pa' +
'th\x18\x01 \x01(\t\x12\x13\n\x0b\x63lass_names\x18\x02 \x03(\t\x12\x12\n\nbatch' +
'_size\x18\x03 \x01(\r\x12\x16\n\x0emax_n' +
'um_epochs\x18\x04 \x01(\r\x12\x15\n\rprefetch_size\x18\x05 \x01(\r\x12\x1c\n\x14max_n' +
'umber_of_voxels\x18\x06 \x01(\r\x12\x36\n\x0ftarge' +
't_assigner\x18\x07 \x01(\x0b\x32\x1d.second.protos.TargetAssigner\x12\x17\n\x0fkitti' +
'_info_path\x18\x08 \x01(\t\x12\x17\n\x0fkitti_ro' +
'ot_path\x18\t \x01(\t\x12\x16\n\x0eshuffle_points\x18\n \x01(\x08\x12*\n"groundtr' +
'uth_localization_noise_std\x18\x0b \x03(\x02\x12*\n"groundt' +
'ruth_rotation_uniform_noise\x18\x0c \x03(\x02\x12%\n\x1dglobal_rotation_uniform_' +
'noise\x18\r \x03(\x02\x12$\n\x1cglobal_scaling_uniform_n' +
'oise\x18\x0e \x03(\x02\x12\x1f\n\x17remove_unknown_examples\x18\x0f \x01(\x08\x12\x13\n\x0bnu' +
'm_workers\x18\x10 \x01(\r\x12\x1d\n\x15\x61nch' +
'or_area_threshold\x18\x11 \x01(\x02\x12"\n\x1aremove_points_after_sample\x18\x12 \x01(\x08\x12*\n"g' +
'roundtruth_points_drop_percenta' +
'ge\x18\x13 \x01(\x02\x12(\n groundtruth_drop_max_keep_points\x18\x14 \x01(\r\x12\x1a\n\x12remov' +
'e_environment\x18\x15 \x01(\x08\x12\x1a\n\x12u' +
'nlabeled_training\x18\x16 \x01(\x08\x12/\n\'global_random_rotation_range_per_obj' +
'ect\x18\x17 \x03(\x02\x12\x45\n\x13\x64\x61tabase_prep' +
'_steps\x18\x18 \x03(\x0b\x32(.second.protos.DatabasePreproces' +
'singStep\x12\x30\n\x10\x64\x61tabase_sampler\x18\x19 \x01(\x0b\x32\x16.sec' +
'ond.protos.Sampler\x12\x14\n\x0cuse_group_id\x18\x1a \x01(\x08\x12:\n\x1aunla' +
'beled_database_sampler\x18\x1b \x01(\x0b\x32\x16.second.p' +
'rotos.Samplerb\x06proto3'
),
dependencies=[
second_dot_protos_dot_target__pb2.DESCRIPTOR,
second_dot_protos_dot_preprocess__pb2.DESCRIPTOR,
second_dot_protos_dot_sampler__pb2.DESCRIPTOR,
],
)
_INPUTREADER = _descriptor.Descriptor(
name="InputReader",
full_name="second.protos.InputReader",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="record_file_path",
full_name="second.protos.InputReader.record_file_path",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="class_names",
full_name="second.protos.InputReader.class_names",
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="second.protos.InputReader.batch_size",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_num_epochs",
full_name="second.protos.InputReader.max_num_epochs",
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="prefetch_size",
full_name="second.protos.InputReader.prefetch_size",
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_number_of_voxels",
full_name="second.protos.InputReader.max_number_of_voxels",
index=5,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="target_assigner",
full_name="second.protos.InputReader.target_assigner",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kitti_info_path",
full_name="second.protos.InputReader.kitti_info_path",
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="kitti_root_path",
full_name="second.protos.InputReader.kitti_root_path",
index=8,
number=9,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="shuffle_points",
full_name="second.protos.InputReader.shuffle_points",
index=9,
number=10,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_localization_noise_std",
full_name="second.protos.InputReader.groundtruth_localization_noise_std",
index=10,
number=11,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_rotation_uniform_noise",
full_name="second.protos.InputReader.groundtruth_rotation_uniform_noise",
index=11,
number=12,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_rotation_uniform_noise",
full_name="second.protos.InputReader.global_rotation_uniform_noise",
index=12,
number=13,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_scaling_uniform_noise",
full_name="second.protos.InputReader.global_scaling_uniform_noise",
index=13,
number=14,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_unknown_examples",
full_name="second.protos.InputReader.remove_unknown_examples",
index=14,
number=15,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_workers",
full_name="second.protos.InputReader.num_workers",
index=15,
number=16,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="anchor_area_threshold",
full_name="second.protos.InputReader.anchor_area_threshold",
index=16,
number=17,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_points_after_sample",
full_name="second.protos.InputReader.remove_points_after_sample",
index=17,
number=18,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_points_drop_percentage",
full_name="second.protos.InputReader.groundtruth_points_drop_percentage",
index=18,
number=19,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="groundtruth_drop_max_keep_points",
full_name="second.protos.InputReader.groundtruth_drop_max_keep_points",
index=19,
number=20,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="remove_environment",
full_name="second.protos.InputReader.remove_environment",
index=20,
number=21,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="unlabeled_training",
full_name="second.protos.InputReader.unlabeled_training",
index=21,
number=22,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="global_random_rotation_range_per_object",
full_name="second.protos.InputReader.global_random_rotation_range_per_object",
index=22,
number=23,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="database_prep_steps",
full_name="second.protos.InputReader.database_prep_steps",
index=23,
number=24,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="database_sampler",
full_name="second.protos.InputReader.database_sampler",
index=24,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="use_group_id",
full_name="second.protos.InputReader.use_group_id",
index=25,
number=26,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="unlabeled_database_sampler",
full_name="second.protos.InputReader.unlabeled_database_sampler",
index=26,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=141,
serialized_end=1108,
)
_INPUTREADER.fields_by_name[
"target_assigner"
].message_type = second_dot_protos_dot_target__pb2._TARGETASSIGNER
_INPUTREADER.fields_by_name[
"database_prep_steps"
].message_type = second_dot_protos_dot_preprocess__pb2._DATABASEPREPROCESSINGSTEP
_INPUTREADER.fields_by_name[
"database_sampler"
].message_type = second_dot_protos_dot_sampler__pb2._SAMPLER
_INPUTREADER.fields_by_name[
"unlabeled_database_sampler"
].message_type = second_dot_protos_dot_sampler__pb2._SAMPLER
DESCRIPTOR.message_types_by_name["InputReader"] = _INPUTREADER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InputReader = _reflection.GeneratedProtocolMessageType(
"InputReader",
(_message.Message,),
dict(
DESCRIPTOR=_INPUTREADER,
__module__="second.protos.input_reader_pb2"
# @@protoc_insertion_point(class_scope:second.protos.InputReader)
),
)
_sym_db.RegisterMessage(InputReader)
# @@protoc_insertion_point(module_scope)
|
en
| 0.375345
|
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: second/protos/input_reader.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:second.protos.InputReader) # @@protoc_insertion_point(module_scope)
| 1.191416
| 1
|
gtk_modules/mouse.py
|
henrikmidtiby/gtk_modules
| 0
|
6626364
|
from gtk_modules import MouseSignals
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
class Mouse:
    """Bridge GTK mouse events on an EventBox to MouseSignals emissions.

    Emits '<left|middle|right>_mouse_<press|release|move>' with the event
    coordinates; press signals additionally carry the draw-area size.
    """

    # GDK button number -> signal-name prefix
    _BUTTON_NAMES = {1: 'left', 2: 'middle', 3: 'right'}

    def __init__(self, event_box=None):
        # Wrap the supplied event box, or create a fresh one.
        self.event_box = Gtk.EventBox() if event_box is None else event_box
        self.signals = MouseSignals()
        for gtk_signal, handler in (
                ('realize', self._realize),
                ('button-press-event', self.press),
                ('button-release-event', self.release),
                ('motion_notify_event', self.move)):
            self.event_box.connect(gtk_signal, handler)
        # (width, height) of the child draw area, refreshed on size-allocate.
        self.size = [0, 0]

    def _realize(self, widget):
        # Once realized, track the child's allocated size.
        widget.get_child().connect('size-allocate', self._get_size)

    def _get_size(self, _, allocation):
        self.size = (allocation.width, allocation.height)

    def press(self, _, event):
        prefix = self._BUTTON_NAMES.get(event.button)
        if prefix is not None:
            self.signals.emit(prefix + '_mouse_press',
                              event.x, event.y, *self.size)

    def release(self, _, event):
        prefix = self._BUTTON_NAMES.get(event.button)
        if prefix is not None:
            self.signals.emit(prefix + '_mouse_release', event.x, event.y)

    def move(self, _, event):
        # While dragging, report the move for whichever button is held;
        # left has priority when several masks are set (matches the
        # original elif chain).
        for mask, prefix in ((Gdk.ModifierType.BUTTON1_MASK, 'left'),
                             (Gdk.ModifierType.BUTTON2_MASK, 'middle'),
                             (Gdk.ModifierType.BUTTON3_MASK, 'right')):
            if event.state & mask:
                self.signals.emit(prefix + '_mouse_move', event.x, event.y)
                break
|
from gtk_modules import MouseSignals
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
class Mouse:
    """Translate GTK mouse events on an EventBox into MouseSignals.

    Emits '<left|middle|right>_mouse_<press|release|move>' signals carrying
    the event coordinates; press signals also carry the draw-area size.
    """
    def __init__(self, event_box=None):
        # Wrap an existing event box, or create a fresh one.
        if event_box is None:
            self.event_box = Gtk.EventBox()
        else:
            self.event_box = event_box
        self.signals = MouseSignals()
        self.event_box.connect('realize', self._realize)
        self.event_box.connect('button-press-event', self.press)
        self.event_box.connect('button-release-event', self.release)
        self.event_box.connect('motion_notify_event', self.move)
        # (width, height) of the child draw area, updated on size-allocate.
        self.size = [0, 0]
    def _realize(self, widget):
        # Once the widget hierarchy is realized, track the child's size.
        draw_area = widget.get_child()
        draw_area.connect('size-allocate', self._get_size)
    def _get_size(self, _, allocation):
        self.size = (allocation.width, allocation.height)
    def press(self, _, event):
        # GDK button numbers: 1 = left, 2 = middle, 3 = right.
        if event.button == 1:
            self.signals.emit('left_mouse_press', event.x, event.y, *self.size)
        elif event.button == 2:
            self.signals.emit('middle_mouse_press', event.x, event.y, *self.size)
        elif event.button == 3:
            self.signals.emit('right_mouse_press', event.x, event.y, *self.size)
    def release(self, _, event):
        if event.button == 1:
            self.signals.emit('left_mouse_release', event.x, event.y)
        elif event.button == 2:
            self.signals.emit('middle_mouse_release', event.x, event.y)
        elif event.button == 3:
            self.signals.emit('right_mouse_release', event.x, event.y)
    def move(self, _, event):
        # During a drag, emit for whichever button mask is set; the elif
        # chain gives the left button priority when several are held.
        if event.state & Gdk.ModifierType.BUTTON1_MASK:
            self.signals.emit('left_mouse_move', event.x, event.y)
        elif event.state & Gdk.ModifierType.BUTTON2_MASK:
            self.signals.emit('middle_mouse_move', event.x, event.y)
        elif event.state & Gdk.ModifierType.BUTTON3_MASK:
            self.signals.emit('right_mouse_move', event.x, event.y)
|
none
| 1
| 2.494024
| 2
|
|
pylayers/gis/test/algo_search_convex.py
|
usmanwardag/pylayers
| 143
|
6626365
|
#-*- coding:Utf-8 -*-
# from pylayers.gis.layout import *
# from itertools import combinations
# from scipy.spatial import Delaunay
# import shapely.geometry as sh
# L = Layout('WHERE1_2.ini')
# L.build('t')
# # L.dumpr()
# L.showG('s')
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# nop = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# uc = np.array(uu)[utconvex]
# ucs = np.array(uus)[utsconvex]
# puc = array(map(lambda x: L.Gs.pos[x], uc))
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# trid=Delaunay(pucs)
# tri =trid.simplices
# # filter tri in the cycle
# kt = []
# pkt = []
# for t in tri:
# ts = sh.Polygon(pucs[t])
# U = ts.intersection(L.Gt.node[n]['polyg'])
# if not U.area < 1e-2:
# #pkt.append(pucs[t])
# kt.append(t)
# # # ptt = puc[tt]
# plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# for n in L.Gt.nodes():
# if n > 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# # uc = np.array(uu)[utconvex]
# ucs = np.array(uus)[utsconvex]
# puc = array(map(lambda x: L.Gs.pos[x], uc))
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# if len(ucs) >2:
# trid=Delaunay(pucs)
# tri =trid.simplices
# # filter tri in the cycle
# kt = []
# pkt = []
# for t in tri:
# ts = sh.Polygon(pucs[t])
# U = L.Gt.node[n]['polyg'].contains(ts)
# if U:
# #pkt.append(pucs[t])
# kt.append(t)
# # # ptt = puc[tt]
# try:
# plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# except:
# pass
from pylayers.gis.layout import *
from itertools import combinations
from scipy.spatial import Delaunay
import shapely.geometry as sh
# Copy the test layout into the project tree, then build its topological
# graph and display the structure.
Lfile = 'scattering_nonconvex.ini'
# NOTE(review): absolute, user-specific paths — this script only runs on the
# original author's machine as written.
data = '/home/niamiot/Documents/code/pylayers/data/struc/ini/'+Lfile
proj = '/home/niamiot/Documents/Pylayers_project/P1/struc/ini/'+Lfile
shutil.copyfile(data,proj)
L = Layout(Lfile,force=True)
#L.dumpr()
L.build('t')
fig,ax=L.showG('s',labels=True)
def polyplot(poly,fig=[]):
    """Overlay a list of polygons on the layout structure plot.

    Parameters
    ----------
    poly : list
        polygons exposing ``plot(fig=..., ax=..., color=..., alpha=...)``
        (pylayers ``geu.Polygon``).
    fig : matplotlib figure, optional
        figure to draw on; a new one is created when omitted.
        (The ``[]`` sentinel is kept for backward compatibility; it is
        never mutated, so the mutable-default hazard is benign.)
    """
    if fig == []:
        fig = plt.figure()
    # Always draw the layout so ``ax`` is defined: the original only
    # assigned ax inside the ``fig == []`` branch, so passing an existing
    # figure (e.g. polyplot(cpolys, fig=plt.gcf())) raised NameError.
    fig, ax = L.showG('s', fig=fig)
    color = ['r', 'b', 'g'] * 10
    for ip, p in enumerate(poly):
        fig, ax = p.plot(fig=fig, ax=ax, color=color[ip], alpha=0.5)
# lacy : list of added cycles
lacy =[]
# NOTE(review): indentation reconstructed from syntax (the source dump
# carries none).  The script splits every non-convex indoor cycle of the
# Layout L into convex sub-cycles: Delaunay triangulation of the cycle's
# diffracting points, then greedy merging of adjacent triangles while the
# union stays convex, then re-registration of the new cycles in L.Gt.
for n in L.Gt.nodes():
    #if indoor cycles
    if n > 0:
        ncy=max(L.Gt.nodes())
        ####
        #### 1 Determine if pt convex in cycle
        ####
        if L.Gt.node[n]['indoor']:
            no = L.Gt.node[n]['cycle'].cycle
            tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
            # diffracting points
            utconvex = np.nonzero(tcc == 1)[0]
            #all possible diffracting point (in and out of cycle)
            utsconvex = np.nonzero(abs(tcc) == 1)[0]
            if len(utconvex) != 0:
                # get points ID in the cycle (negative ids are points in Gs)
                uus = filter(lambda x: x<0,no)
                # get point convex ID
                uc = np.array(uus)[utconvex]
                ucs = np.array(uus)[utsconvex]
                pucs = array(map(lambda x: L.Gs.pos[x], ucs))
                pucs = np.vstack((pucs,pucs[-1]))
                ####
                #### 2 perform a Delaunay Partioning
                ####
                if len(ucs) >2:
                    trid=Delaunay(pucs)
                    tri =trid.simplices
                    aucs = np.arange(len(ucs))
                    # filter tri in the cycle
                    kt = []
                    pkt = []
                    polys = []
                    naw = []
                    for t in tri:
                        ts = geu.Polygon(pucs[t])
                        # check if the new polygon is contained into
                        #the original polygon (non guaratee by Delaunay)
                        #U = L.Gt.node[n]['polyg'].contains(ts)
                        U = L.Gt.node[n]['polyg'].intersection(ts)
                        if not isinstance(U,sh.MultiPolygon):
                            U=[U]
                        for p in U:
                            if L.Gt.node[n]['polyg'].contains(p):
                                cp = geu.Polygon(p)
                                cp.setvnodes(L)
                                uaw = np.where(cp.vnodes == 0)[0]
                                lvn = len(cp.vnodes)
                                for i in uaw:
                                    #keep trace of created airwalls, because some
                                    #of them will be destroyed in step 3.
                                    naw.append(L.add_segment(
                                        cp.vnodes[np.mod(i-1,lvn)],
                                        cp.vnodes[np.mod(i+1,lvn)]
                                        ,name='AIR'))
                                polys.append(cp)
                    # NOTE(review): interactive debugger breakpoint left in —
                    # blocks any non-interactive run of this script.
                    import ipdb
                    ipdb.set_trace()
                    #
                    # 3. merge delaunay triangulation in order to obtain
                    # the larger convex polygons partioning
                    #
                    cpolys = []
                    nbpolys = len(polys)
                    while polys !=[]:
                        p = polys.pop(0)
                        for ip2,p2 in enumerate(polys):
                            conv=False
                            inter = p.intersection(p2)
                            #if 2 triangles have a common segment
                            pold = p
                            if isinstance(inter,sh.LineString):
                                p = p + p2
                                if p.isconvex():
                                    if p.area < 1e-1:
                                        # NOTE(review): second breakpoint,
                                        # fires on degenerate merges.
                                        import ipdb
                                        ipdb.set_trace()
                                    polys.pop(ip2)
                                    polys.insert(0,p)
                                    conv=True
                                    break
                                else:
                                    # if pold not in cpolys:
                                    #     cpolys.append(pold)
                                    p = pold
                        # if (ip2 >= len(polys)):# and (conv):
                        # if conv :
                        #    if p not in cpolys:
                        #        cpolys.append(p)
                        if not conv:#else:
                            if pold not in cpolys:
                                cpolys.append(pold)
                        if len(polys) == 0:
                            cpolys.append(p)
                    # polyplot(cpolys,fig=plt.gcf())
                    # plt.draw()
                    # import ipdb
                    # ipdb.set_trace()
                    ####
                    #### 4. ensure the correct vnode numerotaion of the polygons
                    #### and remove unecessary airwalls
                    # ncpol : new created polygons
                    ncpol = []
                    vnodes=[]
                    for p in cpolys:
                        interpoly = L.Gt.node[n]['polyg'].intersection(p)
                        if isinstance(interpoly,sh.MultiPolygon):
                            raise AttributeError('multi polygon encountered')
                        else :
                            ptmp = geu.Polygon(interpoly)
                            ptmp.setvnodes(L)
                            ncpol.append(ptmp)
                            vnodes.extend(ptmp.vnodes)
                    #air walls to be deleted (because origin Delaunay triangle
                    # has been merged )
                    daw = filter(lambda x: x not in vnodes,naw)
                    [L.del_segment(d,verbose=False) for d in daw]
                    nbpolys=len(ncpol)
                    #remove old cycle
                    L.Gt.remove_node(n)
                    # lcyid: (new) list of cycle id
                    # NOTE(review): list + range concatenation — Python 2 only.
                    lcyid = [n] + range(ncy+1,ncy+(nbpolys))
                    lacy.extend(lcyid)
                    for ip,p in enumerate(ncpol):
                        #p.coorddeter()
                        cyid = lcyid[ip]
                        # replace by new ones
                        lnode = p.vnodes
                        G = nx.subgraph(L.Gs,lnode)
                        G.pos = {}
                        G.pos.update({l: L.Gs.pos[l] for l in lnode})
                        cy = cycl.Cycle(G,lnode=p.vnodes)
                        L.Gt.add_node(cyid,cycle=cy)
                        # WARNING
                        # recreate polygon is mandatory otherwise cycle.cycle and polygon.vnodes
                        #are shifted.
                        L.Gt.node[cyid]['polyg'] = p#geu.Polygon(p.xy,cy.cycle)
                        L.Gt.node[cyid]['indoor']=True
                        L.Gt.node[cyid]['isopen']=True
                        L.Gt.pos[cyid] = tuple(cy.g)
# Rebuild cycle-adjacency edges among the (possibly new) positive cycles.
Gtnodes= filter(lambda x: x>0,L.Gt.nodes())
for k in combinations(Gtnodes, 2):
    vnodes0 = np.array(L.Gt.node[k[0]]['cycle'].cycle)
    vnodes1 = np.array(L.Gt.node[k[1]]['cycle'].cycle)
    #
    # Connect Cycles if they share at least one segments
    #
    intersection_vnodes = np.intersect1d(vnodes0, vnodes1)
    if len(intersection_vnodes) > 1:
        segment = intersection_vnodes[np.where(intersection_vnodes>0)]
        L.Gt.add_edge(k[0], k[1],segment= segment)
#update self.Gs.node[x]['ncycles']
L._updGsncy()
#add outside cycle to Gs.node[x]['ncycles']
L._addoutcy()
#update interaction list into Gt.nodes (cycles)
L._interlist(nodelist=lacy)
# polyplot(ncpol)
# for n in L.Gt.nodes():
# if n > 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# # diffracting points
# utconvex = np.nonzero(tcc == 1)[0]
# #all possible diffracting point (in and out of cycle)
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# uc = np.array(uus)[utconvex]
# ucs = np.array(uus)[utsconvex]
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# pucs = np.vstack((pucs,pucs[-1]))
# if len(ucs) >2:
# trid=Delaunay(pucs)
# tri =trid.simplices
# utri = ucs[tri]
# # filter tri in the cycle
# kt = []
# pkt = []
# polys = []
# for t in tri:
# ts = geu.Polygon(pucs[t])
# #check if inside the original polygon
# # U = L.Gt.node[n]['polyg'].contains(ts)
# U = L.Gt.node[n]['polyg'].intersection(ts)
# ats = ts.area
# # fig,ax=ts.plot(fig=fig,ax=ax)
# if U.area > (1*ats/100):
# #pkt.append(pucs[t])
# kt.append(t)
# polys.append(ts)
# polyplot(polys)
# # # ptt = puc[tt]
# # try:
# # plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# # except:
# # pass
# kt = array(kt)
# npttri = np.arange(0,np.max(kt))
# # search for each triangle, which is connecte
# conecttri = [np.where(kt == i) for i in npttri]
# cpolys = []
# nbpolys = len(polys)
# while polys !=[]:
# p = polys.pop(0)
# for ip2,p2 in enumerate(polys):
# conv=False
# inter = p.intersection(p2)
# #if 2 triangles have a common segment
# pold = p
# if isinstance(inter,sh.LineString):
# p = p + p2
# if p.isconvex():
# polys.pop(ip2)
# polys.insert(0,p)
# conv=True
# break
# elif len(cpolys) != 0:
# if pold != cpolys[-1]:
# cpolys.append(pold)
# p = pold
# else :
# cpolys.append(pold)
# p = pold
# # if (ip2 >= len(polys)):# and (conv):
# if conv :
# cpolys.append(p)
# else:
# cpolys.append(pold)
# if len(polys) == 0:
# cpolys.append(p)
# # polyplot(polys)
# # import ipdb
# # ipdb.set_trace()
# polyplot(cpolys)
###################################################
#################################################
####################################################
###################################################
#################################################
####################################################
###################################################
#################################################
####################################################
# for n in range(nbpolys):
# p = polys.pop(-1)
# ip = iter(polys)
# for p2 in ip:
# inter = p.intersection(p2)
# if isinstance(inter,sh.LineString):
# import ipdb
# ipdb.set_trace()
# try:
# mpold = mp
# if mp.touches(p):
# mp = mp + p
# if mp.isconvex():
# mpold = mp
# else :
# cpolys.append(mpold)
# del mp
# else
# except:
# mp = p
################
#############""
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# nop = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# ii = filter(lambda x: x<0,no)
# # get point convex ID
# ic = np.array(ii)[utconvex]
# pic = array(map(lambda x: L.Gs.pos[x], ic))
# luc = [nqp.where(ic[x]==no)[0][0] for x in range(len(ic))]
# # to close the cycle
# luc.append(luc[0])
# # distance between each uc
# duc = np.roll(np.mod(np.diff(luc),len(no)),1)
# rnp.mod(np.diff(luc[::-1]),len(no))
# lenic = len(ic)
# ptbl=[]
# for u in range(lenic-1,-1,-1):
# # find which convex point is the closest but not directly connected
# if duc[u-1] == duc[np.mod(u+1,lenic)]:
# import ipdb
# ipdb.set_trace()
# if (duc[u-1] < duc[np.mod(u+1,lenic)]) and duc[u-1] > 2:
# #node to be linked
# tbl = no[luc[np.mod(u+1,lenic)]]
# else:
# tbl = no[luc[u-1]]
# #node to be linked
# ptbl.append(L.Gs.pos[tbl])
# X=np.array(ptbl)
# plu.displot(X.T,pic.T)
# ################
# #############""
# for n in L.Gt.nodes():
# if n != 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# ii = filter(lambda x: x<0,no)
# # get point convex ID
# ic = np.array(ii)[utconvex]
# pic = array(map(lambda x: L.Gs.pos[x], ic))
# luc = [np.where(ic[x]==no)[0][0] for x in range(len(ic))]
# lenuc = len(luc)
# # to close the cycle
# luc.append(luc[0])
# duc = np.roll(np.mod(np.diff(luc),len(no)),1)
# # distance between each uc
# ptbl=[]
# for u in range(len(duc)):
# um = np.mod(u-1,lenuc)
# up = np.mod(u+1,lenuc)
# print no[luc[u]],no[luc[um]]
# print no[luc[u]],no[luc[up]]
# if (duc[u] < duc[up]) and (duc[u] >2):
# print 'choose',no[luc[u]],no[luc[um]]
# tbl = no[luc[um]]
# ptbl.append([pic[u],pic[um]])
# elif duc[up] >2:
# print 'choose',no[luc[u]],no[luc[up]]
# tbl = no[luc[up]]
# ptbl.append([pic[u],pic[up]])
# # import ipdb
# # ipdb.set_trace()
# X=np.array(ptbl)
# plu.displot(X[:,0].T,X[:,1].T)
# import ipdb
# ipdb.set_trace()
# import ipdb
# ipdb.set_trace()
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# lno = len(no)
# nop = L.Gt.node[n]['cycle'].cycqle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# # get point convex ID (utconvex*2 because point follow segment in
# # cycles. and utconvex only concern points)
# uc = no[utconvex*2]
# pc = array(map(lambda x: L.Gs.pos[x], uc))
# # id of adjacent segemnts 1
# ucm = no[np.mod((utconvex*2)-1,lno)]
# pcm = array(map(lambda x: L.Gs.pos[x], ucm))
# # id of adjacent segemnts 2
# ucp = no[np.mod((utconvex*2)+1,lno)]
# pcp = array(map(lambda x: L.Gs.pos[x], ucp))
# # build vector director of segment1-point and segment 2-point
# vcm = (pcm-pc)/(np.sum(pcm-pc,axis=0))
# vcp = (pcp-pc)/(np.sum(pcp-pc,axis=0))
# import ipdb
# ipdb.set_trace()
# ss = L.seginline(pc[0],pcm[0])
# if len(uc) > 1:
# for nw in combinations(uc,2):
# pf = map(lambda x: self.Gw.pos[x],nw)
# pf = np.array((pf))
# if self.seginline(pf[0],pf[1]).shape[1] <= 1:
# d = np.sqrt(np.sum((pf[0]-pf[1])**2))
# self.Gw.add_edges_from([(nw[0],nw[1])],weight=d)
|
#-*- coding:Utf-8 -*-
# from pylayers.gis.layout import *
# from itertools import combinations
# from scipy.spatial import Delaunay
# import shapely.geometry as sh
# L = Layout('WHERE1_2.ini')
# L.build('t')
# # L.dumpr()
# L.showG('s')
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# nop = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# uc = np.array(uu)[utconvex]
# ucs = np.array(uus)[utsconvex]
# puc = array(map(lambda x: L.Gs.pos[x], uc))
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# trid=Delaunay(pucs)
# tri =trid.simplices
# # filter tri in the cycle
# kt = []
# pkt = []
# for t in tri:
# ts = sh.Polygon(pucs[t])
# U = ts.intersection(L.Gt.node[n]['polyg'])
# if not U.area < 1e-2:
# #pkt.append(pucs[t])
# kt.append(t)
# # # ptt = puc[tt]
# plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# for n in L.Gt.nodes():
# if n > 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# # uc = np.array(uu)[utconvex]
# ucs = np.array(uus)[utsconvex]
# puc = array(map(lambda x: L.Gs.pos[x], uc))
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# if len(ucs) >2:
# trid=Delaunay(pucs)
# tri =trid.simplices
# # filter tri in the cycle
# kt = []
# pkt = []
# for t in tri:
# ts = sh.Polygon(pucs[t])
# U = L.Gt.node[n]['polyg'].contains(ts)
# if U:
# #pkt.append(pucs[t])
# kt.append(t)
# # # ptt = puc[tt]
# try:
# plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# except:
# pass
from pylayers.gis.layout import *
from itertools import combinations
from scipy.spatial import Delaunay
import shapely.geometry as sh
# Copy the test layout into the project tree, then build its topological
# graph and display the structure.
Lfile = 'scattering_nonconvex.ini'
# NOTE(review): absolute, user-specific paths — this script only runs on the
# original author's machine as written.
data = '/home/niamiot/Documents/code/pylayers/data/struc/ini/'+Lfile
proj = '/home/niamiot/Documents/Pylayers_project/P1/struc/ini/'+Lfile
shutil.copyfile(data,proj)
L = Layout(Lfile,force=True)
#L.dumpr()
L.build('t')
fig,ax=L.showG('s',labels=True)
def polyplot(poly,fig=[]):
    """Overlay a list of polygons on the layout structure plot.

    Parameters
    ----------
    poly : list
        polygons exposing ``plot(fig=..., ax=..., color=..., alpha=...)``
        (pylayers ``geu.Polygon``).
    fig : matplotlib figure, optional
        figure to draw on; a new one is created when omitted.
        (The ``[]`` sentinel is kept for backward compatibility; it is
        never mutated, so the mutable-default hazard is benign.)
    """
    if fig == []:
        fig = plt.figure()
    # Always draw the layout so ``ax`` is defined: the original only
    # assigned ax inside the ``fig == []`` branch, so passing an existing
    # figure (e.g. polyplot(cpolys, fig=plt.gcf())) raised NameError.
    fig, ax = L.showG('s', fig=fig)
    color = ['r', 'b', 'g'] * 10
    for ip, p in enumerate(poly):
        fig, ax = p.plot(fig=fig, ax=ax, color=color[ip], alpha=0.5)
# lacy : list of added cycles
lacy =[]
# NOTE(review): indentation reconstructed from syntax (the source dump
# carries none).  The script splits every non-convex indoor cycle of the
# Layout L into convex sub-cycles: Delaunay triangulation of the cycle's
# diffracting points, then greedy merging of adjacent triangles while the
# union stays convex, then re-registration of the new cycles in L.Gt.
for n in L.Gt.nodes():
    #if indoor cycles
    if n > 0:
        ncy=max(L.Gt.nodes())
        ####
        #### 1 Determine if pt convex in cycle
        ####
        if L.Gt.node[n]['indoor']:
            no = L.Gt.node[n]['cycle'].cycle
            tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
            # diffracting points
            utconvex = np.nonzero(tcc == 1)[0]
            #all possible diffracting point (in and out of cycle)
            utsconvex = np.nonzero(abs(tcc) == 1)[0]
            if len(utconvex) != 0:
                # get points ID in the cycle (negative ids are points in Gs)
                uus = filter(lambda x: x<0,no)
                # get point convex ID
                uc = np.array(uus)[utconvex]
                ucs = np.array(uus)[utsconvex]
                pucs = array(map(lambda x: L.Gs.pos[x], ucs))
                pucs = np.vstack((pucs,pucs[-1]))
                ####
                #### 2 perform a Delaunay Partioning
                ####
                if len(ucs) >2:
                    trid=Delaunay(pucs)
                    tri =trid.simplices
                    aucs = np.arange(len(ucs))
                    # filter tri in the cycle
                    kt = []
                    pkt = []
                    polys = []
                    naw = []
                    for t in tri:
                        ts = geu.Polygon(pucs[t])
                        # check if the new polygon is contained into
                        #the original polygon (non guaratee by Delaunay)
                        #U = L.Gt.node[n]['polyg'].contains(ts)
                        U = L.Gt.node[n]['polyg'].intersection(ts)
                        if not isinstance(U,sh.MultiPolygon):
                            U=[U]
                        for p in U:
                            if L.Gt.node[n]['polyg'].contains(p):
                                cp = geu.Polygon(p)
                                cp.setvnodes(L)
                                uaw = np.where(cp.vnodes == 0)[0]
                                lvn = len(cp.vnodes)
                                for i in uaw:
                                    #keep trace of created airwalls, because some
                                    #of them will be destroyed in step 3.
                                    naw.append(L.add_segment(
                                        cp.vnodes[np.mod(i-1,lvn)],
                                        cp.vnodes[np.mod(i+1,lvn)]
                                        ,name='AIR'))
                                polys.append(cp)
                    # NOTE(review): interactive debugger breakpoint left in —
                    # blocks any non-interactive run of this script.
                    import ipdb
                    ipdb.set_trace()
                    #
                    # 3. merge delaunay triangulation in order to obtain
                    # the larger convex polygons partioning
                    #
                    cpolys = []
                    nbpolys = len(polys)
                    while polys !=[]:
                        p = polys.pop(0)
                        for ip2,p2 in enumerate(polys):
                            conv=False
                            inter = p.intersection(p2)
                            #if 2 triangles have a common segment
                            pold = p
                            if isinstance(inter,sh.LineString):
                                p = p + p2
                                if p.isconvex():
                                    if p.area < 1e-1:
                                        # NOTE(review): second breakpoint,
                                        # fires on degenerate merges.
                                        import ipdb
                                        ipdb.set_trace()
                                    polys.pop(ip2)
                                    polys.insert(0,p)
                                    conv=True
                                    break
                                else:
                                    # if pold not in cpolys:
                                    #     cpolys.append(pold)
                                    p = pold
                        # if (ip2 >= len(polys)):# and (conv):
                        # if conv :
                        #    if p not in cpolys:
                        #        cpolys.append(p)
                        if not conv:#else:
                            if pold not in cpolys:
                                cpolys.append(pold)
                        if len(polys) == 0:
                            cpolys.append(p)
                    # polyplot(cpolys,fig=plt.gcf())
                    # plt.draw()
                    # import ipdb
                    # ipdb.set_trace()
                    ####
                    #### 4. ensure the correct vnode numerotaion of the polygons
                    #### and remove unecessary airwalls
                    # ncpol : new created polygons
                    ncpol = []
                    vnodes=[]
                    for p in cpolys:
                        interpoly = L.Gt.node[n]['polyg'].intersection(p)
                        if isinstance(interpoly,sh.MultiPolygon):
                            raise AttributeError('multi polygon encountered')
                        else :
                            ptmp = geu.Polygon(interpoly)
                            ptmp.setvnodes(L)
                            ncpol.append(ptmp)
                            vnodes.extend(ptmp.vnodes)
                    #air walls to be deleted (because origin Delaunay triangle
                    # has been merged )
                    daw = filter(lambda x: x not in vnodes,naw)
                    [L.del_segment(d,verbose=False) for d in daw]
                    nbpolys=len(ncpol)
                    #remove old cycle
                    L.Gt.remove_node(n)
                    # lcyid: (new) list of cycle id
                    # NOTE(review): list + range concatenation — Python 2 only.
                    lcyid = [n] + range(ncy+1,ncy+(nbpolys))
                    lacy.extend(lcyid)
                    for ip,p in enumerate(ncpol):
                        #p.coorddeter()
                        cyid = lcyid[ip]
                        # replace by new ones
                        lnode = p.vnodes
                        G = nx.subgraph(L.Gs,lnode)
                        G.pos = {}
                        G.pos.update({l: L.Gs.pos[l] for l in lnode})
                        cy = cycl.Cycle(G,lnode=p.vnodes)
                        L.Gt.add_node(cyid,cycle=cy)
                        # WARNING
                        # recreate polygon is mandatory otherwise cycle.cycle and polygon.vnodes
                        #are shifted.
                        L.Gt.node[cyid]['polyg'] = p#geu.Polygon(p.xy,cy.cycle)
                        L.Gt.node[cyid]['indoor']=True
                        L.Gt.node[cyid]['isopen']=True
                        L.Gt.pos[cyid] = tuple(cy.g)
# Rebuild cycle-adjacency edges among the (possibly new) positive cycles.
Gtnodes= filter(lambda x: x>0,L.Gt.nodes())
for k in combinations(Gtnodes, 2):
    vnodes0 = np.array(L.Gt.node[k[0]]['cycle'].cycle)
    vnodes1 = np.array(L.Gt.node[k[1]]['cycle'].cycle)
    #
    # Connect Cycles if they share at least one segments
    #
    intersection_vnodes = np.intersect1d(vnodes0, vnodes1)
    if len(intersection_vnodes) > 1:
        segment = intersection_vnodes[np.where(intersection_vnodes>0)]
        L.Gt.add_edge(k[0], k[1],segment= segment)
#update self.Gs.node[x]['ncycles']
L._updGsncy()
#add outside cycle to Gs.node[x]['ncycles']
L._addoutcy()
#update interaction list into Gt.nodes (cycles)
L._interlist(nodelist=lacy)
# polyplot(ncpol)
# for n in L.Gt.nodes():
# if n > 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# # diffracting points
# utconvex = np.nonzero(tcc == 1)[0]
# #all possible diffracting point (in and out of cycle)
# utsconvex = np.nonzero(abs(tcc) == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uus = filter(lambda x: x<0,no)
# # get point convex ID
# uc = np.array(uus)[utconvex]
# ucs = np.array(uus)[utsconvex]
# pucs = array(map(lambda x: L.Gs.pos[x], ucs))
# pucs = np.vstack((pucs,pucs[-1]))
# if len(ucs) >2:
# trid=Delaunay(pucs)
# tri =trid.simplices
# utri = ucs[tri]
# # filter tri in the cycle
# kt = []
# pkt = []
# polys = []
# for t in tri:
# ts = geu.Polygon(pucs[t])
# #check if inside the original polygon
# # U = L.Gt.node[n]['polyg'].contains(ts)
# U = L.Gt.node[n]['polyg'].intersection(ts)
# ats = ts.area
# # fig,ax=ts.plot(fig=fig,ax=ax)
# if U.area > (1*ats/100):
# #pkt.append(pucs[t])
# kt.append(t)
# polys.append(ts)
# polyplot(polys)
# # # ptt = puc[tt]
# # try:
# # plt.triplot(pucs[:,0],pucs[:,1], np.array(kt))
# # except:
# # pass
# kt = array(kt)
# npttri = np.arange(0,np.max(kt))
# # search for each triangle, which is connecte
# conecttri = [np.where(kt == i) for i in npttri]
# cpolys = []
# nbpolys = len(polys)
# while polys !=[]:
# p = polys.pop(0)
# for ip2,p2 in enumerate(polys):
# conv=False
# inter = p.intersection(p2)
# #if 2 triangles have a common segment
# pold = p
# if isinstance(inter,sh.LineString):
# p = p + p2
# if p.isconvex():
# polys.pop(ip2)
# polys.insert(0,p)
# conv=True
# break
# elif len(cpolys) != 0:
# if pold != cpolys[-1]:
# cpolys.append(pold)
# p = pold
# else :
# cpolys.append(pold)
# p = pold
# # if (ip2 >= len(polys)):# and (conv):
# if conv :
# cpolys.append(p)
# else:
# cpolys.append(pold)
# if len(polys) == 0:
# cpolys.append(p)
# # polyplot(polys)
# # import ipdb
# # ipdb.set_trace()
# polyplot(cpolys)
###################################################
#################################################
####################################################
###################################################
#################################################
####################################################
###################################################
#################################################
####################################################
# for n in range(nbpolys):
# p = polys.pop(-1)
# ip = iter(polys)
# for p2 in ip:
# inter = p.intersection(p2)
# if isinstance(inter,sh.LineString):
# import ipdb
# ipdb.set_trace()
# try:
# mpold = mp
# if mp.touches(p):
# mp = mp + p
# if mp.isconvex():
# mpold = mp
# else :
# cpolys.append(mpold)
# del mp
# else
# except:
# mp = p
################
#############""
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# nop = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# ii = filter(lambda x: x<0,no)
# # get point convex ID
# ic = np.array(ii)[utconvex]
# pic = array(map(lambda x: L.Gs.pos[x], ic))
# luc = [nqp.where(ic[x]==no)[0][0] for x in range(len(ic))]
# # to close the cycle
# luc.append(luc[0])
# # distance between each uc
# duc = np.roll(np.mod(np.diff(luc),len(no)),1)
# rnp.mod(np.diff(luc[::-1]),len(no))
# lenic = len(ic)
# ptbl=[]
# for u in range(lenic-1,-1,-1):
# # find which convex point is the closest but not directly connected
# if duc[u-1] == duc[np.mod(u+1,lenic)]:
# import ipdb
# ipdb.set_trace()
# if (duc[u-1] < duc[np.mod(u+1,lenic)]) and duc[u-1] > 2:
# #node to be linked
# tbl = no[luc[np.mod(u+1,lenic)]]
# else:
# tbl = no[luc[u-1]]
# #node to be linked
# ptbl.append(L.Gs.pos[tbl])
# X=np.array(ptbl)
# plu.displot(X.T,pic.T)
# ################
# #############""
# for n in L.Gt.nodes():
# if n != 0:
# no = L.Gt.node[n]['cycle'].cycle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# ii = filter(lambda x: x<0,no)
# # get point convex ID
# ic = np.array(ii)[utconvex]
# pic = array(map(lambda x: L.Gs.pos[x], ic))
# luc = [np.where(ic[x]==no)[0][0] for x in range(len(ic))]
# lenuc = len(luc)
# # to close the cycle
# luc.append(luc[0])
# duc = np.roll(np.mod(np.diff(luc),len(no)),1)
# # distance between each uc
# ptbl=[]
# for u in range(len(duc)):
# um = np.mod(u-1,lenuc)
# up = np.mod(u+1,lenuc)
# print no[luc[u]],no[luc[um]]
# print no[luc[u]],no[luc[up]]
# if (duc[u] < duc[up]) and (duc[u] >2):
# print 'choose',no[luc[u]],no[luc[um]]
# tbl = no[luc[um]]
# ptbl.append([pic[u],pic[um]])
# elif duc[up] >2:
# print 'choose',no[luc[u]],no[luc[up]]
# tbl = no[luc[up]]
# ptbl.append([pic[u],pic[up]])
# # import ipdb
# # ipdb.set_trace()
# X=np.array(ptbl)
# plu.displot(X[:,0].T,X[:,1].T)
# import ipdb
# ipdb.set_trace()
# import ipdb
# ipdb.set_trace()
# for n in L.Gt.nodes():
# no = L.Gt.node[n]['cycle'].cycle
# lno = len(no)
# nop = L.Gt.node[n]['cycle'].cycqle
# tcc, nn = L.Gt.node[n]['polyg'].ptconvex()
# utconvex = np.nonzero(tcc == 1)[0]
# if len(utconvex) != 0:
# # get points ID in the cycle
# uu = filter(lambda x: x<0,no)
# # get point convex ID (utconvex*2 because point follow segment in
# # cycles. and utconvex only concern points)
# uc = no[utconvex*2]
# pc = array(map(lambda x: L.Gs.pos[x], uc))
# # id of adjacent segemnts 1
# ucm = no[np.mod((utconvex*2)-1,lno)]
# pcm = array(map(lambda x: L.Gs.pos[x], ucm))
# # id of adjacent segemnts 2
# ucp = no[np.mod((utconvex*2)+1,lno)]
# pcp = array(map(lambda x: L.Gs.pos[x], ucp))
# # build vector director of segment1-point and segment 2-point
# vcm = (pcm-pc)/(np.sum(pcm-pc,axis=0))
# vcp = (pcp-pc)/(np.sum(pcp-pc,axis=0))
# import ipdb
# ipdb.set_trace()
# ss = L.seginline(pc[0],pcm[0])
# if len(uc) > 1:
# for nw in combinations(uc,2):
# pf = map(lambda x: self.Gw.pos[x],nw)
# pf = np.array((pf))
# if self.seginline(pf[0],pf[1]).shape[1] <= 1:
# d = np.sqrt(np.sum((pf[0]-pf[1])**2))
# self.Gw.add_edges_from([(nw[0],nw[1])],weight=d)
|
en
| 0.413549
|
#-*- coding:Utf-8 -*- # from pylayers.gis.layout import * # from itertools import combinations # from scipy.spatial import Delaunay # import shapely.geometry as sh # L = Layout('WHERE1_2.ini') # L.build('t') # # L.dumpr() # L.showG('s') # for n in L.Gt.nodes(): # no = L.Gt.node[n]['cycle'].cycle # nop = L.Gt.node[n]['cycle'].cycle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # utconvex = np.nonzero(tcc == 1)[0] # utsconvex = np.nonzero(abs(tcc) == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # uu = filter(lambda x: x<0,no) # uus = filter(lambda x: x<0,no) # # get point convex ID # uc = np.array(uu)[utconvex] # ucs = np.array(uus)[utsconvex] # puc = array(map(lambda x: L.Gs.pos[x], uc)) # pucs = array(map(lambda x: L.Gs.pos[x], ucs)) # trid=Delaunay(pucs) # tri =trid.simplices # # filter tri in the cycle # kt = [] # pkt = [] # for t in tri: # ts = sh.Polygon(pucs[t]) # U = ts.intersection(L.Gt.node[n]['polyg']) # if not U.area < 1e-2: # #pkt.append(pucs[t]) # kt.append(t) # # # ptt = puc[tt] # plt.triplot(pucs[:,0],pucs[:,1], np.array(kt)) # for n in L.Gt.nodes(): # if n > 0: # no = L.Gt.node[n]['cycle'].cycle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # utconvex = np.nonzero(tcc == 1)[0] # utsconvex = np.nonzero(abs(tcc) == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # uu = filter(lambda x: x<0,no) # uus = filter(lambda x: x<0,no) # # get point convex ID # # uc = np.array(uu)[utconvex] # ucs = np.array(uus)[utsconvex] # puc = array(map(lambda x: L.Gs.pos[x], uc)) # pucs = array(map(lambda x: L.Gs.pos[x], ucs)) # if len(ucs) >2: # trid=Delaunay(pucs) # tri =trid.simplices # # filter tri in the cycle # kt = [] # pkt = [] # for t in tri: # ts = sh.Polygon(pucs[t]) # U = L.Gt.node[n]['polyg'].contains(ts) # if U: # #pkt.append(pucs[t]) # kt.append(t) # # # ptt = puc[tt] # try: # plt.triplot(pucs[:,0],pucs[:,1], np.array(kt)) # except: # pass #L.dumpr() # lacy : list of added cycles #if indoor cycles #### #### 1 Determine if pt 
convex in cycle #### # diffracting points #all possible diffracting point (in and out of cycle) # get points ID in the cycle # get point convex ID #### #### 2 perform a Delaunay Partioning #### # filter tri in the cycle # check if the new polygon is contained into #the original polygon (non guaratee by Delaunay) #U = L.Gt.node[n]['polyg'].contains(ts) #keep trace of created airwalls, because some #of them will be destroyed in step 3. # # 3. merge delaunay triangulation in order to obtain # the larger convex polygons partioning # #if 2 triangles have a common segment # if pold not in cpolys: # cpolys.append(pold) # if (ip2 >= len(polys)):# and (conv): # if conv : # if p not in cpolys: # cpolys.append(p) #else: # polyplot(cpolys,fig=plt.gcf()) # plt.draw() # import ipdb # ipdb.set_trace() #### #### 4. ensure the correct vnode numerotaion of the polygons #### and remove unecessary airwalls # ncpol : new created polygons #air walls to be deleted (because origin Delaunay triangle # has been merged ) #remove old cycle # lcyid: (new) list of cycle id #p.coorddeter() # replace by new ones # WARNING # recreate polygon is mandatory otherwise cycle.cycle and polygon.vnodes #are shifted. 
#geu.Polygon(p.xy,cy.cycle) # # Connect Cycles if they share at least one segments # #update self.Gs.node[x]['ncycles'] #add outside cycle to Gs.node[x]['ncycles'] #update interaction list into Gt.nodes (cycles) # polyplot(ncpol) # for n in L.Gt.nodes(): # if n > 0: # no = L.Gt.node[n]['cycle'].cycle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # # diffracting points # utconvex = np.nonzero(tcc == 1)[0] # #all possible diffracting point (in and out of cycle) # utsconvex = np.nonzero(abs(tcc) == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # uus = filter(lambda x: x<0,no) # # get point convex ID # uc = np.array(uus)[utconvex] # ucs = np.array(uus)[utsconvex] # pucs = array(map(lambda x: L.Gs.pos[x], ucs)) # pucs = np.vstack((pucs,pucs[-1])) # if len(ucs) >2: # trid=Delaunay(pucs) # tri =trid.simplices # utri = ucs[tri] # # filter tri in the cycle # kt = [] # pkt = [] # polys = [] # for t in tri: # ts = geu.Polygon(pucs[t]) # #check if inside the original polygon # # U = L.Gt.node[n]['polyg'].contains(ts) # U = L.Gt.node[n]['polyg'].intersection(ts) # ats = ts.area # # fig,ax=ts.plot(fig=fig,ax=ax) # if U.area > (1*ats/100): # #pkt.append(pucs[t]) # kt.append(t) # polys.append(ts) # polyplot(polys) # # # ptt = puc[tt] # # try: # # plt.triplot(pucs[:,0],pucs[:,1], np.array(kt)) # # except: # # pass # kt = array(kt) # npttri = np.arange(0,np.max(kt)) # # search for each triangle, which is connecte # conecttri = [np.where(kt == i) for i in npttri] # cpolys = [] # nbpolys = len(polys) # while polys !=[]: # p = polys.pop(0) # for ip2,p2 in enumerate(polys): # conv=False # inter = p.intersection(p2) # #if 2 triangles have a common segment # pold = p # if isinstance(inter,sh.LineString): # p = p + p2 # if p.isconvex(): # polys.pop(ip2) # polys.insert(0,p) # conv=True # break # elif len(cpolys) != 0: # if pold != cpolys[-1]: # cpolys.append(pold) # p = pold # else : # cpolys.append(pold) # p = pold # # if (ip2 >= len(polys)):# and (conv): # if conv : # 
cpolys.append(p) # else: # cpolys.append(pold) # if len(polys) == 0: # cpolys.append(p) # # polyplot(polys) # # import ipdb # # ipdb.set_trace() # polyplot(cpolys) ################################################### ################################################# #################################################### ################################################### ################################################# #################################################### ################################################### ################################################# #################################################### # for n in range(nbpolys): # p = polys.pop(-1) # ip = iter(polys) # for p2 in ip: # inter = p.intersection(p2) # if isinstance(inter,sh.LineString): # import ipdb # ipdb.set_trace() # try: # mpold = mp # if mp.touches(p): # mp = mp + p # if mp.isconvex(): # mpold = mp # else : # cpolys.append(mpold) # del mp # else # except: # mp = p ################ #############"" # for n in L.Gt.nodes(): # no = L.Gt.node[n]['cycle'].cycle # nop = L.Gt.node[n]['cycle'].cycle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # utconvex = np.nonzero(tcc == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # ii = filter(lambda x: x<0,no) # # get point convex ID # ic = np.array(ii)[utconvex] # pic = array(map(lambda x: L.Gs.pos[x], ic)) # luc = [nqp.where(ic[x]==no)[0][0] for x in range(len(ic))] # # to close the cycle # luc.append(luc[0]) # # distance between each uc # duc = np.roll(np.mod(np.diff(luc),len(no)),1) # rnp.mod(np.diff(luc[::-1]),len(no)) # lenic = len(ic) # ptbl=[] # for u in range(lenic-1,-1,-1): # # find which convex point is the closest but not directly connected # if duc[u-1] == duc[np.mod(u+1,lenic)]: # import ipdb # ipdb.set_trace() # if (duc[u-1] < duc[np.mod(u+1,lenic)]) and duc[u-1] > 2: # #node to be linked # tbl = no[luc[np.mod(u+1,lenic)]] # else: # tbl = no[luc[u-1]] # #node to be linked # ptbl.append(L.Gs.pos[tbl]) # 
X=np.array(ptbl) # plu.displot(X.T,pic.T) # ################ # #############"" # for n in L.Gt.nodes(): # if n != 0: # no = L.Gt.node[n]['cycle'].cycle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # utconvex = np.nonzero(tcc == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # ii = filter(lambda x: x<0,no) # # get point convex ID # ic = np.array(ii)[utconvex] # pic = array(map(lambda x: L.Gs.pos[x], ic)) # luc = [np.where(ic[x]==no)[0][0] for x in range(len(ic))] # lenuc = len(luc) # # to close the cycle # luc.append(luc[0]) # duc = np.roll(np.mod(np.diff(luc),len(no)),1) # # distance between each uc # ptbl=[] # for u in range(len(duc)): # um = np.mod(u-1,lenuc) # up = np.mod(u+1,lenuc) # print no[luc[u]],no[luc[um]] # print no[luc[u]],no[luc[up]] # if (duc[u] < duc[up]) and (duc[u] >2): # print 'choose',no[luc[u]],no[luc[um]] # tbl = no[luc[um]] # ptbl.append([pic[u],pic[um]]) # elif duc[up] >2: # print 'choose',no[luc[u]],no[luc[up]] # tbl = no[luc[up]] # ptbl.append([pic[u],pic[up]]) # # import ipdb # # ipdb.set_trace() # X=np.array(ptbl) # plu.displot(X[:,0].T,X[:,1].T) # import ipdb # ipdb.set_trace() # import ipdb # ipdb.set_trace() # for n in L.Gt.nodes(): # no = L.Gt.node[n]['cycle'].cycle # lno = len(no) # nop = L.Gt.node[n]['cycle'].cycqle # tcc, nn = L.Gt.node[n]['polyg'].ptconvex() # utconvex = np.nonzero(tcc == 1)[0] # if len(utconvex) != 0: # # get points ID in the cycle # uu = filter(lambda x: x<0,no) # # get point convex ID (utconvex*2 because point follow segment in # # cycles. 
and utconvex only concern points) # uc = no[utconvex*2] # pc = array(map(lambda x: L.Gs.pos[x], uc)) # # id of adjacent segemnts 1 # ucm = no[np.mod((utconvex*2)-1,lno)] # pcm = array(map(lambda x: L.Gs.pos[x], ucm)) # # id of adjacent segemnts 2 # ucp = no[np.mod((utconvex*2)+1,lno)] # pcp = array(map(lambda x: L.Gs.pos[x], ucp)) # # build vector director of segment1-point and segment 2-point # vcm = (pcm-pc)/(np.sum(pcm-pc,axis=0)) # vcp = (pcp-pc)/(np.sum(pcp-pc,axis=0)) # import ipdb # ipdb.set_trace() # ss = L.seginline(pc[0],pcm[0]) # if len(uc) > 1: # for nw in combinations(uc,2): # pf = map(lambda x: self.Gw.pos[x],nw) # pf = np.array((pf)) # if self.seginline(pf[0],pf[1]).shape[1] <= 1: # d = np.sqrt(np.sum((pf[0]-pf[1])**2)) # self.Gw.add_edges_from([(nw[0],nw[1])],weight=d)
| 2.157306
| 2
|
test/BribeNet/prediction/test_parameterPrediction.py
|
RobMurray98/BribeNet
| 0
|
6626366
|
from unittest import TestCase
from networkit.generators import WattsStrogatzGenerator
from numpy import logspace
from BribeNet.prediction.parameterPrediction import ParameterPrediction
class TestParameterPrediction(TestCase):
    """Unit tests for ParameterPrediction on a small Watts-Strogatz graph."""

    def setUp(self) -> None:
        # 50 nodes, 6 neighbours per node, 10% rewiring probability.
        self.generator = WattsStrogatzGenerator(50, 6, 0.1)
        self.pred = ParameterPrediction(self.generator.generate())

    def tearDown(self) -> None:
        del self.pred, self.generator

    def test_average_clustering(self):
        # assertGreater reports the offending value on failure, unlike assertTrue.
        self.assertGreater(self.pred.average_clustering(), 0)

    def test_average_shortest_path_length(self):
        self.assertGreater(self.pred.average_shortest_path_length(), 0)

    def test_predict_small_world(self):
        n, k, p = self.pred.predict_small_world()
        self.assertGreater(n, 0)
        self.assertGreater(k, 0)
        self.assertGreater(p, 0)

    def test_generate_example_graphs(self):
        l_values, c_values, l0, c0 = ParameterPrediction.generate_example_graphs(50, 6, logspace(-5, 0, 64, False, 10))
        self.assertGreater(l0, 0)
        self.assertGreater(c0, 0)
|
from unittest import TestCase
from networkit.generators import WattsStrogatzGenerator
from numpy import logspace
from BribeNet.prediction.parameterPrediction import ParameterPrediction
class TestParameterPrediction(TestCase):
    """Unit tests for ParameterPrediction on a small Watts-Strogatz graph."""

    def setUp(self) -> None:
        # 50 nodes, 6 neighbours per node, 10% rewiring probability.
        self.generator = WattsStrogatzGenerator(50, 6, 0.1)
        self.pred = ParameterPrediction(self.generator.generate())

    def tearDown(self) -> None:
        del self.pred, self.generator

    def test_average_clustering(self):
        # assertGreater reports the offending value on failure, unlike assertTrue.
        self.assertGreater(self.pred.average_clustering(), 0)

    def test_average_shortest_path_length(self):
        self.assertGreater(self.pred.average_shortest_path_length(), 0)

    def test_predict_small_world(self):
        n, k, p = self.pred.predict_small_world()
        self.assertGreater(n, 0)
        self.assertGreater(k, 0)
        self.assertGreater(p, 0)

    def test_generate_example_graphs(self):
        l_values, c_values, l0, c0 = ParameterPrediction.generate_example_graphs(50, 6, logspace(-5, 0, 64, False, 10))
        self.assertGreater(l0, 0)
        self.assertGreater(c0, 0)
|
none
| 1
| 2.431064
| 2
|
|
lvreuse/analysis/combined/construct_launch_vehicle.py
|
mvernacc/lvreuse
| 7
|
6626367
|
<gh_stars>1-10
"""Construct a LaunchVehicle given strategy choices and masses."""
import os.path
from lvreuse.cost.elements import CryoLH2TurboFed, ExpendableBallisticStageLH2, \
ExpendableBallisticStageStorable, StorableTurboFed, ReusableBallisticStageLH2, \
VTOStageFlybackVehicle, TurboJetEngine, ReusableBallisticStageStorable, \
ExpendableTank
from lvreuse.cost.vehicle import LaunchVehicle
def construct_launch_vehicle(stage_type, prop_choice, portion_reused, ab_rec, num_ab_engines=None, num_rocket_engines=9):
    """Create a LaunchVehicle object given strategy choices.

    Arguments:
        stage_type: type of first stage vehicle, choose from 'winged' or 'ballistic'
        prop_choice: propellant choice for first stage, choose from 'kerosene' or 'H2'
        portion_reused: portion of first stage that is reused, choose from 'full', 'partial', or 'none'
        ab_rec (boolean): specifies whether the recovery scheme is powered or not, True for powered
            (only meaningful for winged stages, which then carry turbojet engines)
        num_ab_engines: currently unused; retained for interface compatibility
        num_rocket_engines: currently unused; retained for interface compatibility

    Raises:
        ValueError: if any choice is unrecognized, or for the unsupported
            combination of a winged stage with no reuse.

    Returns:
        instance of the LaunchVehicle class describing the launch vehicle
    """
    if prop_choice not in ('H2', 'kerosene'):
        raise ValueError("prop_choice must be 'H2' or 'kerosene', got %r" % (prop_choice,))
    if portion_reused not in ('none', 'full', 'partial'):
        raise ValueError("portion_reused must be 'none', 'full' or 'partial', got %r" % (portion_reused,))

    # The second stage is always an expendable storable-propellant stage.
    stage2 = ExpendableBallisticStageStorable(name='s2', m=0)
    stage2_engine = StorableTurboFed(name='e2', m=0)
    veh_element_list = [stage2, stage2_engine]

    # First-stage rocket engine type depends only on the propellant choice.
    engine_cls = CryoLH2TurboFed if prop_choice == 'H2' else StorableTurboFed

    if stage_type == 'ballistic':
        # Stage body class keyed by (propellant, any-reuse?).
        ballistic_body = {
            ('H2', False): ExpendableBallisticStageLH2,
            ('H2', True): ReusableBallisticStageLH2,
            ('kerosene', False): ExpendableBallisticStageStorable,
            ('kerosene', True): ReusableBallisticStageStorable,
        }
        body_cls = ballistic_body[(prop_choice, portion_reused != 'none')]
    elif stage_type == 'winged':
        if portion_reused == 'none':
            # Previously this fell through and crashed with UnboundLocalError.
            raise ValueError("a winged first stage must be at least partially reused")
        body_cls = VTOStageFlybackVehicle
    else:
        raise ValueError("stage_type must be 'winged' or 'ballistic', got %r" % (stage_type,))

    # First-stage element order matches the original branch tables:
    # body, optional expendable tank, rocket engine, optional air-breathing engine.
    stage1_list = [body_cls(name='s1', m=0)]
    if portion_reused == 'partial':
        stage1_list.append(ExpendableTank(name='d1', m=0))
    stage1_list.append(engine_cls(name='e1', m=0))
    if stage_type == 'winged' and ab_rec:
        stage1_list.append(TurboJetEngine(name='ab', m=0))

    veh_element_list += stage1_list

    # Partial reuse adds a third "vehicle" (the discarded tank) to the count.
    N_veh = 3 if portion_reused == 'partial' else 2

    launch_vehicle = LaunchVehicle(name='veh', M0=0, N=N_veh, element_list=veh_element_list)
    return launch_vehicle
def demo():
    """Demonstrate vehicle construction and production-cost estimation.

    Builds a partially-reused winged kerosene first stage for a LEO mission
    and prints the average production cost of the first unit. All CER
    coefficients and cost factors are placeholders (mostly 1), so the printed
    number is a plumbing check, not a realistic estimate.
    """
    from lvreuse.analysis.performance.strategy_perf_models import (WingedPoweredLaunchSitePartial,
                                                                   kero_GG_boost_tech,
                                                                   kero_GG_upper_tech,
                                                                   LEO)
    from lvreuse.cost.CER_values import CERValues
    from lvreuse.cost.cost_factors import ElementCostFactors, VehicleCostFactors

    mission = LEO
    wingpwr_part = WingedPoweredLaunchSitePartial(kero_GG_boost_tech, kero_GG_upper_tech, mission)
    wingpwr_part_dict = wingpwr_part.get_masses(pi_star=0.01, a=0.60, E_1=0.06, E_2=0.04)
    print(wingpwr_part_dict)

    launch_veh = construct_launch_vehicle(stage_type='winged', prop_choice='kerosene',
                                          portion_reused='partial', ab_rec=True, num_ab_engines=2)

    def placeholder_cer():
        # Identical placeholder CER coefficients for every element.
        return CERValues(dev_a=10, dev_x=0.5, prod_a=1, prod_x=0.5)

    def placeholder_factors():
        # All element-level cost factors set to 1.
        return ElementCostFactors(f1=1, f2=1, f3=1, f8=1, f10=1, f11=1, p=1)

    # element name -> [CER values, element cost factors, count per vehicle]
    element_map = {'s1': [placeholder_cer(), placeholder_factors(), 1],
                   'e1': [placeholder_cer(), placeholder_factors(), 9],
                   's2': [placeholder_cer(), placeholder_factors(), 1],
                   'e2': [placeholder_cer(), placeholder_factors(), 1],
                   'd1': [placeholder_cer(), placeholder_factors(), 1],
                   'ab': [placeholder_cer(), placeholder_factors(), 2]}

    veh_cost_factors = VehicleCostFactors(f0_dev=1, f0_prod=1, f6=1,
                                          f7=1, f8=1, f9=1, p=1)

    prod_nums = [1]
    prod_cost = launch_veh.average_vehicle_production_cost(veh_cost_factors, prod_nums, element_map)
    print('prod_cost: ', prod_cost)
if __name__ == '__main__':
    demo()
    # Removed stray debug statement `print(range(4))` left over from development.
|
"""Construct a LaunchVehicle given strategy choices and masses."""
import os.path
from lvreuse.cost.elements import CryoLH2TurboFed, ExpendableBallisticStageLH2, \
ExpendableBallisticStageStorable, StorableTurboFed, ReusableBallisticStageLH2, \
VTOStageFlybackVehicle, TurboJetEngine, ReusableBallisticStageStorable, \
ExpendableTank
from lvreuse.cost.vehicle import LaunchVehicle
def construct_launch_vehicle(stage_type, prop_choice, portion_reused, ab_rec, num_ab_engines=None, num_rocket_engines=9):
    """Create a LaunchVehicle object given strategy choices.

    Arguments:
        stage_type: type of first stage vehicle, choose from 'winged' or 'ballistic'
        prop_choice: propellant choice for first stage, choose from 'kerosene' or 'H2'
        portion_reused: portion of first stage that is reused, choose from 'full', 'partial', or 'none'
        ab_rec (boolean): specifies whether the recovery scheme is powered or not, True for powered
            (only meaningful for winged stages, which then carry turbojet engines)
        num_ab_engines: currently unused; retained for interface compatibility
        num_rocket_engines: currently unused; retained for interface compatibility

    Raises:
        ValueError: if any choice is unrecognized, or for the unsupported
            combination of a winged stage with no reuse.

    Returns:
        instance of the LaunchVehicle class describing the launch vehicle
    """
    if prop_choice not in ('H2', 'kerosene'):
        raise ValueError("prop_choice must be 'H2' or 'kerosene', got %r" % (prop_choice,))
    if portion_reused not in ('none', 'full', 'partial'):
        raise ValueError("portion_reused must be 'none', 'full' or 'partial', got %r" % (portion_reused,))

    # The second stage is always an expendable storable-propellant stage.
    stage2 = ExpendableBallisticStageStorable(name='s2', m=0)
    stage2_engine = StorableTurboFed(name='e2', m=0)
    veh_element_list = [stage2, stage2_engine]

    # First-stage rocket engine type depends only on the propellant choice.
    engine_cls = CryoLH2TurboFed if prop_choice == 'H2' else StorableTurboFed

    if stage_type == 'ballistic':
        # Stage body class keyed by (propellant, any-reuse?).
        ballistic_body = {
            ('H2', False): ExpendableBallisticStageLH2,
            ('H2', True): ReusableBallisticStageLH2,
            ('kerosene', False): ExpendableBallisticStageStorable,
            ('kerosene', True): ReusableBallisticStageStorable,
        }
        body_cls = ballistic_body[(prop_choice, portion_reused != 'none')]
    elif stage_type == 'winged':
        if portion_reused == 'none':
            # Previously this fell through and crashed with UnboundLocalError.
            raise ValueError("a winged first stage must be at least partially reused")
        body_cls = VTOStageFlybackVehicle
    else:
        raise ValueError("stage_type must be 'winged' or 'ballistic', got %r" % (stage_type,))

    # First-stage element order matches the original branch tables:
    # body, optional expendable tank, rocket engine, optional air-breathing engine.
    stage1_list = [body_cls(name='s1', m=0)]
    if portion_reused == 'partial':
        stage1_list.append(ExpendableTank(name='d1', m=0))
    stage1_list.append(engine_cls(name='e1', m=0))
    if stage_type == 'winged' and ab_rec:
        stage1_list.append(TurboJetEngine(name='ab', m=0))

    veh_element_list += stage1_list

    # Partial reuse adds a third "vehicle" (the discarded tank) to the count.
    N_veh = 3 if portion_reused == 'partial' else 2

    launch_vehicle = LaunchVehicle(name='veh', M0=0, N=N_veh, element_list=veh_element_list)
    return launch_vehicle
def demo():
    """Demonstrate vehicle construction and production-cost estimation.

    Builds a partially-reused winged kerosene first stage for a LEO mission
    and prints the average production cost of the first unit. All CER
    coefficients and cost factors are placeholders (mostly 1), so the printed
    number is a plumbing check, not a realistic estimate.
    """
    from lvreuse.analysis.performance.strategy_perf_models import (WingedPoweredLaunchSitePartial,
                                                                   kero_GG_boost_tech,
                                                                   kero_GG_upper_tech,
                                                                   LEO)
    from lvreuse.cost.CER_values import CERValues
    from lvreuse.cost.cost_factors import ElementCostFactors, VehicleCostFactors

    mission = LEO
    wingpwr_part = WingedPoweredLaunchSitePartial(kero_GG_boost_tech, kero_GG_upper_tech, mission)
    wingpwr_part_dict = wingpwr_part.get_masses(pi_star=0.01, a=0.60, E_1=0.06, E_2=0.04)
    print(wingpwr_part_dict)

    launch_veh = construct_launch_vehicle(stage_type='winged', prop_choice='kerosene',
                                          portion_reused='partial', ab_rec=True, num_ab_engines=2)

    def placeholder_cer():
        # Identical placeholder CER coefficients for every element.
        return CERValues(dev_a=10, dev_x=0.5, prod_a=1, prod_x=0.5)

    def placeholder_factors():
        # All element-level cost factors set to 1.
        return ElementCostFactors(f1=1, f2=1, f3=1, f8=1, f10=1, f11=1, p=1)

    # element name -> [CER values, element cost factors, count per vehicle]
    element_map = {'s1': [placeholder_cer(), placeholder_factors(), 1],
                   'e1': [placeholder_cer(), placeholder_factors(), 9],
                   's2': [placeholder_cer(), placeholder_factors(), 1],
                   'e2': [placeholder_cer(), placeholder_factors(), 1],
                   'd1': [placeholder_cer(), placeholder_factors(), 1],
                   'ab': [placeholder_cer(), placeholder_factors(), 2]}

    veh_cost_factors = VehicleCostFactors(f0_dev=1, f0_prod=1, f6=1,
                                          f7=1, f8=1, f9=1, p=1)

    prod_nums = [1]
    prod_cost = launch_veh.average_vehicle_production_cost(veh_cost_factors, prod_nums, element_map)
    print('prod_cost: ', prod_cost)
if __name__ == '__main__':
    demo()
    # Removed stray debug statement `print(range(4))` left over from development.
|
en
| 0.790514
|
Construct a LaunchVehicle given strategy choices and masses. Create a LaunchVehicle object given strategy choices and element masses. Arguments: stage_type: type of first stage vehicle, choose from 'winged' or 'ballistic' prop_choice: propellant choice for first stage, choose from 'kerosene' or 'H2' portion_reused: portion of first stage that is reused, choose from 'full', 'partial', or 'none' ab_rec (boolean): specifies whether the recovery scheme is powered or not, True for powered masses_dict: dictionary mapping vehicle element names to their dry masses in kg, i.e. {'element_name': mass} Returns: instance of the LaunchVehicle class describing the launch vehicle
| 3.001361
| 3
|
asciisciit/asciiart.py
|
sahwar/asciisciit
| 1
|
6626368
|
#!/usr/bin/env python
'''
ASCII Toolbox for Converting Images, Movies, Gifs, and Video Feed
Created on 14 Aug 2014
@author: <NAME>
'''
from __future__ import print_function
import time
import os
import platform
from subprocess import Popen, PIPE
import io
import cv2
import numpy as np
from asciisciit.conversions import *
from asciisciit.lut import get_lut, PY2
import asciisciit.console as console
class AsciiImage(object):
    """
    An image representation of single frame or image file.

    Parameters
    ----------
    image : str, np.ndarray, PIL.Image
        Image to convert to text. Can be file path, numpy array, or PIL image
    scalefactor : float
        Scale factor for image. Units are chars/pixel, automatically adjusted
        for the rectangular-ness of characters.
    invert : bool
        Whether to invert the intensity values
    equalize : bool
        Equalize the image histogram to increase contrast. This should be set
        to True for most images.
    lut : str
        Name of the character lookup table to use.
    font_path : str, optional
        Path to the font used for rendering and aspect correction.

    Examples
    --------
    >>> ascii = AsciiImage('rubyrhod.jpeg')
    >>> print(ascii)

    """
    def __init__(self,
                 image,
                 scalefactor=0.1,
                 invert=False,
                 equalize=True,
                 lut='simple',
                 font_path=None):
        # A string is treated as a file path; anything else is assumed to be
        # an already-loaded image (numpy array or PIL image).
        self.image = open_pil_img(image) if isinstance(image, str) else image
        self.scalefactor = scalefactor
        self.invert = invert
        self.equalize = equalize
        self.font_path = font_path
        self.aspect_correction_factor = DEFAULT_ASPECT_CORRECTION_FACTOR
        self._lut = None
        self.lut = lut  # property setter also updates aspect_correction_factor

    @property
    def data(self):
        """ASCII text, recomputed from the current conversion settings."""
        return image_to_ascii(self.image,
                              self.scalefactor,
                              self.invert,
                              self.equalize,
                              self.lut,
                              self.aspect_correction_factor)

    @property
    def size(self):
        """Size of the ASCII output in characters."""
        return get_ascii_image_size(self.data)

    @property
    def lut(self):
        """Name of the character lookup table used for conversion."""
        return self._lut

    @lut.setter
    def lut(self, val):
        self._lut = val
        lookup = get_lut(val)
        # Correction factor depends on the LUT's exemplar glyph and the font.
        self.aspect_correction_factor = get_aspect_correction_factor(
            lookup.exemplar, self.font_path)

    def __repr__(self):
        if PY2:
            return self.data.encode('utf-8')  # py2 repr must return bytes
        return self.data

    def __unicode__(self):
        return self.data

    def to_file(self, path):
        """Write the ASCII text to *path*, truncating any existing file."""
        with io.open(path, "w+") as f:
            f.write(self.data)

    def render(self, path, font_size=10, bg_color=(20,20,20), fg_color=(255,255,255)):
        """Rasterize the ASCII text and save it as an image file at *path*."""
        img = ascii_to_pil(self.data, font_size, bg_color, fg_color, font_path=self.font_path)
        img.save(path)

    def show(self, resize_term=False, rescale=False):
        """Print the ASCII image to stdout.

        Parameters
        ----------
        resize_term : bool
            Attempt to resize the terminal to fit the image first.
        rescale : bool
            Currently unused; retained for interface compatibility.
        """
        if resize_term:
            try:
                console.set_terminal_size(self.size)
            except Exception:  # resize is best-effort; was a bare except
                pass
        print(self.data)
class AsciiMovie(object):
    """
    Movie object for playing and rendering movies.

    ``.gif`` files are decoded up front and replayed from memory;
    ``.mp4``/``.avi``/``.mpeg``/``.mpg`` files are streamed frame by frame
    through OpenCV.

    Parameters
    ----------
    movie_path : str
        File path or web address for movie.
    scalefactor : float
        Scale of the image in chars / pixel
    invert : bool
        Invert image before processing
    equalize : bool
        Equalize the image histogram to increase contrast.
    lut : str
        Name of the character lookup table to use.
    font_path : str, optional
        Path to the font used for rendering and aspect correction.

    Examples
    --------
    >>> movie = AsciiMovie('awesome_movie.avi')
    >>> movie.play(fps=24.0)

    """
    def __init__(self,
                 movie_path,
                 scalefactor=0.2,
                 invert=False,
                 equalize=True,
                 lut='simple',
                 font_path=None):
        self.movie_path = movie_path
        self.scalefactor = scalefactor
        self.invert = invert
        self.equalize = equalize
        self.font_path = font_path
        self.aspect_correction_factor = DEFAULT_ASPECT_CORRECTION_FACTOR
        self._lut = lut
        self.lut = lut  # property setter also updates aspect_correction_factor
        self.default_fps = 15.0
        if isinstance(self.movie_path, str):
            # Bind play/render to the implementation matching the extension.
            _, ext = os.path.splitext(self.movie_path)
            if ext == ".gif":
                self.data, frame_duration = gif_to_numpy(self.movie_path)
                self.default_fps = 1000.0/frame_duration
                self.shape = self.data.shape
                self.play = self._play_gif
                self.render = self._render_to_gif
            elif ext in (".mp4", ".avi", ".mpeg", ".mpg"):
                self.play = self._play_movie
                self.render = self._render_to_movie
            else:
                # Previously fell through silently, leaving play/render unset.
                raise ValueError("unsupported movie extension: %s" % ext)
        else:
            # Was `raise("movie_path must be a string")`, which raises a bare
            # string -- itself a TypeError under Python 3.
            raise TypeError("movie_path must be a string")
        self.frame_intervals = []
        self.draw_times = []

    @property
    def lut(self):
        """Name of the character lookup table used for conversion."""
        return self._lut

    @lut.setter
    def lut(self, val):
        self._lut = val
        lookup = get_lut(val)
        # Correction factor depends on the LUT's exemplar glyph and the font.
        self.aspect_correction_factor = get_aspect_correction_factor(
            lookup.exemplar, self.font_path)

    def _play_gif(self, fps=None, repeats=-1):
        """Play a pre-decoded gif in the terminal; repeats < 0 loops forever."""
        fps = fps or self.default_fps
        seq = generate_sequence(self.data,
                                scalefactor=self.scalefactor,
                                invert=self.invert,
                                equalize=self.equalize,
                                lut=self.lut,
                                font_path=self.font_path)
        if repeats < 0:
            while True:
                play_sequence(seq, fps)
        else:
            for _ in range(repeats):
                play_sequence(seq, fps)

    def _play_movie(self, fps=None, repeats=1):
        """Stream a movie file to the terminal as ASCII frames."""
        fps = fps or self.default_fps
        if repeats < 0:
            repeats = 1  # lets just play movies once by default
        for _ in range(repeats):
            video = cv2.VideoCapture(self.movie_path)
            frame = 0
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # recommended replacement for interval timing.
            t = time.perf_counter()
            while 1:
                result, image = video.read()
                if not isinstance(image, np.ndarray):
                    print("End of movie.")
                    break
                if result:
                    ascii_img = AsciiImage(image,
                                           scalefactor=self.scalefactor,
                                           invert=self.invert,
                                           equalize=self.equalize,
                                           lut=self.lut,
                                           font_path=self.font_path)
                    # Set terminal size to fit the first frame.
                    if frame == 0:
                        try:
                            console.set_terminal_size(ascii_img.size)
                        except Exception:  # resize is best-effort
                            pass
                    console.clear_term()
                    print(ascii_img)
                    frame += 1
                else:
                    break
                # Sleep off whatever remains of this frame's time budget.
                draw_time = time.perf_counter()-t
                t = time.perf_counter()
                remaining = 1.0/fps-draw_time
                if remaining > 0:
                    time.sleep(remaining)
                    interval = draw_time+remaining
                else:
                    interval = draw_time
                self.frame_intervals.append(interval)
                self.draw_times.append(draw_time)

            print("Total frames displayed:", frame)
            print("Avg draw time:", np.mean(self.draw_times))
            print("Avg frame interval:", np.mean(self.frame_intervals))
            print("Max frame interval:", np.max(self.frame_intervals))
            print("Min frame interval:", np.min(self.frame_intervals))
            video.release()

    def _render_to_gif(self, output_path, fps=None, font_size=10):
        """
        Render the decoded gif frames to a new gif of ASCII text.

        Parameters
        ----------
        output_path : str
            Output file path.
        fps : float, optional
            Output frame rate; defaults to the source gif's rate.
        font_size : int
            Font size used when rasterizing each ASCII frame.
        """
        fps = fps or self.default_fps
        seq = generate_sequence(self.data,
                                scalefactor=self.scalefactor,
                                invert=self.invert,
                                equalize=self.equalize,
                                lut=self.lut,
                                font_path=self.font_path)
        ascii_seq_to_gif(seq,
                         output_path,
                         fps=fps,
                         font_size=font_size,
                         font_path=self.font_path)

    def _render_to_movie(self,
                         output_path,
                         fourcc=None,
                         fps=None,
                         font_size=10):
        """
        Render the movie to a video file of ASCII frames via ffmpeg.

        Parameters
        ----------
        output_path : str
            Output file path.
        fourcc
            Unused; retained from a previous OpenCV-based writer.
        fps : float, optional
            Output frame rate; defaults to the source's rate.
        font_size : int
            Font size used when rasterizing each ASCII frame.
        """
        fps = fps or self.default_fps
        # First pass: count frames and determine the rendered image size.
        video = cv2.VideoCapture(self.movie_path)
        frames = 0
        status = StatusBar(text='Counting frames: ')
        while 1:
            result, frame = video.read()
            if not isinstance(frame, np.ndarray):
                break
            if frames == 0:
                # Compute the resulting image size once (legacy of the old
                # OpenCV writer, which required it up front).
                ascii_img = AsciiImage(frame,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut,
                                       font_path=self.font_path)
                pil_img = ascii_to_pil(ascii_img.data, font_path=self.font_path)
                img_size = pil_img.size
            frames += 1
            status.update_custom(frames)
        video.release()
        status.complete()
        # Second pass: pipe JPEG-encoded ASCII frames into ffmpeg.
        video = cv2.VideoCapture(self.movie_path)
        p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec',
                   'mjpeg', '-r', str(fps), '-i', '-', '-vcodec',
                   'mpeg4', '-qscale', '5', '-r', str(fps), output_path],
                  stdin=PIPE)
        for i in range(frames):
            result, frame = video.read()
            if not isinstance(frame, np.ndarray):
                break
            if result:
                ascii_img = AsciiImage(frame,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut,
                                       font_path=self.font_path)
                pil_img = ascii_to_pil(ascii_img.data,
                                       font_size=font_size,
                                       font_path=self.font_path)
                pil_img.save(p.stdin, 'JPEG')
            else:
                break
        video.release()
        p.stdin.close()
        p.wait()
class AsciiCamera(object):
    """
    Live webcam-to-ascii streamer.

    Parameters
    ----------
    camera_id : int
        OpenCV device index of the camera to open.
    scalefactor : float
        Scale of the output in chars/pixel.
    invert : bool
        Invert image intensities before conversion.
    equalize : bool
        Equalize the image histogram to increase contrast.
    lut : str
        Name of the character lookup table to use.
    """
    def __init__(self,
                 camera_id=0,
                 scalefactor=0.2,
                 invert=False,
                 equalize=True,
                 lut="simple"):
        self.scalefactor = scalefactor
        self.invert = invert
        self.camera_id = camera_id
        self.equalize = equalize
        self.lut = lut
        #webcam?
        self.video = cv2.VideoCapture(self.camera_id)
        self.frame_intervals = []
        self.draw_times = []

    def stream(self, fps=15.0):
        """
        Stream the camera to the terminal as ascii until the feed ends.

        Parameters
        ----------
        fps : float
            Upper bound on the displayed frame rate.

        Raises
        ------
        IOError
            If the camera produces no frames at all.
        """
        # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
        # where available and fall back only on ancient interpreters.
        clock = getattr(time, "perf_counter", None) or time.clock
        frame = 0
        t = clock()
        while 1:
            result, image = self.video.read()
            if not isinstance(image, np.ndarray):
                if frame == 0:
                    raise IOError("No frames available. Bro, do you even camera?")
                ##TODO: find some way to break out besides ^C
                print("End of movie.")
                break
            if result:
                ascii_img = AsciiImage(image,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut)
                # Size the terminal to the first frame; ignore terminals that
                # refuse resizing (was a bare except, which also swallowed
                # KeyboardInterrupt).
                if frame == 0:
                    try:
                        console.set_terminal_size(ascii_img.size)
                    except Exception:
                        pass
                console.clear_term()
                print(ascii_img)
                frame += 1
            else:
                break
            # Sleep off whatever time is left in this frame's budget.
            draw_time = clock() - t
            t = clock()
            remaining = 1.0 / fps - draw_time
            if remaining > 0:
                time.sleep(remaining)
                interval = draw_time + remaining
            else:
                interval = draw_time
            self.frame_intervals.append(interval)
            self.draw_times.append(draw_time)
        print("Total frames displayed:", frame)
        print("Avg draw time:", np.mean(self.draw_times))
        print("Avg frame interval:", np.mean(self.frame_intervals))
        print("Max frame interval:", np.max(self.frame_intervals))
        print("Min frame interval:", np.min(self.frame_intervals))
        self.release()

    def release(self):
        """Release the underlying OpenCV capture device."""
        self.video.release()
def generate_sequence(imageseq,
                      scalefactor=0.1,
                      invert=False,
                      equalize=True,
                      lut='simple',
                      font_path=None):
    """
    Convert an iterable of images into a list of AsciiImage frames.

    Parameters
    ----------
    imageseq : iterable
        Sequence of images accepted by AsciiImage.
    scalefactor : float
        Scale of the output in chars/pixel.
    invert : bool
        Invert image intensities before conversion.
    equalize : bool
        Equalize each image's histogram.
    lut : str
        Name of the character lookup table.
    font_path : str, optional
        Path to the font used for aspect correction.

    Returns
    -------
    list of AsciiImage
    """
    return [
        AsciiImage(frame,
                   scalefactor,
                   invert=invert,
                   equalize=equalize,
                   lut=lut,
                   font_path=font_path)
        for frame in imageseq
    ]
def play_sequence(seq, fps=30, repeats=1):
    """
    Play a sequence of AsciiImage frames in the terminal.

    Parameters
    ----------
    seq : list of AsciiImage
        Frames to display; must be non-empty.
    fps : float
        Upper bound on the displayed frame rate.
    repeats : int
        Number of times to play the sequence. (Previously accepted but
        silently ignored; now honored. The old default behavior -- one
        play-through -- is unchanged.)
    """
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
    # where available and fall back only on ancient interpreters.
    clock = getattr(time, "perf_counter", None) or time.clock
    shape = seq[0].size
    console.set_terminal_size(shape)
    t = clock()
    for _ in range(repeats):
        for im in seq:
            console.clear_term()
            print(im)
            # Sleep off the remainder of this frame's time budget.
            interval = clock() - t
            t = clock()
            remaining = 1.0 / fps - interval
            if remaining > 0:
                time.sleep(remaining)
if __name__ == '__main__':
    # Module is intended for import; no CLI entry point is defined here.
    pass
|
#!/usr/bin/env python
'''
ASCII Toolbox for Converting Images, Movies, Gifs, and Video Feed
Created on 14 Aug 2014
@author: <NAME>
'''
from __future__ import print_function
import time
import os
import platform
from subprocess import Popen, PIPE
import io
import cv2
import numpy as np
from asciisciit.conversions import *
from asciisciit.lut import get_lut, PY2
import asciisciit.console as console
class AsciiImage(object):
    """
    An image representation of single frame or image file.

    Parameters
    ----------
    image : str, np.ndarray, PIL.Image
        Image to convert to text. Can be file path, numpy array, or PIL image
    scalefactor : float
        Scale factor for image. Units are chars/pixel, automatically adjusted
        for the rectangular-ness of characters.
    invert : bool
        Whether to invert the intensity values
    equalize : bool
        Equalize the image histogram to increase contrast. This should be set
        to True for most images.
    lut : str
        Name of the character lookup table to use.
    font_path : str, optional
        Path to the font used to compute the aspect correction factor.

    Examples
    --------
    >>> ascii = AsciiImage('rubyrhod.jpeg')
    >>> print(ascii)
    """
    def __init__(self,
                 image,
                 scalefactor=0.1,
                 invert=False,
                 equalize=True,
                 lut='simple',
                 font_path=None):
        # A string is treated as a path (or URL) and opened via PIL.
        self.image = open_pil_img(image) if isinstance(image, str) else image
        self.scalefactor = scalefactor
        self.invert = invert
        self.equalize = equalize
        self.font_path = font_path
        self.aspect_correction_factor = DEFAULT_ASPECT_CORRECTION_FACTOR
        self._lut = None
        self.lut = lut  # property setter also derives the aspect factor

    @property
    def data(self):
        """Ascii text for the image, regenerated on every access."""
        return image_to_ascii(self.image,
                              self.scalefactor,
                              self.invert,
                              self.equalize,
                              self.lut,
                              self.aspect_correction_factor)

    @property
    def size(self):
        """Size of the rendered ascii text, as reported by conversions."""
        return get_ascii_image_size(self.data)

    @property
    def lut(self):
        return self._lut

    @lut.setter
    def lut(self, val):
        self._lut = val
        lookup = get_lut(val)
        self.aspect_correction_factor = get_aspect_correction_factor(
            lookup.exemplar, self.font_path)  # correction factor for converting

    def __repr__(self):
        if PY2:
            return self.data.encode('utf-8')  # py2 repr must return bytes
        return self.data

    def __unicode__(self):
        return self.data

    def to_file(self, path):
        """Write the ascii text to *path* as a text file."""
        with io.open(path, "w+") as f:
            f.write(self.data)

    def render(self, path, font_size=10, bg_color=(20,20,20), fg_color=(255,255,255)):
        """Rasterize the ascii text and save it as an image file at *path*."""
        img = ascii_to_pil(self.data, font_size, bg_color, fg_color, font_path=self.font_path)
        img.save(path)

    def show(self, resize_term=False, rescale=False):
        """
        Print the ascii image to the terminal.

        Parameters
        ----------
        resize_term : bool
            Attempt to resize the terminal to fit the image first.
        rescale : bool
            Currently unused; kept for interface compatibility.
        """
        if resize_term:
            try:
                console.set_terminal_size(self.size)
            except Exception:  # was a bare except; don't swallow SystemExit etc.
                pass
        print(self.data)
class AsciiMovie(object):
    """
    Movie object for playing and rendering movies.

    Parameters
    ----------
    movie_path : str
        File path or web address for movie.
    scalefactor : float
        Scale of the image in chars / pixel
    invert : bool
        Invert image before processing
    equalize : bool
        Equalize the image histogram to increase contrast.
    lut : str
        Name of the character lookup table to use.
    font_path : str, optional
        Path to the font used to compute the aspect correction factor.

    Examples
    --------
    >>> movie = AsciiMovie('awesome_movie.avi')
    >>> movie.play(fps=24.0)
    """
    def __init__(self,
                 movie_path,
                 scalefactor=0.2,
                 invert=False,
                 equalize=True,
                 lut='simple',
                 font_path=None):
        self.movie_path = movie_path
        self.scalefactor = scalefactor
        self.invert = invert
        self.equalize = equalize
        self.font_path = font_path
        self.aspect_correction_factor = DEFAULT_ASPECT_CORRECTION_FACTOR
        self._lut = lut
        self.lut = lut  # property setter also derives the aspect factor
        self.default_fps = 15.0

        if isinstance(self.movie_path, str):
            # movie is a file; dispatch play/render on the extension
            _, ext = os.path.splitext(self.movie_path)
            if ext == ".gif":
                self.data, frame_duration = gif_to_numpy(self.movie_path)
                self.default_fps = 1000.0/frame_duration
                self.shape = self.data.shape
                self.play = self._play_gif
                self.render = self._render_to_gif
            elif ext in [".mp4", ".avi", ".mpeg", ".mpg"]:
                self.play = self._play_movie
                self.render = self._render_to_movie
        else:
            # Bug fix: the original `raise("...")` raised a plain string,
            # which itself fails with "exceptions must derive from
            # BaseException". Raise a real TypeError instead.
            raise TypeError("movie_path must be a string")

        self.frame_intervals = []
        self.draw_times = []

    @property
    def lut(self):
        return self._lut

    @lut.setter
    def lut(self, val):
        self._lut = val
        lookup = get_lut(val)
        self.aspect_correction_factor = get_aspect_correction_factor(
            lookup.exemplar, self.font_path)  # correction factor for converting

    def _play_gif(self, fps=None, repeats=-1):
        """
        Play a gif in the terminal.

        Parameters
        ----------
        fps : float, optional
            Playback rate; defaults to the gif's native rate.
        repeats : int
            Number of plays; a negative value loops forever.
        """
        fps = fps or self.default_fps
        seq = generate_sequence(self.data,
                                scalefactor=self.scalefactor,
                                invert=self.invert,
                                equalize=self.equalize,
                                lut=self.lut,
                                font_path=self.font_path)
        if repeats < 0:
            while True:
                play_sequence(seq, fps)
        else:
            for i in range(repeats):
                play_sequence(seq, fps)

    def _play_movie(self, fps=None, repeats=1):
        """
        Decode a movie file frame-by-frame and play it in the terminal.

        Parameters
        ----------
        fps : float, optional
            Playback rate; defaults to the movie's native rate.
        repeats : int
            Number of plays; negative values are clamped to one.
        """
        # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
        # where available and fall back only on ancient interpreters.
        clock = getattr(time, "perf_counter", None) or time.clock
        fps = fps or self.default_fps
        if repeats < 0:
            repeats = 1  # lets just play movies once by default
        for i in range(repeats):
            video = cv2.VideoCapture(self.movie_path)
            frame = 0
            t = clock()
            while 1:
                result, image = video.read()
                if not isinstance(image, np.ndarray):
                    # cv2 returns None once the stream is exhausted.
                    print("End of movie.")
                    break
                if result:
                    ascii_img = AsciiImage(image,
                                           scalefactor=self.scalefactor,
                                           invert=self.invert,
                                           equalize=self.equalize,
                                           lut=self.lut,
                                           font_path=self.font_path)
                    # Size the terminal to the first frame; ignore terminals
                    # that refuse resizing (was a bare except).
                    if frame == 0:
                        try:
                            console.set_terminal_size(ascii_img.size)
                        except Exception:
                            pass
                    console.clear_term()
                    print(ascii_img)
                    frame += 1
                else:
                    break
                # Sleep off whatever time is left in this frame's budget.
                draw_time = clock() - t
                t = clock()
                remaining = 1.0/fps - draw_time
                if remaining > 0:
                    time.sleep(remaining)
                    interval = draw_time + remaining
                else:
                    interval = draw_time
                self.frame_intervals.append(interval)
                self.draw_times.append(draw_time)
            print("Total frames displayed:", frame)
            print("Avg draw time:", np.mean(self.draw_times))
            print("Avg frame interval:", np.mean(self.frame_intervals))
            print("Max frame interval:", np.max(self.frame_intervals))
            print("Min frame interval:", np.min(self.frame_intervals))
            video.release()

    def _render_to_gif(self, output_path, fps=None, font_size=10):
        """
        Render the movie frames as an animated gif of ascii text.

        Parameters
        ----------
        output_path : str
            Output file path.
        fps : float, optional
            Frames per second; defaults to the movie's native rate.
        font_size : int
            Font size used when rasterizing the text frames.
        """
        fps = fps or self.default_fps
        seq = generate_sequence(self.data,
                                scalefactor=self.scalefactor,
                                invert=self.invert,
                                equalize=self.equalize,
                                lut=self.lut,
                                font_path=self.font_path)
        ascii_seq_to_gif(seq,
                         output_path,
                         fps=fps,
                         font_size=font_size,
                         font_path=self.font_path)

    def _render_to_movie(self,
                         output_path,
                         fourcc=None,
                         fps=None,
                         font_size=10):
        """
        Render the movie as an ascii-art video by piping JPEG frames into
        an external ffmpeg process.

        Parameters
        ----------
        output_path : str
            Output video file path.
        fourcc : optional
            Unused; kept for the abandoned OpenCV writer code path below.
        fps : float, optional
            Output frame rate; defaults to the movie's native rate.
        font_size : int
            Font size used when rasterizing each text frame.

        NOTE(review): ``StatusBar`` is not imported anywhere visible in this
        module -- confirm where it is supposed to come from.
        """
        fps = fps or self.default_fps
        video = cv2.VideoCapture(self.movie_path)
        frames = 0
        status = StatusBar(text='Counting frames: ')
        # First pass: count frames and determine the rendered image size.
        while 1:
            result, frame = video.read()
            if not isinstance(frame, np.ndarray):
                break
            if frames == 0:
                # get resulting image size once
                ascii_img = AsciiImage(frame,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut,
                                       font_path=self.font_path)
                pil_img = ascii_to_pil(ascii_img.data, font_path=self.font_path)
                img_size = pil_img.size
            frames += 1
            status.update_custom(frames)
        video.release()
        status.complete()
        #status = StatusBar(frames, "Rendering frames: ")
        # Second pass: re-open the movie and stream rasterized frames to ffmpeg.
        video = cv2.VideoCapture(self.movie_path)
        # opencv solution?
        # if not fourcc:
        #     fourcc = fourcc = cv2.cv.CV_FOURCC(*'MPEG')
        # output = cv2.VideoWriter(output_path, -1, fps, img_size, 1)
        # ffmpeg solution
        p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec',
                   'mjpeg', '-r', str(fps), '-i', '-', '-vcodec',
                   'mpeg4', '-qscale', '5', '-r', str(fps), output_path],
                  stdin=PIPE)
        for i in range(frames):
            result, frame = video.read()
            if not isinstance(frame, np.ndarray):
                break
            if result:
                ascii_img = AsciiImage(frame,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut,
                                       font_path=self.font_path)
                pil_img = ascii_to_pil(ascii_img.data,
                                       font_size=font_size,
                                       font_path=self.font_path)
                # Each JPEG written to ffmpeg's stdin becomes one output frame.
                pil_img.save(p.stdin, 'JPEG')
                #numpy_img = np.array(pil_img)
                #output.write(numpy_img) # opencv
                #status.update(i)
            else:
                break
        video.release()
        #output.release() # opencv
        p.stdin.close()
        p.wait()
        #status.complete()
class AsciiCamera(object):
    """
    Live webcam-to-ascii streamer.

    Parameters
    ----------
    camera_id : int
        OpenCV device index of the camera to open.
    scalefactor : float
        Scale of the output in chars/pixel.
    invert : bool
        Invert image intensities before conversion.
    equalize : bool
        Equalize the image histogram to increase contrast.
    lut : str
        Name of the character lookup table to use.
    """
    def __init__(self,
                 camera_id=0,
                 scalefactor=0.2,
                 invert=False,
                 equalize=True,
                 lut="simple"):
        self.scalefactor = scalefactor
        self.invert = invert
        self.camera_id = camera_id
        self.equalize = equalize
        self.lut = lut
        #webcam?
        self.video = cv2.VideoCapture(self.camera_id)
        self.frame_intervals = []
        self.draw_times = []

    def stream(self, fps=15.0):
        """
        Stream the camera to the terminal as ascii until the feed ends.

        Parameters
        ----------
        fps : float
            Upper bound on the displayed frame rate.

        Raises
        ------
        IOError
            If the camera produces no frames at all.
        """
        # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
        # where available and fall back only on ancient interpreters.
        clock = getattr(time, "perf_counter", None) or time.clock
        frame = 0
        t = clock()
        while 1:
            result, image = self.video.read()
            if not isinstance(image, np.ndarray):
                if frame == 0:
                    raise IOError("No frames available. Bro, do you even camera?")
                ##TODO: find some way to break out besides ^C
                print("End of movie.")
                break
            if result:
                ascii_img = AsciiImage(image,
                                       scalefactor=self.scalefactor,
                                       invert=self.invert,
                                       equalize=self.equalize,
                                       lut=self.lut)
                # Size the terminal to the first frame; ignore terminals that
                # refuse resizing (was a bare except, which also swallowed
                # KeyboardInterrupt).
                if frame == 0:
                    try:
                        console.set_terminal_size(ascii_img.size)
                    except Exception:
                        pass
                console.clear_term()
                print(ascii_img)
                frame += 1
            else:
                break
            # Sleep off whatever time is left in this frame's budget.
            draw_time = clock() - t
            t = clock()
            remaining = 1.0 / fps - draw_time
            if remaining > 0:
                time.sleep(remaining)
                interval = draw_time + remaining
            else:
                interval = draw_time
            self.frame_intervals.append(interval)
            self.draw_times.append(draw_time)
        print("Total frames displayed:", frame)
        print("Avg draw time:", np.mean(self.draw_times))
        print("Avg frame interval:", np.mean(self.frame_intervals))
        print("Max frame interval:", np.max(self.frame_intervals))
        print("Min frame interval:", np.min(self.frame_intervals))
        self.release()

    def release(self):
        """Release the underlying OpenCV capture device."""
        self.video.release()
def generate_sequence(imageseq,
                      scalefactor=0.1,
                      invert=False,
                      equalize=True,
                      lut='simple',
                      font_path=None):
    """
    Convert an iterable of images into a list of AsciiImage frames.

    Parameters
    ----------
    imageseq : iterable
        Sequence of images accepted by AsciiImage.
    scalefactor : float
        Scale of the output in chars/pixel.
    invert : bool
        Invert image intensities before conversion.
    equalize : bool
        Equalize each image's histogram.
    lut : str
        Name of the character lookup table.
    font_path : str, optional
        Path to the font used for aspect correction.

    Returns
    -------
    list of AsciiImage
    """
    return [
        AsciiImage(frame,
                   scalefactor,
                   invert=invert,
                   equalize=equalize,
                   lut=lut,
                   font_path=font_path)
        for frame in imageseq
    ]
def play_sequence(seq, fps=30, repeats=1):
    """
    Play a sequence of AsciiImage frames in the terminal.

    Parameters
    ----------
    seq : list of AsciiImage
        Frames to display; must be non-empty.
    fps : float
        Upper bound on the displayed frame rate.
    repeats : int
        Number of times to play the sequence. (Previously accepted but
        silently ignored; now honored. The old default behavior -- one
        play-through -- is unchanged.)
    """
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
    # where available and fall back only on ancient interpreters.
    clock = getattr(time, "perf_counter", None) or time.clock
    shape = seq[0].size
    console.set_terminal_size(shape)
    t = clock()
    for _ in range(repeats):
        for im in seq:
            console.clear_term()
            print(im)
            # Sleep off the remainder of this frame's time budget.
            interval = clock() - t
            t = clock()
            remaining = 1.0 / fps - interval
            if remaining > 0:
                time.sleep(remaining)
if __name__ == '__main__':
    # Module is intended for import; no CLI entry point is defined here.
    pass
|
en
| 0.568382
|
#!/usr/bin/env python ASCII Toolbox for Converting Images, Movies, Gifs, and Video Feed Created on 14 Aug 2014 @author: <NAME> An image representation of single frame or image file. Parameters ---------- image : str, np.ndarray, PIL.Image Image to convert to text. Can be file path, numpy array, or PIL image scalefactor : float Scale factor for image. Units are chars/pixel, automatically adjusted for the rectangular-ness of characters. invert : bool Whether to invert the intensity values equalize : True Equalize the image histogram to increase contrast. This should be set to True for most images. Examples -------- >>> ascii = AsciiImage('rubyrhod.jpeg') >>> print(ascii) # default correction factor for converting # error otherwise Movie object for playing and rendering movies. Parameters ---------- movie_path : str File path or web address for movie. scalefactor : float Scale of the image in chars / pixel invert : bool Invert image before processing Examples -------- >>> movie = AsciiMovie('awesome_movie.avi') >>> movie.play(fps=24.0) # movie is a file # default correction factor for converting # lets just play movies once by default #set terminal size on the first image? Render text to gif of text. Parameters ---------- output_path : str Output file path. #get # of frames and img size #get resulting image size once #status = StatusBar(frames, "Rendering frames: ") # opencv solution? # if not fourcc: # fourcc = fourcc = cv2.cv.CV_FOURCC(*'MPEG') # output = cv2.VideoWriter(output_path, -1, fps, img_size, 1) # ffmpeg solution #numpy_img = np.array(pil_img) #output.write(numpy_img) # opencv #status.update(i) #output.release() # opencv #status.complete() #webcam? ##TODO: find some way to break out besides ^C #set terminal size on the first image?
| 3.37317
| 3
|
modules/dap/variable.py
|
daveleroy/sublime_debug
| 1
|
6626369
|
<reponame>daveleroy/sublime_debug
from __future__ import annotations
from dataclasses import dataclass
import os
from ..typecheck import *
from ..import core
from .import dap
if TYPE_CHECKING:
from .session import Session
@dataclass
class SourceLocation:
	"""A position (file plus optional line/column) in a debuggee source."""
	source: dap.Source
	line: int|None = None
	column: int|None = None

	@staticmethod
	def from_path(file: str, line: int|None, column: int|None) -> SourceLocation:
		"""Build a location from a filesystem path, deriving the display name."""
		base = os.path.basename(file)
		return SourceLocation(dap.Source(base, file), line, column)

	@property
	def name(self) -> str:
		"""Short display name such as ``file.py@line:column``."""
		base = os.path.basename(self.source.name or '??')
		if self.line and self.column:
			return f'{base}@{self.line}:{self.column}'
		if self.line:
			return f'{base}@{self.line}'
		return base
class Variable:
	"""A named value tracked by a debug session.

	Wraps DAP variables, scopes and evaluate results behind one interface
	and lazily fetches children through the owning session.
	"""

	def __init__(self, session: Session, name: str, value: str|None, variablesReference: int|None, containerVariablesReference: int|None = None, evaluateName: str|None = None, memoryReference: str|None = None) -> None:
		self.session = session
		self.name = name
		self.value = value
		self.evaluateName = evaluateName
		self.memoryReference = memoryReference
		self.variablesReference = variablesReference
		self.containerVariablesReference = containerVariablesReference
		# Future caching the one-and-only children request for this node.
		self.fetched: core.Future[list[Variable]]|None = None

	@staticmethod
	def from_variable(session: Session, containerVariablesReference: int, variable: dap.Variable):
		"""Wrap a dap.Variable that lives inside a container reference."""
		return Variable(
			session,
			variable.name,
			variable.value,
			variable.variablesReference,
			containerVariablesReference,
			variable.evaluateName,
			variable.memoryReference,
		)

	@staticmethod
	def from_scope(session: Session, scope: dap.Scope):
		"""Wrap a dap.Scope; scopes carry no value of their own."""
		return Variable(session, scope.name, None, scope.variablesReference)

	@staticmethod
	def from_evaluate(session: Session, name: str, evaluate: dap.EvaluateResponse):
		"""Wrap the result of an evaluate request under the given name."""
		return Variable(session, name, evaluate.result, evaluate.variablesReference)

	async def fetch(self):
		"""Request this variable's children from the debug adapter."""
		assert self.variablesReference
		return await self.session.get_variables(self.variablesReference)

	async def children(self) -> list[Variable]:
		"""Return (and cache) this variable's children; [] if it has none."""
		if not self.has_children:
			return []
		if not self.fetched:
			self.fetched = core.run(self.fetch())
		return await self.fetched

	@property
	def has_children(self) -> bool:
		return bool(self.variablesReference)
|
from __future__ import annotations
from dataclasses import dataclass
import os
from ..typecheck import *
from ..import core
from .import dap
if TYPE_CHECKING:
from .session import Session
@dataclass
class SourceLocation:
	"""A position (file plus optional line/column) in a debuggee source."""
	source: dap.Source
	line: int|None = None
	column: int|None = None

	@staticmethod
	def from_path(file: str, line: int|None, column: int|None) -> SourceLocation:
		"""Build a location from a filesystem path, deriving the display name."""
		base = os.path.basename(file)
		return SourceLocation(dap.Source(base, file), line, column)

	@property
	def name(self) -> str:
		"""Short display name such as ``file.py@line:column``."""
		base = os.path.basename(self.source.name or '??')
		if self.line and self.column:
			return f'{base}@{self.line}:{self.column}'
		if self.line:
			return f'{base}@{self.line}'
		return base
class Variable:
	"""A named value tracked by a debug session.

	Wraps DAP variables, scopes and evaluate results behind one interface
	and lazily fetches children through the owning session.
	"""

	def __init__(self, session: Session, name: str, value: str|None, variablesReference: int|None, containerVariablesReference: int|None = None, evaluateName: str|None = None, memoryReference: str|None = None) -> None:
		self.session = session
		self.name = name
		self.value = value
		self.evaluateName = evaluateName
		self.memoryReference = memoryReference
		self.variablesReference = variablesReference
		self.containerVariablesReference = containerVariablesReference
		# Future caching the one-and-only children request for this node.
		self.fetched: core.Future[list[Variable]]|None = None

	@staticmethod
	def from_variable(session: Session, containerVariablesReference: int, variable: dap.Variable):
		"""Wrap a dap.Variable that lives inside a container reference."""
		return Variable(
			session,
			variable.name,
			variable.value,
			variable.variablesReference,
			containerVariablesReference,
			variable.evaluateName,
			variable.memoryReference,
		)

	@staticmethod
	def from_scope(session: Session, scope: dap.Scope):
		"""Wrap a dap.Scope; scopes carry no value of their own."""
		return Variable(session, scope.name, None, scope.variablesReference)

	@staticmethod
	def from_evaluate(session: Session, name: str, evaluate: dap.EvaluateResponse):
		"""Wrap the result of an evaluate request under the given name."""
		return Variable(session, name, evaluate.result, evaluate.variablesReference)

	async def fetch(self):
		"""Request this variable's children from the debug adapter."""
		assert self.variablesReference
		return await self.session.get_variables(self.variablesReference)

	async def children(self) -> list[Variable]:
		"""Return (and cache) this variable's children; [] if it has none."""
		if not self.has_children:
			return []
		if not self.fetched:
			self.fetched = core.run(self.fetch())
		return await self.fetched

	@property
	def has_children(self) -> bool:
		return bool(self.variablesReference)
|
none
| 1
| 2.40186
| 2
|
|
tests/executor.py
|
Chisanan232/pyocean
| 0
|
6626370
|
<gh_stars>0
from multirunnable import RunningMode, SimpleExecutor
from multirunnable.parallel.strategy import ProcessStrategy
from multirunnable.concurrent.strategy import ThreadStrategy
from multirunnable.coroutine.strategy import GreenThreadStrategy, AsynchronousStrategy
from .test_config import Worker_Size, Running_Diff_Time, Test_Function_Sleep_Time
from abc import ABCMeta, abstractmethod
from typing import List
import threading
import pytest
import time
import os
# Local aliases of the shared test configuration values.
_Worker_Size = Worker_Size
_Running_Diff_Time: int = Running_Diff_Time
_Test_Function_Sleep_Time = Test_Function_Sleep_Time

# Module-level records mutated by the target functions that the executor
# runs; the _chk_* helpers inspect these to verify how the workers ran.
Running_Parent_PID = None
Running_Count = 0
Running_Thread_IDs: List = []
Running_PPIDs: List = []
Running_Current_Threads: List = []
Running_Finish_Timestamp: List = []

# Locks guarding the records above against concurrent worker updates.
_Thread_Lock = threading.Lock()
_Thread_RLock = threading.RLock()
def reset_running_flag() -> None:
    """Reset the shared run counter before a test starts."""
    global Running_Count
    Running_Count = 0
def reset_running_timer() -> None:
    """Empty all of the per-run bookkeeping lists before a test starts."""
    # Clear in place so every module that holds a reference sees the reset.
    for _records in (Running_Thread_IDs, Running_PPIDs,
                     Running_Current_Threads, Running_Finish_Timestamp):
        del _records[:]
@pytest.fixture(scope="function")
def executor_as_process():
    # Fresh parallel (process-backed) executor for each test.
    return SimpleExecutor(mode=RunningMode.Parallel, executors=_Worker_Size)

@pytest.fixture(scope="function")
def executor_as_thread():
    # Fresh concurrent (thread-backed) executor for each test.
    return SimpleExecutor(mode=RunningMode.Concurrent, executors=_Worker_Size)

@pytest.fixture(scope="function")
def executor_as_green_thread():
    # Fresh green-thread executor for each test.
    return SimpleExecutor(mode=RunningMode.GreenThread, executors=_Worker_Size)

@pytest.fixture(scope="function")
def executor_as_asynchronous():
    # Fresh asynchronous executor for each test.
    return SimpleExecutor(mode=RunningMode.Asynchronous, executors=_Worker_Size)
class TestSimpleExecutor:
    """
    Description:
        Testing executor which may be as Process, Thread, Green Thread or Asynchronous object.
        The responsibility of this object is calling the mapping method(s) by the RunningMode.
        For example, it will use 'multiprocessing.Process.start' when you call 'run' with RunningMode.Parallel.
        For the testing concern, we should pay the attention to the feature of responsibility which means
        it should target at the feature about 'Procedure' and 'Adapter of features', doesn't working process.
    """

    def test_initial_running_strategy_with_parallel(self, executor_as_process: SimpleExecutor):
        executor_as_process._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, ProcessStrategy), f"It should be an sub-instance of 'ProcessStrategy'."

    def test_initial_running_strategy_with_concurrent(self, executor_as_thread: SimpleExecutor):
        executor_as_thread._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, ThreadStrategy), f"It should be an sub-instance of 'ThreadStrategy'."

    def test_initial_running_strategy_with_coroutine(self, executor_as_green_thread: SimpleExecutor):
        executor_as_green_thread._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, GreenThreadStrategy), f"It should be an sub-instance of 'GreenThreadStrategy'."

    def test_initial_running_strategy_with_asynchronous(self, executor_as_asynchronous: SimpleExecutor):
        executor_as_asynchronous._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, AsynchronousStrategy), f"It should be an sub-instance of 'AsynchronousStrategy'."

    @pytest.mark.skip(reason="Not implement testing logic.")
    def test_start_new_worker(self, executor_as_thread: SimpleExecutor):
        def _target():
            pass

        # Bug fix: the original called the undefined name *executor_as_process*
        # (the injected fixture here is *executor_as_thread*), which would
        # raise NameError once this test is un-skipped.
        # NOTE(review): _target is defined but never passed; wire it in once
        # the intended start_new_worker signature is settled.
        executor_as_thread.start_new_worker()

    def test_run(self, executor_as_thread: SimpleExecutor):
        TestSimpleExecutor._initial()

        def _target(*args, **kwargs):
            # Record which thread ran and when; the lock guards the shared
            # module-level record lists against concurrent appends.
            global Running_Count, Running_Thread_IDs, Running_PPIDs, Running_Current_Threads, Running_Finish_Timestamp

            with _Thread_Lock:
                Running_Count += 1

                _pid = os.getpid()
                _ppid = os.getppid()
                _ident = threading.get_ident()
                # _time = str(datetime.datetime.now())
                _time = int(time.time())

                Running_Thread_IDs.append(_ident)
                Running_PPIDs.append(_ppid)
                Running_Current_Threads.append(str(threading.current_thread()))
                Running_Finish_Timestamp.append(_time)

            time.sleep(Test_Function_Sleep_Time)
            return f"result_{threading.current_thread()}"

        executor_as_thread.run(function=_target)

        # Do some checking
        # 1. The amount of workers should be the same with the value of option *executors*.
        # 2. The amount of thread IDs should be the same with the value of option *executors*.
        # 3. The done-timestamp should be very close.
        TestSimpleExecutor._chk_run_record()

    def test_map(self, executor_as_thread: SimpleExecutor):
        TestSimpleExecutor._initial()

        # _args = ("index_1", "index_2", "index_3", "index_4", "index_5")    # Bug 1.
        _args = [("index_1",), ("index_2",), ("index_3",), ("index_4",), ("index_5",)]

        def _target(*args, **kwargs):
            # Record which thread ran and verify the argument fan-out.
            global Running_Count, Running_Thread_IDs, Running_PPIDs, Running_Current_Threads, Running_Finish_Timestamp

            with _Thread_Lock:
                Running_Count += 1

                if args:
                    if len(args) == 1:
                        assert {args} <= set(_args), f"The argument *args* should be one of element of the input outside."
                    else:
                        assert set(args) <= set(_args), f"The argument *args* should be one of element of the input outside."
                    if len(args) > 1:
                        assert args == _args, f"The argument *args* should be same as the global variable 'Test_Function_Args'."
                if kwargs:
                    assert kwargs is None or kwargs == {}, f"The argument *kwargs* should be empty or None value."

                _pid = os.getpid()
                _ppid = os.getppid()
                _ident = threading.get_ident()
                # _time = str(datetime.datetime.now())
                _time = int(time.time())

                Running_Thread_IDs.append(_ident)
                Running_PPIDs.append(_ppid)
                Running_Current_Threads.append(str(threading.current_thread()))
                Running_Finish_Timestamp.append(_time)

            time.sleep(Test_Function_Sleep_Time)
            return f"result_{threading.current_thread()}"

        executor_as_thread.map(function=_target, args_iter=_args)

        # Do some checking
        # 1. The amount of workers should be the same with the amount of parameters.
        # 2. The amount of thread IDs should be the same with the amount of parameters.
        # 3. The done-timestamp should be very close.
        TestSimpleExecutor._chk_map_record(len(_args))

    def test_map_with_function(self, executor_as_thread: SimpleExecutor):
        TestSimpleExecutor._initial()

        # Per-test records; each target function bumps its own flag so we can
        # verify that every function ran exactly once.
        _function_a_flag = 0
        _function_b_flag = 0
        _thread_ids = []
        _threads = []
        _done_timestamp = []

        def _target_a():
            # with _Thread_RLock:
            nonlocal _function_a_flag
            _function_a_flag += 1
            _thread_ids.append(threading.get_ident())
            _threads.append(threading.current_thread())
            _done_timestamp.append(int(time.time()))
            time.sleep(Test_Function_Sleep_Time)

        def _target_b():
            # with _Thread_RLock:
            nonlocal _function_b_flag
            _function_b_flag += 1
            _thread_ids.append(threading.get_ident())
            _threads.append(threading.current_thread())
            _done_timestamp.append(int(time.time()))
            time.sleep(Test_Function_Sleep_Time)

        _functions = [_target_a, _target_b]
        executor_as_thread.map_with_function(functions=_functions)

        # Do some checking
        # 1. The amount of workers should be the same with the amount of functions.
        # 2. The amount of thread IDs should be the same with the amount of functions.
        # 3. The done-timestamp should be very close.
        TestSimpleExecutor._chk_map_with_function(_functions, _function_a_flag, _function_b_flag, _thread_ids, _threads, _done_timestamp)

    def test_terminal(self, executor_as_thread: SimpleExecutor):
        try:
            executor_as_thread.terminal()
        except Exception as e:
            assert False, f"It should work finely without any issue. Please check it."
        else:
            assert True, f"It work finely without any issue."

    def test_kill(self, executor_as_thread: SimpleExecutor):
        try:
            executor_as_thread.kill()
        except Exception as e:
            assert False, f"It should work finely without any issue. Please check it."
        else:
            assert True, f"It work finely without any issue."

    @pytest.mark.skip(reason="Not implement testing logic.")
    def test_result(self, executor_as_thread: SimpleExecutor):
        executor_as_thread.result()

    @staticmethod
    def _initial():
        # Reset the shared counters and records before each test run.
        reset_running_flag()
        reset_running_timer()

        global Running_Parent_PID
        Running_Parent_PID = os.getpid()

    @staticmethod
    def _chk_run_record():
        """Verify the records produced by *run*: worker count and timing."""
        assert Running_Count == _Worker_Size, f"The running count should be the same as the process pool size."

        _ppid_list = Running_PPIDs[:]
        _thread_id_list = Running_Thread_IDs[:]
        _current_thread_list = Running_Current_Threads[:]
        _timestamp_list = Running_Finish_Timestamp[:]

        # assert len(set(_ppid_list)) == 1, f"The PPID of each process should be the same."
        # assert _ppid_list[0] == Running_Parent_PID, f"The PPID should equal to {Running_Parent_PID}. But it got {_ppid_list[0]}."
        assert len(_thread_id_list) == _Worker_Size, f"The count of PID (no de-duplicate) should be the same as the count of processes."
        assert len(set(_thread_id_list)) == _Worker_Size, f"The count of PID (de-duplicate) should be the same as the count of processes."
        assert len(_thread_id_list) == len(_current_thread_list), f"The count of current process name (no de-duplicate) should be equal to count of PIDs."
        assert len(set(_thread_id_list)) == len(set(_current_thread_list)), f"The count of current process name (de-duplicate) should be equal to count of PIDs."

        _max_timestamp = max(_timestamp_list)
        _min_timestamp = min(_timestamp_list)
        _diff_timestamp = _max_timestamp - _min_timestamp
        assert _diff_timestamp <= Running_Diff_Time, f"Processes should be run in the same time period."

    @staticmethod
    def _chk_map_record(_argument_size):
        """Verify the records produced by *map*: one worker per argument."""
        assert Running_Count == _argument_size, f"The running count should be the same as the process pool size."

        _ppid_list = Running_PPIDs[:]
        _thread_id_list = Running_Thread_IDs[:]
        _current_thread_list = Running_Current_Threads[:]
        _timestamp_list = Running_Finish_Timestamp[:]

        # assert len(set(_ppid_list)) == 1, f"The PPID of each process should be the same."
        # assert _ppid_list[0] == Running_Parent_PID, f"The PPID should equal to {Running_Parent_PID}. But it got {_ppid_list[0]}."
        assert len(_thread_id_list) == _argument_size, f"The count of PID (no de-duplicate) should be the same as the count of processes."
        assert len(set(_thread_id_list)) == _argument_size, f"The count of PID (de-duplicate) should be the same as the count of processes."
        assert len(_thread_id_list) == len(_current_thread_list), f"The count of current process name (no de-duplicate) should be equal to count of PIDs."
        assert len(set(_thread_id_list)) == len(set(_current_thread_list)), f"The count of current process name (de-duplicate) should be equal to count of PIDs."

        _max_timestamp = max(_timestamp_list)
        _min_timestamp = min(_timestamp_list)
        _diff_timestamp = _max_timestamp - _min_timestamp
        assert _diff_timestamp <= Running_Diff_Time, f"Processes should be run in the same time period."

    @staticmethod
    def _chk_map_with_function(_functions, _function_a_flag, _function_b_flag, _thread_ids, _threads, _done_timestamp):
        """Verify the records produced by *map_with_function*: each function ran once."""
        assert _function_a_flag == 1, f"The running count should be the same as the amount of function '_target_a'."
        assert _function_b_flag == 1, f"The running count should be the same as the amount of function '_target_b'."

        _thread_id_list = _thread_ids[:]
        _current_thread_list = _threads[:]
        _timestamp_list = _done_timestamp[:]

        _function_amount = len(_functions)

        assert len(_thread_id_list) == _function_amount, f"The count of PID (no de-duplicate) should be the same as the count of processes."
        assert len(set(_thread_id_list)) == _function_amount, f"The count of PID (de-duplicate) should be the same as the count of processes."
        assert len(_thread_id_list) == len(_current_thread_list), f"The count of current process name (no de-duplicate) should be equal to count of PIDs."
        assert len(set(_thread_id_list)) == len(set(_current_thread_list)), f"The count of current process name (de-duplicate) should be equal to count of PIDs."

        _max_timestamp = max(_timestamp_list)
        _min_timestamp = min(_timestamp_list)
        _diff_timestamp = _max_timestamp - _min_timestamp
        assert _diff_timestamp <= Running_Diff_Time, f"Processes should be run in the same time period."
|
from multirunnable import RunningMode, SimpleExecutor
from multirunnable.parallel.strategy import ProcessStrategy
from multirunnable.concurrent.strategy import ThreadStrategy
from multirunnable.coroutine.strategy import GreenThreadStrategy, AsynchronousStrategy
from .test_config import Worker_Size, Running_Diff_Time, Test_Function_Sleep_Time
from abc import ABCMeta, abstractmethod
from typing import List
import threading
import pytest
import time
import os
# Local aliases of the shared test configuration values.
_Worker_Size = Worker_Size
_Running_Diff_Time: int = Running_Diff_Time
_Test_Function_Sleep_Time = Test_Function_Sleep_Time

# Module-level records mutated by the target functions that the executor
# runs; the _chk_* helpers inspect these to verify how the workers ran.
Running_Parent_PID = None
Running_Count = 0
Running_Thread_IDs: List = []
Running_PPIDs: List = []
Running_Current_Threads: List = []
Running_Finish_Timestamp: List = []

# Locks guarding the records above against concurrent worker updates.
_Thread_Lock = threading.Lock()
_Thread_RLock = threading.RLock()
def reset_running_flag() -> None:
    """Reset the shared run counter to zero before a test starts."""
    global Running_Count
    Running_Count = 0
def reset_running_timer() -> None:
    """Empty every module-level recording list (idents, PPIDs, names, timestamps).

    Clears the lists in place so existing references held elsewhere stay valid.
    """
    Running_Thread_IDs.clear()
    Running_PPIDs.clear()
    Running_Current_Threads.clear()
    Running_Finish_Timestamp.clear()
@pytest.fixture(scope="function")
def executor_as_process():
    """Fresh process-backed (Parallel mode) SimpleExecutor for each test."""
    executor = SimpleExecutor(executors=_Worker_Size, mode=RunningMode.Parallel)
    return executor
@pytest.fixture(scope="function")
def executor_as_thread():
    """Fresh thread-backed (Concurrent mode) SimpleExecutor for each test."""
    executor = SimpleExecutor(executors=_Worker_Size, mode=RunningMode.Concurrent)
    return executor
@pytest.fixture(scope="function")
def executor_as_green_thread():
    """Fresh green-thread-backed SimpleExecutor for each test."""
    executor = SimpleExecutor(executors=_Worker_Size, mode=RunningMode.GreenThread)
    return executor
@pytest.fixture(scope="function")
def executor_as_asynchronous():
    """Fresh asyncio-backed (Asynchronous mode) SimpleExecutor for each test."""
    executor = SimpleExecutor(executors=_Worker_Size, mode=RunningMode.Asynchronous)
    return executor
class TestSimpleExecutor:
    """
    Description:
        Testing executor which may be as Process, Thread, Green Thread or Asynchronous object.
        The responsibility of this object is calling the mapping method(s) by the RunningMode.
        For example, it will use 'multiprocessing.Process.start' when you call 'run' with RunningMode.Parallel.

        For the testing concern, we should pay the attention to the feature of responsibility which means
        it should target at the feature about 'Procedure' and 'Adapter of features', doesn't working process.
    """

    def test_initial_running_strategy_with_parallel(self, executor_as_process: SimpleExecutor):
        """Parallel mode should install a 'ProcessStrategy' as the general runnable strategy."""
        executor_as_process._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, ProcessStrategy), f"It should be an sub-instance of 'ProcessStrategy'."

    def test_initial_running_strategy_with_concurrent(self, executor_as_thread: SimpleExecutor):
        """Concurrent mode should install a 'ThreadStrategy'."""
        executor_as_thread._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, ThreadStrategy), f"It should be an sub-instance of 'ThreadStrategy'."

    def test_initial_running_strategy_with_coroutine(self, executor_as_green_thread: SimpleExecutor):
        """GreenThread mode should install a 'GreenThreadStrategy'."""
        executor_as_green_thread._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, GreenThreadStrategy), f"It should be an sub-instance of 'GreenThreadStrategy'."

    def test_initial_running_strategy_with_asynchronous(self, executor_as_asynchronous: SimpleExecutor):
        """Asynchronous mode should install an 'AsynchronousStrategy'."""
        executor_as_asynchronous._initial_running_strategy()

        from multirunnable.executor import General_Runnable_Strategy
        assert General_Runnable_Strategy is not None, f"It should be assign running-strategy instance."
        assert isinstance(General_Runnable_Strategy, AsynchronousStrategy), f"It should be an sub-instance of 'AsynchronousStrategy'."

    @pytest.mark.skip(reason="Not implement testing logic.")
    def test_start_new_worker(self, executor_as_thread: SimpleExecutor):

        def _target():
            pass

        # Bug fix: the original body called 'executor_as_process.start_new_worker()',
        # but no such name exists in this test -- the injected fixture is
        # 'executor_as_thread' -- so the test would raise NameError once un-skipped.
        executor_as_thread.start_new_worker()

    def test_run(self, executor_as_thread: SimpleExecutor):
        """'run' should spawn exactly _Worker_Size workers that all execute the target concurrently."""
        TestSimpleExecutor._initial()

        def _target(*args, **kwargs):
            # Record one entry per worker under the lock so the shared lists stay consistent.
            global Running_Count, Running_Thread_IDs, Running_PPIDs, Running_Current_Threads, Running_Finish_Timestamp

            with _Thread_Lock:
                Running_Count += 1

                _ppid = os.getppid()
                _ident = threading.get_ident()
                _time = int(time.time())

                Running_Thread_IDs.append(_ident)
                Running_PPIDs.append(_ppid)
                Running_Current_Threads.append(str(threading.current_thread()))
                Running_Finish_Timestamp.append(_time)

            # Sleep so all workers are alive simultaneously; proves concurrency below.
            time.sleep(Test_Function_Sleep_Time)
            return f"result_{threading.current_thread()}"

        executor_as_thread.run(function=_target)

        # 1. The amount of workers should equal the *executors* option.
        # 2. The amount of distinct thread IDs should equal the *executors* option.
        # 3. The done-timestamps should be very close to each other.
        TestSimpleExecutor._chk_run_record()

    def test_map(self, executor_as_thread: SimpleExecutor):
        """'map' should spawn one worker per argument tuple and pass each tuple through."""
        TestSimpleExecutor._initial()

        _args = [("index_1",), ("index_2",), ("index_3",), ("index_4",), ("index_5",)]

        def _target(*args, **kwargs):
            global Running_Count, Running_Thread_IDs, Running_PPIDs, Running_Current_Threads, Running_Finish_Timestamp

            with _Thread_Lock:
                Running_Count += 1

                # Each worker should receive exactly one of the argument tuples.
                if args:
                    if len(args) == 1:
                        assert {args} <= set(_args), f"The argument *args* should be one of element of the input outside."
                    else:
                        assert set(args) <= set(_args), f"The argument *args* should be one of element of the input outside."
                    if len(args) > 1:
                        assert args == _args, f"The argument *args* should be same as the global variable 'Test_Function_Args'."
                if kwargs:
                    assert kwargs is None or kwargs == {}, f"The argument *kwargs* should be empty or None value."

                _ppid = os.getppid()
                _ident = threading.get_ident()
                _time = int(time.time())

                Running_Thread_IDs.append(_ident)
                Running_PPIDs.append(_ppid)
                Running_Current_Threads.append(str(threading.current_thread()))
                Running_Finish_Timestamp.append(_time)

            time.sleep(Test_Function_Sleep_Time)
            return f"result_{threading.current_thread()}"

        executor_as_thread.map(function=_target, args_iter=_args)

        # One record per argument tuple, a unique worker per record, overlapping execution.
        TestSimpleExecutor._chk_map_record(len(_args))

    def test_map_with_function(self, executor_as_thread: SimpleExecutor):
        """'map_with_function' should run each submitted function exactly once in its own worker."""
        TestSimpleExecutor._initial()

        _function_a_flag = 0
        _function_b_flag = 0

        _thread_ids = []
        _threads = []
        _done_timestamp = []

        def _target_a():
            nonlocal _function_a_flag
            _function_a_flag += 1
            _thread_ids.append(threading.get_ident())
            _threads.append(threading.current_thread())
            _done_timestamp.append(int(time.time()))
            time.sleep(Test_Function_Sleep_Time)

        def _target_b():
            nonlocal _function_b_flag
            _function_b_flag += 1
            _thread_ids.append(threading.get_ident())
            _threads.append(threading.current_thread())
            _done_timestamp.append(int(time.time()))
            time.sleep(Test_Function_Sleep_Time)

        _functions = [_target_a, _target_b]
        executor_as_thread.map_with_function(functions=_functions)

        # One run per function, a unique worker per run, overlapping execution.
        TestSimpleExecutor._chk_map_with_function(_functions, _function_a_flag, _function_b_flag, _thread_ids, _threads, _done_timestamp)

    def test_terminal(self, executor_as_thread: SimpleExecutor):
        """'terminal' must not raise, even on an executor that never started."""
        try:
            executor_as_thread.terminal()
        except Exception as e:
            assert False, f"It should work finely without any issue. Please check it."
        else:
            assert True, f"It work finely without any issue."

    def test_kill(self, executor_as_thread: SimpleExecutor):
        """'kill' must not raise, even on an executor that never started."""
        try:
            executor_as_thread.kill()
        except Exception as e:
            assert False, f"It should work finely without any issue. Please check it."
        else:
            assert True, f"It work finely without any issue."

    @pytest.mark.skip(reason="Not implement testing logic.")
    def test_result(self, executor_as_thread: SimpleExecutor):
        executor_as_thread.result()

    @staticmethod
    def _initial():
        """Reset all module-level counters/recorders and remember the parent PID."""
        reset_running_flag()
        reset_running_timer()

        global Running_Parent_PID
        Running_Parent_PID = os.getpid()

    @staticmethod
    def _chk_run_record():
        # 'run' spawns exactly the pool size of workers, so the map-record check with
        # _Worker_Size as the expected amount covers it. (The original bodies of
        # _chk_run_record and _chk_map_record were line-for-line duplicates.)
        TestSimpleExecutor._chk_map_record(_Worker_Size)

    @staticmethod
    def _chk_map_record(_argument_size):
        """Verify the bookkeeping after a run: one record per expected worker and overlapping execution."""
        assert Running_Count == _argument_size, f"The running count should be the same as the process pool size."

        # Snapshot the shared lists so the checks are not affected by later mutation.
        _thread_id_list = Running_Thread_IDs[:]
        _current_thread_list = Running_Current_Threads[:]
        _timestamp_list = Running_Finish_Timestamp[:]

        # One record per worker, and every record from a distinct worker.
        assert len(_thread_id_list) == _argument_size, f"The count of PID (no de-duplicate) should be the same as the count of processes."
        assert len(set(_thread_id_list)) == _argument_size, f"The count of PID (de-duplicate) should be the same as the count of processes."
        assert len(_thread_id_list) == len(_current_thread_list), f"The count of current process name (no de-duplicate) should be equal to count of PIDs."
        assert len(set(_thread_id_list)) == len(set(_current_thread_list)), f"The count of current process name (de-duplicate) should be equal to count of PIDs."

        # Finish times must cluster, proving concurrent (not serial) execution.
        _max_timestamp = max(_timestamp_list)
        _min_timestamp = min(_timestamp_list)
        _diff_timestamp = _max_timestamp - _min_timestamp
        assert _diff_timestamp <= Running_Diff_Time, f"Processes should be run in the same time period."

    @staticmethod
    def _chk_map_with_function(_functions, _function_a_flag, _function_b_flag, _thread_ids, _threads, _done_timestamp):
        """Verify the bookkeeping after 'map_with_function': each function ran once, concurrently."""
        assert _function_a_flag == 1, f"The running count should be the same as the amount of function '_target_a'."
        assert _function_b_flag == 1, f"The running count should be the same as the amount of function '_target_b'."

        _thread_id_list = _thread_ids[:]
        _current_thread_list = _threads[:]
        _timestamp_list = _done_timestamp[:]

        _function_amount = len(_functions)
        assert len(_thread_id_list) == _function_amount, f"The count of PID (no de-duplicate) should be the same as the count of processes."
        assert len(set(_thread_id_list)) == _function_amount, f"The count of PID (de-duplicate) should be the same as the count of processes."
        assert len(_thread_id_list) == len(_current_thread_list), f"The count of current process name (no de-duplicate) should be equal to count of PIDs."
        assert len(set(_thread_id_list)) == len(set(_current_thread_list)), f"The count of current process name (de-duplicate) should be equal to count of PIDs."

        _max_timestamp = max(_timestamp_list)
        _min_timestamp = min(_timestamp_list)
        _diff_timestamp = _max_timestamp - _min_timestamp
        assert _diff_timestamp <= Running_Diff_Time, f"Processes should be run in the same time period."
|
en
| 0.809377
|
Description: Testing executor which may be as Process, Thread, Green Thread or Asynchronous object. The responsibility of this object is calling the mapping method(s) by the RunningMode. For example, it will use 'multiprocessing.Process.start' when you call 'run' with RunningMode.Parallel. For the testing concern, we should pay the attention to the feature of responsibility which means it should target at the feature about 'Procedure' and 'Adapter of features', doesn't working process. # _time = str(datetime.datetime.now()) # Do some checking # 1. The amount of workers should be the same with the value of option *executors*. # 3. The amount of thread IDs should be the same with the value of option *executors*. # 2. The done-timestamp should be very close. # _args = ("index_1", "index_2", "index_3", "index_4", "index_5") # Bug 1. # _time = str(datetime.datetime.now()) # Do some checking # 1. The amount of workers should be the same with the amount of parameters. # 3. The amount of thread IDs should be the same with the amount of parameters. # 2. The done-timestamp should be very close. # with _Thread_RLock: # with _Thread_RLock: # Do some checking # 1. The amount of workers should be the same with the amount of functions. # 3. The amount of thread IDs should be the same with the amount of functions. # 2. The done-timestamp should be very close. # Test for parameters with '**kwargs' # assert len(set(_ppid_list)) == 1, f"The PPID of each process should be the same." # assert _ppid_list[0] == Running_Parent_PID, f"The PPID should equal to {Running_Parent_PID}. But it got {_ppid_list[0]}." # assert len(set(_ppid_list)) == 1, f"The PPID of each process should be the same." # assert _ppid_list[0] == Running_Parent_PID, f"The PPID should equal to {Running_Parent_PID}. But it got {_ppid_list[0]}."
| 2.471352
| 2
|
server.py
|
ar414-com/Image-to-Image-Search
| 215
|
6626371
|
import glob
import os
from PIL import Image
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from flask import Flask, render_template, request, Response
from werkzeug.utils import secure_filename
import json
from capgen import CaptionGenerator
# Hide all GPUs from CUDA-aware libraries -- presumably to force CPU-only
# inference for the caption model; TODO confirm.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# Elasticsearch client with default connection settings (localhost:9200).
es = Elasticsearch()
# Image-caption generator, created once at import time (model load is expensive).
gencap = CaptionGenerator()
def description_search(query):
    """Full-text search of the 'desearch' index for captions matching *query*.

    Returns a list of [imgurl, description] pairs, keeping only hits scoring at
    least half of the best hit, and only when the best score reaches 0.35;
    otherwise an empty list.
    """
    global es
    results = es.search(
        index="desearch",
        body={
            "size": 20,
            "query": {
                "match": {"description": query}
            }
        })
    # Elasticsearch 7+ response shape: hits.total is an object with 'value'.
    hitCount = results['hits']['total']['value']
    print(results)
    if hitCount > 0:
        # Bug fix: the original used "hitCount is 1" -- identity comparison on an
        # int (a SyntaxWarning since Python 3.8); equality is the correct test.
        if hitCount == 1:
            print(str(hitCount), ' result')
        else:
            print(str(hitCount), 'results')
        answers = []
        max_score = results['hits']['max_score']
        # Relevance gate: only trust reasonably confident matches.
        if max_score >= 0.35:
            for hit in results['hits']['hits']:
                if hit['_score'] > 0.5 * max_score:
                    desc = hit['_source']['description']
                    imgurl = hit['_source']['imgurl']
                    answers.append([imgurl, desc])
    else:
        answers = []
    return answers
app = Flask(__name__)
# Permanent image store, served on the /database gallery page.
app.config['UPLOAD_FOLDER'] = os.path.join('static', 'database')
# Scratch area for images uploaded to /search and /caption.
app.config['TEMP_UPLOAD_FOLDER'] = os.path.join('static', 'uploads')
# Lower-case extension whitelist consumed by allowed_file().
app.config['ALLOWED_EXTENSIONS'] = set(['jpg', 'jpeg', 'png'])
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    Bug fix: the check is now case-insensitive -- the whitelist is lower-case,
    so uploads like 'photo.JPG' were previously rejected.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Search page: on POST, caption the uploaded image and search the index by that caption."""
    global gencap
    if request.method == 'POST':
        # Reject missing, empty or non-whitelisted uploads.
        if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
                request.files['query_img'].filename):
            return render_template('search.html')
        file = request.files['query_img']
        img = Image.open(file.stream)  # PIL image
        # Security fix: sanitise the client-supplied filename before joining it into
        # a filesystem path (a name like '../../evil.png' would otherwise escape the
        # upload folder). The /upload route already does this; now consistent.
        uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], secure_filename(file.filename))
        img.save(uploaded_img_path)
        query = gencap.get_caption(uploaded_img_path)
        answers = description_search(query)
        return render_template('search.html',
                               query_path=uploaded_img_path,
                               answers=answers)
    else:
        return render_template('search.html')
@app.route('/api/search', methods=['POST'])
def api_search():
    """JSON API variant of /search: returns {'success', 'answers'} or a 400 error."""
    global gencap
    if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
            request.files['query_img'].filename):
        return Response(response=json.dumps({'success': False, 'message': 'Uploaded image is invalid or not allowed'}),
                        status=400, mimetype="application/json")
    file = request.files['query_img']
    img = Image.open(file.stream)  # PIL image
    # Security fix: sanitise the client-supplied filename before building the path
    # (prevents path traversal; consistent with the /upload route).
    uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], secure_filename(file.filename))
    img.save(uploaded_img_path)
    query = gencap.get_caption(uploaded_img_path)
    answers = description_search(query)
    return Response(response=json.dumps({'success': True, 'answers': answers}),
                    status=200, mimetype="application/json")
@app.route('/database')
def database():
    """Gallery page: list every image currently stored in the permanent database folder."""
    pattern = os.path.join(app.config['UPLOAD_FOLDER'], '*')
    stored_images = glob.glob(pattern)
    return render_template('database.html', database_images=stored_images)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Add images to the database: save each upload, caption it, and bulk-index
    {imgurl, description} documents into the 'desearch' Elasticsearch index."""
    if request.method == 'POST':
        if 'photos' not in request.files:
            return render_template('database.html')
        actions = []
        for file in request.files.getlist('photos'):
            if file and allowed_file(file.filename):
                # Sanitise the client filename before using it in a filesystem path.
                filename = secure_filename(file.filename)
                file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                file.save(file_path)
                cap = gencap.get_caption(file_path)
                doc = {'imgurl': file_path, 'description': cap}
                actions.append(doc)
        # NOTE(review): 'doc_type' was deprecated in Elasticsearch 7 and removed in
        # 8 -- confirm against the cluster/client version in use.
        bulk(es, actions, index="desearch", doc_type="json")
    return render_template('database.html')
@app.route('/caption', methods=['GET', 'POST'])
def caption():
    """Caption page: on POST, save the uploaded image and render its generated caption."""
    if request.method == 'POST':
        if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
                request.files['query_img'].filename):
            return render_template('caption.html')
        file = request.files['query_img']
        img = Image.open(file.stream)  # PIL image
        # Security fix: sanitise the client-supplied filename before building the path
        # (prevents path traversal; consistent with the /upload route).
        uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], secure_filename(file.filename))
        img.save(uploaded_img_path)
        cap = gencap.get_caption(uploaded_img_path)
        return render_template('caption.html', caption=cap, query_path=uploaded_img_path)
    else:
        return render_template('caption.html')
@app.route('/api/caption', methods=['POST'])
def caption_api():
    """JSON API variant of /caption: returns {'success', 'caption'} or a 400 error."""
    if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
            request.files['query_img'].filename):
        return Response(response=json.dumps({'success': False, 'message': 'Uploaded image is invalid or not allowed'}),
                        status=400, mimetype="application/json")
    file = request.files['query_img']
    img = Image.open(file.stream)  # PIL image
    # Security fix: sanitise the client-supplied filename before building the path
    # (prevents path traversal; consistent with the /upload route).
    uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], secure_filename(file.filename))
    img.save(uploaded_img_path)
    cap = gencap.get_caption(uploaded_img_path)
    return Response(response=json.dumps({'success': True, 'caption': cap}),
                    status=200, mimetype="application/json")
if __name__ == "__main__":
    # Development server only; bind to loopback with the debugger enabled.
    app.run(host="127.0.0.1", debug=True)
|
import glob
import os
from PIL import Image
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from flask import Flask, render_template, request, Response
from werkzeug.utils import secure_filename
import json
from capgen import CaptionGenerator
os.environ['CUDA_VISIBLE_DEVICES'] = ''
es = Elasticsearch()
gencap = CaptionGenerator()
def description_search(query):
    """Full-text search of the 'desearch' index for captions matching *query*.

    Returns a list of [imgurl, description] pairs, keeping only hits scoring at
    least half of the best hit, and only when the best score reaches 0.35;
    otherwise an empty list.
    """
    global es
    results = es.search(
        index="desearch",
        body={
            "size": 20,
            "query": {
                "match": {"description": query}
            }
        })
    # Elasticsearch 7+ response shape: hits.total is an object with 'value'.
    hitCount = results['hits']['total']['value']
    print(results)
    if hitCount > 0:
        # Bug fix: the original used "hitCount is 1" -- identity comparison on an
        # int (a SyntaxWarning since Python 3.8); equality is the correct test.
        if hitCount == 1:
            print(str(hitCount), ' result')
        else:
            print(str(hitCount), 'results')
        answers = []
        max_score = results['hits']['max_score']
        # Relevance gate: only trust reasonably confident matches.
        if max_score >= 0.35:
            for hit in results['hits']['hits']:
                if hit['_score'] > 0.5 * max_score:
                    desc = hit['_source']['description']
                    imgurl = hit['_source']['imgurl']
                    answers.append([imgurl, desc])
    else:
        answers = []
    return answers
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join('static', 'database')
app.config['TEMP_UPLOAD_FOLDER'] = os.path.join('static', 'uploads')
app.config['ALLOWED_EXTENSIONS'] = set(['jpg', 'jpeg', 'png'])
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    Bug fix: the check is now case-insensitive -- the whitelist is lower-case,
    so uploads like 'photo.JPG' were previously rejected.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
@app.route('/')
def index():
return render_template('home.html')
@app.route('/search', methods=['GET', 'POST'])
def search():
global gencap
if request.method == 'POST':
if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
request.files['query_img'].filename):
return render_template('search.html')
file = request.files['query_img']
img = Image.open(file.stream) # PIL image
uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], file.filename)
img.save(uploaded_img_path)
query = gencap.get_caption(uploaded_img_path)
answers = description_search(query)
return render_template('search.html',
query_path=uploaded_img_path,
answers=answers)
else:
return render_template('search.html')
@app.route('/api/search', methods=['POST'])
def api_search():
global gencap
if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
request.files['query_img'].filename):
return Response(response=json.dumps({'success': False, 'message': 'Uploaded image is invalid or not allowed'}),
status=400, mimetype="application/json")
file = request.files['query_img']
img = Image.open(file.stream) # PIL image
uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], file.filename)
img.save(uploaded_img_path)
query = gencap.get_caption(uploaded_img_path)
answers = description_search(query)
return Response(response=json.dumps({'success': True, 'answers': answers}),
status=200, mimetype="application/json")
@app.route('/database')
def database():
images = glob.glob(os.path.join(app.config['UPLOAD_FOLDER'], '*'))
return render_template('database.html', database_images=images)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
if 'photos' not in request.files:
return render_template('database.html')
actions = []
for file in request.files.getlist('photos'):
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
cap = gencap.get_caption(file_path)
doc = {'imgurl': file_path, 'description': cap}
actions.append(doc)
bulk(es, actions, index="desearch", doc_type="json")
return render_template('database.html')
@app.route('/caption', methods=['GET', 'POST'])
def caption():
if request.method == 'POST':
if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
request.files['query_img'].filename):
return render_template('caption.html')
file = request.files['query_img']
img = Image.open(file.stream) # PIL image
uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], file.filename)
img.save(uploaded_img_path)
cap = gencap.get_caption(uploaded_img_path)
return render_template('caption.html', caption=cap, query_path=uploaded_img_path)
else:
return render_template('caption.html')
@app.route('/api/caption', methods=['POST'])
def caption_api():
if 'query_img' not in request.files or request.files['query_img'].filename == '' or not allowed_file(
request.files['query_img'].filename):
return Response(response=json.dumps({'success': False, 'message': 'Uploaded image is invalid or not allowed'}),
status=400, mimetype="application/json")
file = request.files['query_img']
img = Image.open(file.stream) # PIL image
uploaded_img_path = os.path.join(app.config['TEMP_UPLOAD_FOLDER'], file.filename)
img.save(uploaded_img_path)
cap = gencap.get_caption(uploaded_img_path)
return Response(response=json.dumps({'success': True, 'caption': cap}),
status=200, mimetype="application/json")
if __name__ == "__main__":
app.run("127.0.0.1", debug=True)
|
en
| 0.75158
|
# PIL image # PIL image # PIL image # PIL image
| 2.180046
| 2
|
__main__.py
|
g0tmk/internet_connection_monitor
| 0
|
6626372
|
# Package entry point: 'python -m <package>' delegates to monitoring.main().
from monitoring import main
main()
|
# Package entry point: 'python -m <package>' delegates to monitoring.main().
from monitoring import main
main()
|
none
| 1
| 0.942188
| 1
|
|
extensions.py
|
choyiny/flask-api-starter
| 6
|
6626373
|
"""Extensions module - Set up for additional libraries can go in here."""
import logging
from celery import Celery
import config
# logging
logger = logging.getLogger("flask.general")
# celery
celery = Celery(
"app", broker=config.CELERY_BROKER_URL, backend=config.CELERY_RESULT_BACKEND
)
|
"""Extensions module - Set up for additional libraries can go in here."""
import logging
from celery import Celery
import config
# logging
logger = logging.getLogger("flask.general")
# celery
celery = Celery(
"app", broker=config.CELERY_BROKER_URL, backend=config.CELERY_RESULT_BACKEND
)
|
en
| 0.85025
|
Extensions module - Set up for additional libraries can go in here. # logging # celery
| 1.827999
| 2
|
fstream/protocol/base.py
|
33TU/fstream
| 0
|
6626374
|
# <reponame>33TU/fstream
from asyncio import Transport, Future
from asyncio.exceptions import LimitOverrunError
from types import coroutine
from typing import Any, Awaitable, List, Optional, Union
# Awaitable with instant return: the lambda is a generator (the dead 'yield'
# only marks it as one) whose body returns None immediately; types.coroutine
# makes the resulting generator awaitable, so 'await _completed' finishes
# without ever suspending or touching the event loop.
# NOTE(review): a single exhausted generator appears to be re-awaitable
# (re-raises StopIteration(None)) -- confirm, since _completed is shared.
_completed = coroutine(lambda: None if True else (yield))()
class BaseStreamProtocol:
    """asyncio protocol base that buffers incoming data and exposes future-based
    notifications for data arrival, write-buffer drain and connection close.

    NOTE(review): data_received/eof_received are not defined here -- presumably
    supplied by a subclass that appends to ``data_buffer`` and resolves
    ``_data_future``; confirm against the concrete protocols in this package.
    """

    __slots__ = (
        '_loop',
        '_client_connected_cb',
        '_transport',
        '_closed',
        '_exc',
        '_writing_paused',
        '_data_future',
        '_drain_future',
        '_close_future',
        'data_buffer'
    )

    def __init__(self, loop, connected_cb) -> None:
        self._loop = loop
        self._client_connected_cb = connected_cb
        self._transport = None
        self._closed = False
        self._exc = None                      # exception from connection_lost, if any
        self._writing_paused = False
        self._data_future: Optional[Future] = None
        self._drain_future: Optional[Future] = None
        self._close_future: Optional[Future] = None
        self.data_buffer = bytearray()        # unconsumed received bytes

    @property
    def transport(self) -> Transport:
        # Bug fix: the original returned 'self.transport', which re-entered this
        # property and recursed until RecursionError. The asyncio transport is
        # stored on '_transport'.
        return self._transport

    def connection_made(self, transport) -> None:
        """Store the transport and, if configured, hand reader/writer to the callback."""
        self._transport = transport

        if self._client_connected_cb is not None:
            self._loop.create_task(self._client_connected_cb(
                StreamReader(self),
                StreamWriter(self),
            ))

    def connection_lost(self, exc) -> None:
        """Mark the connection closed and settle every pending future.

        With an exception, pending data/drain/close futures fail with it;
        on clean close they resolve with None.
        """
        if self._closed: return

        self._exc = exc
        self._closed = True

        if exc is not None:
            if self._data_future is not None and not self._data_future.done():
                self._data_future.set_exception(exc)
            if self._drain_future is not None and not self._drain_future.done():
                self._drain_future.set_exception(exc)
            if self._close_future is not None and not self._close_future.done():
                self._close_future.set_exception(exc)
        else:
            if self._data_future is not None and not self._data_future.done():
                self._data_future.set_result(None)
            if self._drain_future is not None and not self._drain_future.done():
                self._drain_future.set_result(None)
            if self._close_future is not None and not self._close_future.done():
                self._close_future.set_result(None)

    def pause_writing(self) -> None:
        # Flow control: transport's write buffer passed the high-water mark.
        self._writing_paused = True

    def resume_writing(self) -> None:
        # Flow control resumed; wake anyone awaiting drain.
        self._writing_paused = False

        if self._drain_future is not None:
            self._drain_future.set_result(None)
            self._drain_future = None

    def wait_data_notify(self) -> Awaitable:
        """Return a future resolved when new data arrives; re-enables reading."""
        if self._closed:
            raise self._exc or ConnectionResetError('Connection lost')

        if self._data_future is None:
            self._data_future = self._loop.create_future()
            self._transport.resume_reading()

        return self._data_future

    def wait_drain_notify(self) -> Awaitable:
        """Return an awaitable that completes once the write buffer has drained."""
        if self._closed:
            raise self._exc or ConnectionResetError('Connection lost')

        if not self._writing_paused:
            return _completed    # nothing to wait for; avoid allocating a future

        if self._drain_future is None:
            self._drain_future = self._loop.create_future()

        return self._drain_future

    def wait_close_notify(self) -> Awaitable:
        """Return an awaitable that completes when the connection has closed."""
        if self._closed:
            if self._exc is not None:
                raise self._exc
            else:
                return _completed

        if self._close_future is None:
            self._close_future = self._loop.create_future()

        return self._close_future

    def get_exception(self) -> Optional[Exception]:
        """Exception recorded by connection_lost, or None."""
        return self._exc
class StreamReader:
    """Read-side facade over a BaseStreamProtocol: consumes bytes from the
    protocol's shared ``data_buffer``, awaiting the protocol's data future
    whenever more bytes are needed."""

    __slots__ = ('protocol',)

    def __init__(self, protocol: BaseStreamProtocol) -> None:
        self.protocol = protocol

    async def readuntil(self, separator=b'\n', include_delimiter=True, limit=1024*1024) -> bytearray:
        """
        Read data from the stream until ``separator`` is found.

        Consumes everything up to and including the separator from the buffer;
        the separator is kept in the returned chunk unless
        ``include_delimiter`` is False. Raises LimitOverrunError if more than
        ``limit`` bytes accumulate without the separator appearing.
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc

        data_buffer = self.protocol.data_buffer
        sep_len = len(separator)

        if sep_len == 0:
            raise ValueError('Separator should be at least one-byte string')

        sep_index = data_buffer.find(separator)
        while sep_index == -1:
            data_len = len(data_buffer)
            if data_len > limit:
                raise LimitOverrunError(
                    'Separator is not found, and chunk exceed the limit', data_len)

            await self.protocol.wait_data_notify()
            # Re-scan only the tail: the separator could at most straddle the
            # last (sep_len) bytes of the previously-searched data.
            sep_start = 0 if sep_len > data_len else data_len - sep_len
            sep_index = data_buffer.find(separator, sep_start)

        # Consume through the separator; optionally strip it from the result.
        buffer_len = sep_index + sep_len
        buffer = data_buffer[:buffer_len if include_delimiter else sep_index]
        del data_buffer[:buffer_len]
        return buffer

    async def read(self, nbytes: int) -> Union[bytearray, bytes]:
        """
        Read max ``nbytes`` about of bytes.

        Waits only if the buffer is empty; may return fewer than ``nbytes``
        bytes. Returns bytearray if ``nbytes`` > 0 otherwise bytes (b'').
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc

        if nbytes < 0:
            raise ValueError('read size has to be greater than zero')
        elif nbytes == 0:
            return b''

        data_buffer = self.protocol.data_buffer
        buffer_len = len(data_buffer)

        if buffer_len == 0:
            await self.protocol.wait_data_notify()
            buffer_len = len(data_buffer)

        # Return whatever is available, capped at nbytes.
        read_len = nbytes if nbytes < buffer_len else buffer_len
        buffer = data_buffer[:read_len]
        del data_buffer[:read_len]
        return buffer

    async def readexactly(self, nbytes: int) -> Union[bytearray, bytes]:
        """
        Read exactly ``nbytes`` about of bytes.

        Waits until the buffer holds at least ``nbytes`` bytes.
        Returns bytearray if ``nbytes`` > 0 otherwise bytes (b'').
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc

        if nbytes < 0:
            raise ValueError('readexactly size can not be less than zero')
        elif nbytes == 0:
            return b''

        data_buffer = self.protocol.data_buffer
        while len(data_buffer) < nbytes:
            await self.protocol.wait_data_notify()

        buffer = data_buffer[:nbytes]
        del data_buffer[:nbytes]
        return buffer

    async def readlen(self, limit: int = 1024*1024, endian='little') -> Union[bytearray, bytes]:
        """
        Reads length prefixed message from the stream.
        [u32: length | payload bytes ]

        Raises LimitOverrunError if the declared payload length exceeds
        ``limit``. Returns b'' for a zero-length message.
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc

        if limit < 0:
            raise ValueError('limit size has to be greater than zero')

        data_buffer = self.protocol.data_buffer
        # First, accumulate the 4-byte length header.
        while len(data_buffer) < 4:
            await self.protocol.wait_data_notify()

        buffer_len = int.from_bytes(data_buffer[:4], endian)
        if buffer_len > limit:
            raise LimitOverrunError('buffer length exceed the limit', buffer_len)
        elif buffer_len == 0:
            del data_buffer[:4]
            return b''

        # Then wait for the full payload before consuming header + payload.
        read_len = buffer_len + 4
        while len(data_buffer) < read_len:
            await self.protocol.wait_data_notify()

        buffer = data_buffer[4:read_len]
        del data_buffer[:read_len]
        return buffer
class StreamWriter:
    """Write-side facade over a BaseStreamProtocol: forwards writes to the
    underlying asyncio transport and exposes drain/close awaitables."""

    __slots__ = ('protocol',)

    def __init__(self, protocol: BaseStreamProtocol) -> None:
        self.protocol = protocol

    def close(self) -> None:
        """Close the underlying transport."""
        self.protocol._transport.close()

    def is_closing(self) -> bool:
        """True while the transport is closing or already closed."""
        return self.protocol._transport.is_closing()

    def can_write_eof(self) -> bool:
        """True if the transport supports half-close."""
        return self.protocol._transport.can_write_eof()

    def get_extra_info(self, name, default=None) -> Any:
        """Fetch transport metadata (e.g. 'peername', 'socket')."""
        return self.protocol._transport.get_extra_info(name, default)

    def write(self, buffer: Union[bytes, bytearray]) -> None:
        """Queue raw bytes on the transport."""
        self.protocol._transport.write(buffer)

    def writelines(self, buffers: List[Any]) -> None:
        """Queue several buffers on the transport in one call."""
        self.protocol._transport.writelines(buffers)

    def writelen(self, buffer: Union[bytes, bytearray], endian='little') -> None:
        """
        Writes length prefixed message to stream.
        [u32: length | payload bytes ]
        """
        transport = self.protocol._transport
        header = len(buffer).to_bytes(4, endian)
        transport.write(header)
        transport.write(buffer)

    def write_eof(self) -> None:
        """Half-close the write side of the connection."""
        return self.protocol._transport.write_eof()

    def drain(self) -> Awaitable:
        """Awaitable resolving once the transport can accept more data."""
        return self.protocol.wait_drain_notify()

    def wait_closed(self) -> Awaitable:
        """Awaitable resolving when the connection has fully closed."""
        return self.protocol.wait_close_notify()
|
from asyncio import Transport, Future
from asyncio.exceptions import LimitOverrunError
from types import coroutine
from typing import Any, Awaitable, List, Optional, Union
# Awaitable with instant return
_completed = coroutine(lambda: None if True else (yield))()
class BaseStreamProtocol:
    """Connection-state holder shared by StreamReader and StreamWriter.

    Tracks the transport, the incoming byte buffer, and the one-shot futures
    used to notify waiters about data arrival, write-buffer drain, and
    connection close.
    """
    __slots__ = (
        '_loop',
        '_client_connected_cb',
        '_transport',
        '_closed',
        '_exc',
        '_writing_paused',
        '_data_future',
        '_drain_future',
        '_close_future',
        'data_buffer'
    )
    def __init__(self, loop, connected_cb) -> None:
        self._loop = loop
        self._client_connected_cb = connected_cb
        self._transport = None
        self._closed = False
        self._exc = None
        self._writing_paused = False
        self._data_future: Optional[Future] = None
        self._drain_future: Optional[Future] = None
        self._close_future: Optional[Future] = None
        # shared with StreamReader, which consumes bytes from the front
        self.data_buffer = bytearray()
    @property
    def transport(self) -> Transport:
        """The underlying transport (None until connection_made)."""
        # BUG FIX: previously returned `self.transport`, recursing forever.
        return self._transport
    def connection_made(self, transport) -> None:
        """Store the transport and, for servers, spawn the client handler."""
        self._transport = transport
        if self._client_connected_cb is not None:
            self._loop.create_task(self._client_connected_cb(
                StreamReader(self),
                StreamWriter(self),
            ))
    def connection_lost(self, exc) -> None:
        """Wake every pending waiter, with `exc` if the loss was an error."""
        if self._closed: return
        self._exc = exc
        self._closed = True
        if exc is not None:
            if self._data_future is not None and not self._data_future.done():
                self._data_future.set_exception(exc)
            if self._drain_future is not None and not self._drain_future.done():
                self._drain_future.set_exception(exc)
            if self._close_future is not None and not self._close_future.done():
                self._close_future.set_exception(exc)
        else:
            if self._data_future is not None and not self._data_future.done():
                self._data_future.set_result(None)
            if self._drain_future is not None and not self._drain_future.done():
                self._drain_future.set_result(None)
            if self._close_future is not None and not self._close_future.done():
                self._close_future.set_result(None)
    def pause_writing(self) -> None:
        """Flow control: transport write buffer crossed the high-water mark."""
        self._writing_paused = True
    def resume_writing(self) -> None:
        """Flow control: buffer drained; release anyone awaiting drain."""
        self._writing_paused = False
        if self._drain_future is not None:
            # BUG FIX: guard against InvalidStateError when the waiting task
            # was cancelled (future already done) before the drain completed.
            if not self._drain_future.done():
                self._drain_future.set_result(None)
            self._drain_future = None
    def wait_data_notify(self) -> Awaitable:
        """Future resolved when new bytes land in `data_buffer`."""
        if self._closed:
            raise self._exc or ConnectionResetError('Connection lost')
        if self._data_future is None:
            self._data_future = self._loop.create_future()
            # reading may have been paused by back-pressure; restart it
            self._transport.resume_reading()
        return self._data_future
    def wait_drain_notify(self) -> Awaitable:
        """Awaitable resolved when writing is (or becomes) unpaused."""
        if self._closed:
            raise self._exc or ConnectionResetError('Connection lost')
        if not self._writing_paused:
            return _completed
        if self._drain_future is None:
            self._drain_future = self._loop.create_future()
        return self._drain_future
    def wait_close_notify(self) -> Awaitable:
        """Awaitable resolved when the connection is fully closed."""
        if self._closed:
            if self._exc is not None:
                raise self._exc
            else:
                return _completed
        if self._close_future is None:
            self._close_future = self._loop.create_future()
        return self._close_future
    def get_exception(self) -> Optional[Exception]:
        """The exception that closed the connection, if any."""
        return self._exc
class StreamReader:
    """Read-side facade over a BaseStreamProtocol.

    All methods consume bytes from the front of the protocol's shared
    ``data_buffer``, awaiting ``wait_data_notify()`` whenever more bytes are
    needed.
    """
    __slots__ = ('protocol',)
    def __init__(self, protocol: BaseStreamProtocol) -> None:
        # the protocol owns the transport, the byte buffer and the futures
        self.protocol = protocol
    async def readuntil(self, separator=b'\n', include_delimiter=True, limit=1024*1024) -> bytearray:
        """
        Read data from the stream until ``separator`` is found.

        :param separator: non-empty byte pattern to scan for.
        :param include_delimiter: when True (default) keep the separator at
            the end of the returned buffer; it is always removed from the
            internal buffer either way.
        :param limit: max buffered bytes to scan before raising
            ``LimitOverrunError``.
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc
        data_buffer = self.protocol.data_buffer
        sep_len = len(separator)
        if sep_len == 0:
            raise ValueError('Separator should be at least one-byte string')
        sep_index = data_buffer.find(separator)
        while sep_index == -1:
            data_len = len(data_buffer)
            if data_len > limit:
                raise LimitOverrunError(
                    'Separator is not found, and chunk exceed the limit', data_len)
            await self.protocol.wait_data_notify()
            # Re-scan only the tail: a new occurrence can start no earlier
            # than sep_len bytes before the previously buffered end.
            sep_start = 0 if sep_len > data_len else data_len - sep_len
            sep_index = data_buffer.find(separator, sep_start)
        buffer_len = sep_index + sep_len
        buffer = data_buffer[:buffer_len if include_delimiter else sep_index]
        # consume the returned bytes (and the separator) from the buffer
        del data_buffer[:buffer_len]
        return buffer
    async def read(self, nbytes: int) -> Union[bytearray, bytes]:
        """
        Read max ``nbytes`` about of bytes.
        Returns bytearray if ``nbytes`` > 0 otherwise bytes

        Waits for at most one data notification, so fewer than ``nbytes``
        bytes may be returned.
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc
        if nbytes < 0:
            raise ValueError('read size has to be greater than zero')
        elif nbytes == 0:
            return b''
        data_buffer = self.protocol.data_buffer
        buffer_len = len(data_buffer)
        if buffer_len == 0:
            # buffer empty: block until the transport delivers something
            await self.protocol.wait_data_notify()
            buffer_len = len(data_buffer)
        read_len = nbytes if nbytes < buffer_len else buffer_len
        buffer = data_buffer[:read_len]
        del data_buffer[:read_len]
        return buffer
    async def readexactly(self, nbytes: int) -> Union[bytearray, bytes]:
        """
        Read exactly ``nbytes`` about of bytes.
        Returns bytearray if ``nbytes`` > 0 otherwise bytes
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc
        if nbytes < 0:
            raise ValueError('readexactly size can not be less than zero')
        elif nbytes == 0:
            return b''
        data_buffer = self.protocol.data_buffer
        # keep waiting until the buffer holds the full requested amount
        while len(data_buffer) < nbytes:
            await self.protocol.wait_data_notify()
        buffer = data_buffer[:nbytes]
        del data_buffer[:nbytes]
        return buffer
    async def readlen(self, limit: int = 1024*1024, endian='little') -> Union[bytearray, bytes]:
        """
        Reads length prefixed message from the stream.
        [u32: length | payload bytes ]

        :param limit: maximum accepted payload length; a larger prefix
            raises ``LimitOverrunError`` (the prefix stays buffered).
        :param endian: byte order of the 4-byte length prefix.
        """
        if self.protocol._exc is not None:
            raise self.protocol._exc
        if limit < 0:
            raise ValueError('limit size has to be greater than zero')
        data_buffer = self.protocol.data_buffer
        while len(data_buffer) < 4:
            await self.protocol.wait_data_notify()
        buffer_len = int.from_bytes(data_buffer[:4], endian)
        if buffer_len > limit:
            raise LimitOverrunError('buffer length exceed the limit', buffer_len)
        elif buffer_len == 0:
            # empty payload: consume just the prefix
            del data_buffer[:4]
            return b''
        read_len = buffer_len + 4
        while len(data_buffer) < read_len:
            await self.protocol.wait_data_notify()
        buffer = data_buffer[4:read_len]
        del data_buffer[:read_len]
        return buffer
class StreamWriter:
    """Write-side facade over a BaseStreamProtocol.

    Every call is delegated straight to the protocol's transport; ``drain``
    and ``wait_closed`` hand back the protocol's notification awaitables.
    """
    __slots__ = ('protocol',)
    def __init__(self, protocol: BaseStreamProtocol) -> None:
        self.protocol = protocol
    def write(self, buffer: Union[bytes, bytearray]) -> None:
        """Queue ``buffer`` on the transport."""
        self.protocol._transport.write(buffer)
    def writelines(self, buffers: List[Any]) -> None:
        """Queue every element of ``buffers`` on the transport."""
        self.protocol._transport.writelines(buffers)
    def writelen(self, buffer: Union[bytes, bytearray], endian='little') -> None:
        """
        Writes length prefixed message to stream.
        [u32: length | payload bytes ]
        """
        transport = self.protocol._transport
        transport.write(len(buffer).to_bytes(4, endian))
        transport.write(buffer)
    def write_eof(self) -> None:
        """Half-close the write side of the connection."""
        return self.protocol._transport.write_eof()
    def can_write_eof(self) -> bool:
        """Whether the transport supports half-closing."""
        return self.protocol._transport.can_write_eof()
    def close(self) -> None:
        """Start closing the transport."""
        self.protocol._transport.close()
    def is_closing(self) -> bool:
        """Whether the transport is closed or closing."""
        return self.protocol._transport.is_closing()
    def get_extra_info(self, name, default=None) -> Any:
        """Expose transport metadata (peername, socket, ...)."""
        return self.protocol._transport.get_extra_info(name, default)
    def drain(self) -> Awaitable:
        """Awaitable resolving once the transport's write buffer drains."""
        return self.protocol.wait_drain_notify()
    def wait_closed(self) -> Awaitable:
        """Awaitable resolving when the connection is fully closed."""
        return self.protocol.wait_close_notify()
|
en
| 0.618579
|
# Awaitable with instant return Read data from the stream until ``separator`` is found. Read max ``nbytes`` about of bytes. Returns bytearray if ``nbytes`` > 0 otherwise bytes Read exactly ``nbytes`` about of bytes. Returns bytearray if ``nbytes`` > 0 otherwise bytes Reads length prefixed message from the stream. [u32: length | payload bytes ] Writes length prefixed message to stream. [u32: length | payload bytes ]
| 2.446983
| 2
|
rllab/algos/cem.py
|
prosello/rllab
| 10
|
6626375
|
<reponame>prosello/rllab<gh_stars>1-10
from rllab.algos.base import RLAlgorithm
import numpy as np
from rllab.misc.special import discount_cumsum
from rllab.sampler import parallel_sampler, stateful_pool
from rllab.sampler.utils import rollout
from rllab.core.serializable import Serializable
import rllab.misc.logger as logger
import rllab.plotter as plotter
def _worker_rollout_policy(G, args):
    """Worker task: sample one parameter vector from the current Gaussian,
    roll out the policy with it, and return the annotated path.

    Returns ``((params, path), inc)`` where ``inc`` is the progress
    increment for the collection criterion ("paths" or "samples").
    """
    std = args["sample_std"].flatten()
    mean = args["cur_mean"].flatten()
    # params ~ N(mean, diag(std^2))
    params = np.random.standard_normal(len(mean)) * std + mean
    G.policy.set_param_values(params)
    path = rollout(G.env, G.policy, args["max_path_length"])
    path["returns"] = discount_cumsum(path["rewards"], args["discount"])
    path["undiscounted_return"] = sum(path["rewards"])
    criterion = args["criterion"]
    if criterion == "samples":
        inc = len(path["rewards"])
    elif criterion == "paths":
        inc = 1
    else:
        raise NotImplementedError
    return (params, path), inc
class CEM(RLAlgorithm, Serializable):
    """Cross-Entropy Method over flattened policy parameters.

    Maintains a diagonal Gaussian over the parameter vector; each iteration
    samples candidates, rolls them out, and refits the Gaussian to the
    best-performing fraction (the "elites").
    """

    def __init__(
            self,
            env,
            policy,
            n_itr=500,
            max_path_length=500,
            discount=0.99,
            init_std=1.,
            n_samples=100,
            batch_size=None,
            best_frac=0.05,
            extra_std=1.,
            extra_decay_time=100,
            plot=False,
            **kwargs
    ):
        """
        :param n_itr: Number of iterations.
        :param max_path_length: Maximum length of a single rollout.
        :param batch_size: # of samples from trajs from param distribution, when this
        is set, n_samples is ignored
        :param discount: Discount.
        :param plot: Plot evaluation run after each iteration.
        :param init_std: Initial std for param distribution
        :param extra_std: Decaying std added to param distribution at each iteration
        :param extra_decay_time: Iterations that it takes to decay extra std
        :param n_samples: #of samples from param distribution
        :param best_frac: Best fraction of the sampled params
        :return:
        """
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.batch_size = batch_size
        self.plot = plot
        self.extra_decay_time = extra_decay_time
        self.extra_std = extra_std
        self.best_frac = best_frac
        self.n_samples = n_samples
        self.init_std = init_std
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr

    def train(self):
        """Run the CEM optimization loop for ``n_itr`` iterations."""
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        cur_std = self.init_std
        cur_mean = self.policy.get_param_values()
        n_best = max(1, int(self.n_samples * self.best_frac))
        for itr in range(self.n_itr):
            # Inflate the sampling std with a linearly decaying extra term so
            # early iterations explore more broadly.
            extra_var_mult = max(1.0 - itr / self.extra_decay_time, 0)
            sample_std = np.sqrt(np.square(cur_std) + np.square(self.extra_std) * extra_var_mult)
            # Collection stops either after n_samples paths or after
            # batch_size environment steps, depending on configuration.
            if self.batch_size is None:
                criterion = 'paths'
                threshold = self.n_samples
            else:
                criterion = 'samples'
                threshold = self.batch_size
            infos = stateful_pool.singleton_pool.run_collect(
                _worker_rollout_policy,
                threshold=threshold,
                args=(dict(cur_mean=cur_mean,
                           sample_std=sample_std,
                           max_path_length=self.max_path_length,
                           discount=self.discount,
                           criterion=criterion),)
            )
            xs = np.asarray([info[0] for info in infos])
            paths = [info[1] for info in infos]
            # discounted return of each sampled parameter vector
            fs = np.array([path['returns'][0] for path in paths])
            # BUG FIX: removed stray debug `print(xs.shape, fs.shape)`.
            best_inds = (-fs).argsort()[:n_best]
            best_xs = xs[best_inds]
            # refit the Gaussian to the elite samples
            cur_mean = best_xs.mean(axis=0)
            cur_std = best_xs.std(axis=0)
            best_x = best_xs[0]
            logger.push_prefix('itr #%d | ' % itr)
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array([path['undiscounted_return'] for path in paths])
            logger.record_tabular('AverageReturn',
                                  np.mean(undiscounted_returns))
            # BUG FIX: StdReturn previously logged np.mean again.
            logger.record_tabular('StdReturn',
                                  np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn',
                                  np.max(undiscounted_returns))
            logger.record_tabular('MinReturn',
                                  np.min(undiscounted_returns))
            logger.record_tabular('AverageDiscountedReturn',
                                  np.mean(fs))
            logger.record_tabular('AvgTrajLen',
                                  np.mean([len(path['returns']) for path in paths]))
            logger.record_tabular('NumTrajs',
                                  len(paths))
            self.policy.set_param_values(best_x)
            self.env.log_diagnostics(paths)
            self.policy.log_diagnostics(paths)
            logger.save_itr_params(itr, dict(
                itr=itr,
                policy=self.policy,
                env=self.env,
                cur_mean=cur_mean,
                cur_std=cur_std,
            ))
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
        parallel_sampler.terminate_task()
|
from rllab.algos.base import RLAlgorithm
import numpy as np
from rllab.misc.special import discount_cumsum
from rllab.sampler import parallel_sampler, stateful_pool
from rllab.sampler.utils import rollout
from rllab.core.serializable import Serializable
import rllab.misc.logger as logger
import rllab.plotter as plotter
def _worker_rollout_policy(G, args):
    """Worker task: sample one parameter vector from the current Gaussian,
    roll out the policy with it, and return the annotated path.

    Returns ``((params, path), inc)`` where ``inc`` is the progress
    increment for the collection criterion ("paths" or "samples").
    """
    std = args["sample_std"].flatten()
    mean = args["cur_mean"].flatten()
    # params ~ N(mean, diag(std^2))
    params = np.random.standard_normal(len(mean)) * std + mean
    G.policy.set_param_values(params)
    path = rollout(G.env, G.policy, args["max_path_length"])
    path["returns"] = discount_cumsum(path["rewards"], args["discount"])
    path["undiscounted_return"] = sum(path["rewards"])
    criterion = args["criterion"]
    if criterion == "samples":
        inc = len(path["rewards"])
    elif criterion == "paths":
        inc = 1
    else:
        raise NotImplementedError
    return (params, path), inc
class CEM(RLAlgorithm, Serializable):
    """Cross-Entropy Method over flattened policy parameters.

    Maintains a diagonal Gaussian over the parameter vector; each iteration
    samples candidates, rolls them out, and refits the Gaussian to the
    best-performing fraction (the "elites").
    """

    def __init__(
            self,
            env,
            policy,
            n_itr=500,
            max_path_length=500,
            discount=0.99,
            init_std=1.,
            n_samples=100,
            batch_size=None,
            best_frac=0.05,
            extra_std=1.,
            extra_decay_time=100,
            plot=False,
            **kwargs
    ):
        """
        :param n_itr: Number of iterations.
        :param max_path_length: Maximum length of a single rollout.
        :param batch_size: # of samples from trajs from param distribution, when this
        is set, n_samples is ignored
        :param discount: Discount.
        :param plot: Plot evaluation run after each iteration.
        :param init_std: Initial std for param distribution
        :param extra_std: Decaying std added to param distribution at each iteration
        :param extra_decay_time: Iterations that it takes to decay extra std
        :param n_samples: #of samples from param distribution
        :param best_frac: Best fraction of the sampled params
        :return:
        """
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.batch_size = batch_size
        self.plot = plot
        self.extra_decay_time = extra_decay_time
        self.extra_std = extra_std
        self.best_frac = best_frac
        self.n_samples = n_samples
        self.init_std = init_std
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr

    def train(self):
        """Run the CEM optimization loop for ``n_itr`` iterations."""
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        cur_std = self.init_std
        cur_mean = self.policy.get_param_values()
        n_best = max(1, int(self.n_samples * self.best_frac))
        for itr in range(self.n_itr):
            # Inflate the sampling std with a linearly decaying extra term so
            # early iterations explore more broadly.
            extra_var_mult = max(1.0 - itr / self.extra_decay_time, 0)
            sample_std = np.sqrt(np.square(cur_std) + np.square(self.extra_std) * extra_var_mult)
            # Collection stops either after n_samples paths or after
            # batch_size environment steps, depending on configuration.
            if self.batch_size is None:
                criterion = 'paths'
                threshold = self.n_samples
            else:
                criterion = 'samples'
                threshold = self.batch_size
            infos = stateful_pool.singleton_pool.run_collect(
                _worker_rollout_policy,
                threshold=threshold,
                args=(dict(cur_mean=cur_mean,
                           sample_std=sample_std,
                           max_path_length=self.max_path_length,
                           discount=self.discount,
                           criterion=criterion),)
            )
            xs = np.asarray([info[0] for info in infos])
            paths = [info[1] for info in infos]
            # discounted return of each sampled parameter vector
            fs = np.array([path['returns'][0] for path in paths])
            # BUG FIX: removed stray debug `print(xs.shape, fs.shape)`.
            best_inds = (-fs).argsort()[:n_best]
            best_xs = xs[best_inds]
            # refit the Gaussian to the elite samples
            cur_mean = best_xs.mean(axis=0)
            cur_std = best_xs.std(axis=0)
            best_x = best_xs[0]
            logger.push_prefix('itr #%d | ' % itr)
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array([path['undiscounted_return'] for path in paths])
            logger.record_tabular('AverageReturn',
                                  np.mean(undiscounted_returns))
            # BUG FIX: StdReturn previously logged np.mean again.
            logger.record_tabular('StdReturn',
                                  np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn',
                                  np.max(undiscounted_returns))
            logger.record_tabular('MinReturn',
                                  np.min(undiscounted_returns))
            logger.record_tabular('AverageDiscountedReturn',
                                  np.mean(fs))
            logger.record_tabular('AvgTrajLen',
                                  np.mean([len(path['returns']) for path in paths]))
            logger.record_tabular('NumTrajs',
                                  len(paths))
            self.policy.set_param_values(best_x)
            self.env.log_diagnostics(paths)
            self.policy.log_diagnostics(paths)
            logger.save_itr_params(itr, dict(
                itr=itr,
                policy=self.policy,
                env=self.env,
                cur_mean=cur_mean,
                cur_std=cur_std,
            ))
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
        parallel_sampler.terminate_task()
|
en
| 0.77541
|
:param n_itr: Number of iterations. :param max_path_length: Maximum length of a single rollout. :param batch_size: # of samples from trajs from param distribution, when this is set, n_samples is ignored :param discount: Discount. :param plot: Plot evaluation run after each iteration. :param init_std: Initial std for param distribution :param extra_std: Decaying std added to param distribution at each iteration :param extra_decay_time: Iterations that it takes to decay extra std :param n_samples: #of samples from param distribution :param best_frac: Best fraction of the sampled params :return: # K = cur_mean.size # sample around the current distribution #%d | ' % itr)
| 2.148929
| 2
|
charmcraft/linters.py
|
jguedez/charmcraft
| 0
|
6626376
|
<reponame>jguedez/charmcraft<filename>charmcraft/linters.py
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Analyze and lint charm structures and files."""
import ast
import os
import pathlib
import shlex
from collections import namedtuple
from typing import List, Generator, Union
import yaml
from charmcraft import config
from charmcraft.metadata import parse_metadata_yaml
CheckType = namedtuple("CheckType", "attribute lint")(attribute="attribute", lint="lint")
# result information from each checker/linter
CheckResult = namedtuple("CheckResult", "name result url check_type text")
# generic constant for common results
UNKNOWN = "unknown"
IGNORED = "ignored"
WARNINGS = "warnings"
ERRORS = "errors"
FATAL = "fatal"
OK = "ok"
def check_dispatch_with_python_entrypoint(
    basedir: pathlib.Path,
) -> Union[pathlib.Path, None]:
    """Verify if the charm has a dispatch file pointing to a Python entrypoint.

    :returns: the entrypoint path if all succeeds, None otherwise.
    """
    dispatch_path = basedir / "dispatch"
    candidate = ""
    try:
        with dispatch_path.open("rt", encoding="utf8") as script:
            # the entrypoint is the last token of the last non-empty line
            relevant_lines = [line for line in script if line.strip()]
        if relevant_lines:
            candidate = shlex.split(relevant_lines[-1])[-1]
    except (IOError, UnicodeDecodeError):
        return None
    entrypoint = basedir / candidate
    # must be a Python file and executable
    if entrypoint.suffix == ".py" and os.access(entrypoint, os.X_OK):
        return entrypoint
    return None
class Language:
    """Check the language used to write the charm.

    Currently only Python is detected, if the following checks are true:

    - the charm has a text dispatch with a python call
    - the charm has a `.py` entry point
    - the entry point file is executable
    """
    check_type = CheckType.attribute
    name = "language"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--language"
    text = "The charm is written with Python."
    # possible outcomes for this attribute
    Result = namedtuple("Result", "python unknown")(python="python", unknown=UNKNOWN)
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        if check_dispatch_with_python_entrypoint(basedir) is None:
            return self.Result.unknown
        return self.Result.python
class Framework:
    """Check the framework the charm is based on.

    Currently it detects if the Operator Framework is used, if...

    - the language attribute is set to python
    - the charm contains venv/ops
    - the charm imports ops in the entry point.

    ...or the Reactive Framework is used, if the charm...

    - has a metadata.yaml with "name" in it
    - has a reactive/<name>.py file that imports "charms.reactive"
    - has a file name that starts with "charms.reactive-" inside the "wheelhouse" directory
    """
    check_type = CheckType.attribute
    name = "framework"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--framework"
    # different result constants
    Result = namedtuple("Result", "operator reactive unknown")(
        operator="operator", reactive="reactive", unknown=UNKNOWN
    )
    # different texts to be exposed as `text` (see the property below)
    result_texts = {
        Result.operator: "The charm is based on the Operator Framework.",
        Result.reactive: "The charm is based on the Reactive Framework.",
        Result.unknown: "The charm is not based on any known Framework.",
    }
    def __init__(self):
        self.result = None
    @property
    def text(self):
        """Return a text in function of the result state."""
        if self.result is None:
            return None
        return self.result_texts[self.result]
    def _get_imports(self, filepath: pathlib.Path) -> Generator[List[str], None, None]:
        """Parse a Python filepath and yield its imports.

        If the file does not exist or cannot be parsed, return empty. Otherwise
        return the name for each imported module, split by possible dots.
        """
        if not os.access(filepath, os.R_OK):
            return
        try:
            parsed = ast.parse(filepath.read_bytes())
        except SyntaxError:
            return
        for node in ast.walk(parsed):
            if isinstance(node, ast.Import):
                for name in node.names:
                    yield name.name.split(".")
            elif isinstance(node, ast.ImportFrom):
                # BUG FIX: relative imports ("from . import x") have a None
                # module, which used to crash with AttributeError.
                if node.module is not None:
                    yield node.module.split(".")
    def _check_operator(self, basedir: pathlib.Path) -> bool:
        """Detect if the Operator Framework is used."""
        python_entrypoint = check_dispatch_with_python_entrypoint(basedir)
        if python_entrypoint is None:
            return False
        opsdir = basedir / "venv" / "ops"
        if not opsdir.exists() or not opsdir.is_dir():
            return False
        for import_parts in self._get_imports(python_entrypoint):
            if import_parts[0] == "ops":
                return True
        return False
    def _check_reactive(self, basedir: pathlib.Path) -> bool:
        """Detect if the Reactive Framework is used."""
        try:
            metadata = parse_metadata_yaml(basedir)
        except Exception:
            # file not found, corrupted, or mandatory "name" not present
            return False
        wheelhouse_dir = basedir / "wheelhouse"
        if not wheelhouse_dir.exists():
            return False
        if not any(f.name.startswith("charms.reactive-") for f in wheelhouse_dir.iterdir()):
            return False
        module_basename = metadata.name.replace("-", "_")
        entrypoint = basedir / "reactive" / f"{module_basename}.py"
        for import_parts in self._get_imports(entrypoint):
            # BUG FIX: compare a slice instead of indexing [1], which raised
            # IndexError for single-component imports such as "import charms".
            if import_parts[:2] == ["charms", "reactive"]:
                return True
        return False
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        if self._check_operator(basedir):
            result = self.Result.operator
        elif self._check_reactive(basedir):
            result = self.Result.reactive
        else:
            result = self.Result.unknown
        self.result = result
        return result
class JujuMetadata:
    """Check that the metadata.yaml file exists and is sane.

    The charm is considered to have a valid metadata if the following checks are true:

    - the metadata.yaml is present
    - it is a valid YAML file
    - it has at least the following fields: name, summary, and description
    """
    check_type = CheckType.lint
    name = "metadata"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--metadata"
    text = "Problems found with metadata.yaml file."
    # possible outcomes for this linter
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        try:
            metadata = parse_metadata_yaml(basedir)
        except Exception:
            # file not found, corrupted, or mandatory "name" not present
            return self.Result.errors
        # "name" is already enforced by the metadata parser; only the other
        # two mandatory fields need checking here
        if metadata.summary and metadata.description:
            return self.Result.ok
        return self.Result.errors
class JujuActions:
    """Check that the actions.yaml file is valid YAML if it exists."""
    check_type = CheckType.lint
    name = "juju-actions"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-actions"
    text = "The actions.yaml file is not a valid YAML file."
    # possible outcomes for this linter
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        actions_file = basedir / "actions.yaml"
        if actions_file.exists():
            # the file is optional, but when present it must parse cleanly
            try:
                with actions_file.open("rt", encoding="utf8") as stream:
                    yaml.safe_load(stream)
            except Exception:
                return self.Result.errors
        return self.Result.ok
class JujuConfig:
    """Check that the config.yaml file (if it exists) is valid.

    The file is considered valid if the following checks are true:

    - has an 'options' key
    - it is a dictionary
    - each item inside has the mandatory 'type' key
    """
    check_type = CheckType.lint
    name = "juju-config"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-config"
    # different result constants
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)
    def __init__(self):
        # error message, set when a specific problem is found
        self.text = None
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        filepath = basedir / "config.yaml"
        if not filepath.exists():
            # it's optional
            return self.Result.ok
        try:
            with filepath.open("rt", encoding="utf8") as fh:
                content = yaml.safe_load(fh)
        except Exception:
            self.text = "The config.yaml file is not a valid YAML file."
            return self.Result.errors
        # BUG FIX: an empty or non-mapping config.yaml made `content.get`
        # raise AttributeError (reported as FATAL by `analyze`); report it
        # as a regular lint error instead.
        if not isinstance(content, dict):
            self.text = "Error in config.yaml: must have an 'options' dictionary."
            return self.Result.errors
        options = content.get("options")
        if not isinstance(options, dict):
            self.text = "Error in config.yaml: must have an 'options' dictionary."
            return self.Result.errors
        for value in options.values():
            # each option must itself be a mapping that declares its 'type'
            if not isinstance(value, dict) or "type" not in value:
                self.text = "Error in config.yaml: items under 'options' must have a 'type' key."
                return self.Result.errors
        return self.Result.ok
# all checkers to run; the order here is important, as some checkers depend on the
# results from others (attribute checks run before the linters that build on them)
CHECKERS = [
    Language,
    JujuActions,
    JujuConfig,
    JujuMetadata,
    Framework,
]
def analyze(
    config: config.Config,
    basedir: pathlib.Path,
    *,
    override_ignore_config: bool = False,
) -> List[CheckResult]:
    """Run all checkers and linters.

    Ignored checkers produce an IGNORED result (unless
    ``override_ignore_config`` is set); a checker that raises is reported
    as UNKNOWN (attributes) or FATAL (linters).
    """
    results = []
    ignore = config.analysis.ignore
    for checker_class in CHECKERS:
        if checker_class.check_type == CheckType.attribute:
            ignored_names = ignore.attributes
        else:
            ignored_names = ignore.linters
        if checker_class.name in ignored_names and not override_ignore_config:
            results.append(
                CheckResult(
                    check_type=checker_class.check_type,
                    name=checker_class.name,
                    result=IGNORED,
                    url=checker_class.url,
                    text="",
                )
            )
            continue
        checker = checker_class()
        try:
            outcome = checker.run(basedir)
        except Exception:
            outcome = UNKNOWN if checker.check_type == CheckType.attribute else FATAL
        results.append(
            CheckResult(
                check_type=checker.check_type,
                name=checker.name,
                url=checker.url,
                text=checker.text,
                result=outcome,
            )
        )
    return results
|
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Analyze and lint charm structures and files."""
import ast
import os
import pathlib
import shlex
from collections import namedtuple
from typing import List, Generator, Union
import yaml
from charmcraft import config
from charmcraft.metadata import parse_metadata_yaml
CheckType = namedtuple("CheckType", "attribute lint")(attribute="attribute", lint="lint")
# result information from each checker/linter
CheckResult = namedtuple("CheckResult", "name result url check_type text")
# generic constant for common results
UNKNOWN = "unknown"
IGNORED = "ignored"
WARNINGS = "warnings"
ERRORS = "errors"
FATAL = "fatal"
OK = "ok"
def check_dispatch_with_python_entrypoint(
    basedir: pathlib.Path,
) -> Union[pathlib.Path, None]:
    """Verify if the charm has a dispatch file pointing to a Python entrypoint.

    :returns: the entrypoint path if all succeeds, None otherwise.
    """
    dispatch_path = basedir / "dispatch"
    candidate = ""
    try:
        with dispatch_path.open("rt", encoding="utf8") as script:
            # the entrypoint is the last token of the last non-empty line
            relevant_lines = [line for line in script if line.strip()]
        if relevant_lines:
            candidate = shlex.split(relevant_lines[-1])[-1]
    except (IOError, UnicodeDecodeError):
        return None
    entrypoint = basedir / candidate
    # must be a Python file and executable
    if entrypoint.suffix == ".py" and os.access(entrypoint, os.X_OK):
        return entrypoint
    return None
class Language:
    """Check the language used to write the charm.

    Currently only Python is detected, if the following checks are true:

    - the charm has a text dispatch with a python call
    - the charm has a `.py` entry point
    - the entry point file is executable
    """
    check_type = CheckType.attribute
    name = "language"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--language"
    text = "The charm is written with Python."
    # possible outcomes for this attribute
    Result = namedtuple("Result", "python unknown")(python="python", unknown=UNKNOWN)
    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        if check_dispatch_with_python_entrypoint(basedir) is None:
            return self.Result.unknown
        return self.Result.python
class Framework:
"""Check the framework the charm is based on.
Currently it detects if the Operator Framework is used, if...
- the language attribute is set to python
- the charm contains venv/ops
- the charm imports ops in the entry point.
...or the Reactive Framework is used, if the charm...
- has a metadata.yaml with "name" in it
- has a reactive/<name>.py file that imports "charms.reactive"
- has a file name that starts with "charms.reactive-" inside the "wheelhouse" directory
"""
check_type = CheckType.attribute
name = "framework"
url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--framework"
# different result constants
Result = namedtuple("Result", "operator reactive unknown")(
operator="operator", reactive="reactive", unknown=UNKNOWN
)
# different texts to be exposed as `text` (see the property below)
result_texts = {
Result.operator: "The charm is based on the Operator Framework.",
Result.reactive: "The charm is based on the Reactive Framework.",
Result.unknown: "The charm is not based on any known Framework.",
}
def __init__(self):
self.result = None
@property
def text(self):
"""Return a text in function of the result state."""
if self.result is None:
return None
return self.result_texts[self.result]
def _get_imports(self, filepath: pathlib.Path) -> Generator[List[str], None, None]:
"""Parse a Python filepath and yield its imports.
If the file does not exist or cannot be parsed, return empty. Otherwise
return the name for each imported module, split by possible dots.
"""
if not os.access(filepath, os.R_OK):
return
try:
parsed = ast.parse(filepath.read_bytes())
except SyntaxError:
return
for node in ast.walk(parsed):
if isinstance(node, ast.Import):
for name in node.names:
yield name.name.split(".")
elif isinstance(node, ast.ImportFrom):
yield node.module.split(".")
def _check_operator(self, basedir: pathlib.Path) -> bool:
"""Detect if the Operator Framework is used."""
python_entrypoint = check_dispatch_with_python_entrypoint(basedir)
if python_entrypoint is None:
return False
opsdir = basedir / "venv" / "ops"
if not opsdir.exists() or not opsdir.is_dir():
return False
for import_parts in self._get_imports(python_entrypoint):
if import_parts[0] == "ops":
return True
return False
def _check_reactive(self, basedir: pathlib.Path) -> bool:
"""Detect if the Reactive Framework is used."""
try:
metadata = parse_metadata_yaml(basedir)
except Exception:
# file not found, corrupted, or mandatory "name" not present
return False
wheelhouse_dir = basedir / "wheelhouse"
if not wheelhouse_dir.exists():
return False
if not any(f.name.startswith("charms.reactive-") for f in wheelhouse_dir.iterdir()):
return False
module_basename = metadata.name.replace("-", "_")
entrypoint = basedir / "reactive" / f"{module_basename}.py"
for import_parts in self._get_imports(entrypoint):
if import_parts[0] == "charms" and import_parts[1] == "reactive":
return True
return False
def run(self, basedir: pathlib.Path) -> str:
    """Run the proper verifications."""
    # Operator takes precedence; fall through to Reactive, else unknown.
    if self._check_operator(basedir):
        self.result = self.Result.operator
    elif self._check_reactive(basedir):
        self.result = self.Result.reactive
    else:
        self.result = self.Result.unknown
    return self.result
class JujuMetadata:
    """Check that the metadata.yaml file exists and is sane.

    The charm is considered to have a valid metadata if the following checks are true:

    - the metadata.yaml is present
    - it is a valid YAML file
    - it has at least the following fields: name, summary, and description
    """

    check_type = CheckType.lint
    name = "metadata"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--metadata"
    text = "Problems found with metadata.yaml file."

    # different result constants
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)

    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        try:
            metadata = parse_metadata_yaml(basedir)
        except Exception:
            # file not found, corrupted, or mandatory "name" not present
            return self.Result.errors

        # "name" is already guaranteed by the metadata parsing above, so only
        # the remaining mandatory fields need checking here
        if not (metadata.summary and metadata.description):
            return self.Result.errors
        return self.Result.ok
class JujuActions:
    """Check that the actions.yaml file is valid YAML if it exists."""

    check_type = CheckType.lint
    name = "juju-actions"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-actions"
    text = "The actions.yaml file is not a valid YAML file."

    # different result constants
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)

    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        filepath = basedir / "actions.yaml"
        if filepath.exists():
            # the file is optional, but when present it must parse as YAML
            try:
                with filepath.open("rt", encoding="utf8") as fh:
                    yaml.safe_load(fh)
            except Exception:
                return self.Result.errors
        return self.Result.ok
class JujuConfig:
    """Check that the config.yaml file (if it exists) is valid.

    The file is considered valid if the following checks are true:

    - has an 'options' key
    - it is a dictionary
    - each item inside has the mandatory 'type' key
    """

    check_type = CheckType.lint
    name = "juju-config"
    url = "https://juju.is/docs/sdk/charmcraft-analyze#heading--juju-config"

    # different result constants
    Result = namedtuple("Result", "ok errors")(ok=OK, errors=ERRORS)

    def __init__(self):
        # error message exposed to the caller; set by run() on failure
        self.text = None

    def run(self, basedir: pathlib.Path) -> str:
        """Run the proper verifications."""
        filepath = basedir / "config.yaml"
        if not filepath.exists():
            # it's optional
            return self.Result.ok
        try:
            with filepath.open("rt", encoding="utf8") as fh:
                content = yaml.safe_load(fh)
        except Exception:
            self.text = "The config.yaml file is not a valid YAML file."
            return self.Result.errors
        # yaml.safe_load returns None for an empty document and a scalar/list
        # for non-mapping ones; neither supports .get(), so report the
        # structural problem instead of crashing with AttributeError
        if not isinstance(content, dict):
            self.text = "Error in config.yaml: must have an 'options' dictionary."
            return self.Result.errors
        options = content.get("options")
        if not isinstance(options, dict):
            self.text = "Error in config.yaml: must have an 'options' dictionary."
            return self.Result.errors
        for value in options.values():
            # guard against scalar/None items, which do not support "in"
            if not isinstance(value, dict) or "type" not in value:
                self.text = "Error in config.yaml: items under 'options' must have a 'type' key."
                return self.Result.errors
        return self.Result.ok
# all checkers to run; the order here is important, as some checkers depend on the
# results from others
# NOTE(review): Framework's docstring mentions the language attribute, so it
# presumably expects the Language check to have run first — confirm.
CHECKERS = [
    Language,
    JujuActions,
    JujuConfig,
    JujuMetadata,
    Framework,
]
def analyze(
    config: config.Config,
    basedir: pathlib.Path,
    *,
    override_ignore_config: bool = False,
) -> List[CheckResult]:
    """Run all checkers and linters."""
    results = []
    for cls in CHECKERS:
        # which configured ignore list applies to this checker
        if cls.check_type == CheckType.attribute:
            skip_names = config.analysis.ignore.attributes
        else:
            skip_names = config.analysis.ignore.linters

        if not override_ignore_config and cls.name in skip_names:
            # configured to be skipped: record it as IGNORED and move on
            results.append(
                CheckResult(
                    check_type=cls.check_type,
                    name=cls.name,
                    result=IGNORED,
                    url=cls.url,
                    text="",
                )
            )
            continue

        checker = cls()
        try:
            outcome = checker.run(basedir)
        except Exception:
            # a crashed attribute checker is UNKNOWN; a crashed linter is FATAL
            outcome = UNKNOWN if checker.check_type == CheckType.attribute else FATAL
        results.append(
            CheckResult(
                check_type=checker.check_type,
                name=checker.name,
                url=checker.url,
                text=checker.text,
                result=outcome,
            )
        )
    return results
|
en
| 0.836932
|
# Copyright 2021 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For further info, check https://github.com/canonical/charmcraft Analyze and lint charm structures and files. # result information from each checker/linter # generic constant for common results Verify if the charm has a dispatch file pointing to a Python entrypoint. :returns: the entrypoint path if all succeeds, None otherwise. # get the entrypoint from the last useful dispatch line Check the language used to write the charm. Currently only Python is detected, if the following checks are true: - the charm has a text dispatch with a python call - the charm has a `.py` entry point - the entry point file is executable #heading--language" # different result constants Run the proper verifications. Check the framework the charm is based on. Currently it detects if the Operator Framework is used, if... - the language attribute is set to python - the charm contains venv/ops - the charm imports ops in the entry point. ...or the Reactive Framework is used, if the charm... - has a metadata.yaml with "name" in it - has a reactive/<name>.py file that imports "charms.reactive" - has a file name that starts with "charms.reactive-" inside the "wheelhouse" directory #heading--framework" # different result constants # different texts to be exposed as `text` (see the property below) Return a text in function of the result state. Parse a Python filepath and yield its imports. 
If the file does not exist or cannot be parsed, return empty. Otherwise return the name for each imported module, split by possible dots. Detect if the Operator Framework is used. Detect if the Reactive Framework is used. # file not found, corrupted, or mandatory "name" not present Run the proper verifications. Check that the metadata.yaml file exists and is sane. The charm is considered to have a valid metadata if the following checks are true: - the metadata.yaml is present - it is a valid YAML file - it has at least the following fields: name, summary, and description #heading--metadata" # different result constants Run the proper verifications. # file not found, corrupted, or mandatory "name" not present # no need to verify "name" as it's mandatory in the metadata parsing Check that the actions.yaml file is valid YAML if it exists. #heading--juju-actions" # different result constants Run the proper verifications. # it's optional Check that the config.yaml file (if it exists) is valid. The file is considered valid if the following checks are true: - has an 'options' key - it is a dictionary - each item inside has the mandatory 'type' key #heading--juju-config" # different result constants Run the proper verifications. # it's optional # all checkers to run; the order here is important, as some checkers depend on the # results from others Run all checkers and linters. # do not run the ignored ones
| 2.126071
| 2
|
tests/src/check/check_assign.py
|
pystatic/pystatic
| 0
|
6626377
|
# Type-checker regression fixture: every "# E <message>" comment is the exact
# diagnostic the checker under test must report for that line, and "# ok"
# marks lines that must pass. The flagged assignments are intentionally wrong;
# do not "fix" them.
from typing import Union, Literal
class A:
    ...
# value undefined
a = c # E Cannot determine type of 'c'(unresolved reference 'c')
# any
a = 1
a = "s" # ok
b: int = "hjzs" # E Incompatible type in assignment(expression has type 'Literal['hjzs']', variable has type 'int')
b = a # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int')
b = A() # E Incompatible type in assignment(expression has type 'A', variable has type 'int')
# type[A] and A
c: A = A # E Incompatible type in assignment(expression has type 'Type[A]', variable has type 'A')
c = A()
# t1 = "s"
# if A:
# t1 = A()
# t2: int = t1
t3: int = "s" # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int')
t4: int = t3 # ok
d: Literal[1] = 1
# Type-checker regression fixture (cleaned copy): "# E <message>" comments are
# the diagnostics the checker must report; "# ok" marks lines that must pass.
# The flagged assignments are intentionally wrong; do not "fix" them.
from typing import Union, Literal
class A:
    ...
# value undefined
a = c # E Cannot determine type of 'c'(unresolved reference 'c')
# any
a = 1
a = "s" # ok
b: int = "hjzs" # E Incompatible type in assignment(expression has type 'Literal['hjzs']', variable has type 'int')
b = a # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int')
b = A() # E Incompatible type in assignment(expression has type 'A', variable has type 'int')
# type[A] and A
c: A = A # E Incompatible type in assignment(expression has type 'Type[A]', variable has type 'A')
c = A()
# t1 = "s"
# if A:
# t1 = A()
# t2: int = t1
t3: int = "s" # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int')
t4: int = t3 # ok
d: Literal[1] = 1
|
en
| 0.850538
|
# value undefined # E Cannot determine type of 'c'(unresolved reference 'c') # any # ok # E Incompatible type in assignment(expression has type 'Literal['hjzs']', variable has type 'int') # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int') # E Incompatible type in assignment(expression has type 'A', variable has type 'int') # type[A] and A # E Incompatible type in assignment(expression has type 'Type[A]', variable has type 'A') # t1 = "s" # if A: # t1 = A() # t2: int = t1 # E Incompatible type in assignment(expression has type 'Literal['s']', variable has type 'int') # ok
| 3.786494
| 4
|
mlens/externals/joblib/pool.py
|
mehrdad-shokri/mlens
| 760
|
6626378
|
<reponame>mehrdad-shokri/mlens
"""Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: <NAME> <<EMAIL>>
# Copyright: 2012, <NAME>
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = type(None)
from pickle import whichmodule
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
from .backports import make_memmap
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
    """Recursively look up the original np.memmap instance base if any."""
    base = getattr(a, 'base', None)
    if base is None:
        # TODO: check scipy sparse datastructure if scipy is installed
        # neither a nor its ancestry is backed by a memmap
        return None
    if isinstance(base, mmap):
        # a is itself a real memmap instance
        return a
    # walk one level up the base ancestry and keep looking
    return _get_backing_memmap(base)
def _get_temp_dir(pool_folder_name, temp_folder=None):
    """Get the full path to a subfolder inside the temporary folder.

    Parameters
    ----------
    pool_folder_name : str
        Sub-folder name used for the serialization of a pool instance.

    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:

        - a folder pointed by the JOBLIB_TEMP_FOLDER environment
          variable,
        - /dev/shm if the folder exists and is writable: this is a
          RAMdisk filesystem available by default on modern Linux
          distributions,
        - the default system temporary folder that can be
          overridden with TMP, _TMPDIR or TEMP environment
          variables, typically /tmp under Unix operating systems.

    Returns
    -------
    pool_folder : str
        full path to the temporary folder
    use_shared_mem : bool
        whether the temporary folder is written to tmpfs
    """
    use_shared_mem = False
    if temp_folder is None:
        temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
    if temp_folder is None and os.path.exists(SYSTEM_SHARED_MEM_FS):
        # prefer the shared-memory RAMdisk when it is usable
        try:
            temp_folder = SYSTEM_SHARED_MEM_FS
            pool_folder = os.path.join(temp_folder, pool_folder_name)
            if not os.path.exists(pool_folder):
                os.makedirs(pool_folder)
            use_shared_mem = True
        except IOError:
            # Missing rights in the /dev/shm partition, fall back to the
            # regular temp folder below.
            temp_folder = None
    if temp_folder is None:
        # Fallback to the default tmp folder, typically /tmp
        temp_folder = tempfile.gettempdir()
    temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
    pool_folder = os.path.join(temp_folder, pool_folder_name)
    return pool_folder, use_shared_mem
def has_shareable_memory(a):
    """Return True if a is backed by some mmap buffer directly or not."""
    backing = _get_backing_memmap(a)
    return backing is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
                         total_buffer_len):
    """Reconstruct an array view on a memory mapped file."""
    if mode == 'w+':
        # Never re-zero the underlying file when unpickling.
        mode = 'r+'

    if strides is None:
        # Contiguous case: map the requested shape directly.
        return make_memmap(filename, dtype=dtype, shape=shape, mode=mode,
                           offset=offset, order=order)

    # Non-contiguous case: map the whole enclosing buffer, then carve out the
    # strided view with the stride-tricks API.
    base = make_memmap(filename, dtype=dtype, shape=total_buffer_len,
                       mode=mode, offset=offset, order=order)
    return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # offset that comes from the striding differences between a and m
    a_start, a_end = np.byte_bounds(a)
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'

    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # If the array is a contiguous view, no need to pass the strides
        strides = None
        total_buffer_len = None
    else:
        # Compute the total number of items to map from which the strided
        # view will be extracted.
        strides = a.strides
        total_buffer_len = (a_end - a_start) // a.itemsize
    # The reconstructor reopens the same file and rebuilds the (possibly
    # strided) view; see _strided_from_memmap.
    return (_strided_from_memmap,
            (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
             total_buffer_len))
def reduce_memmap(a):
    """Pickle the descriptors of a memmap instance to reopen on same file."""
    m = _get_backing_memmap(a)
    if m is None:
        # This memmap instance is actually backed by a regular in-memory
        # buffer (e.g. the result of binary operators on numpy.memmap
        # instances): fall back to a plain pickle round-trip.
        return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
    # m is a real mmap-backed memmap: reduce a preserving striding information
    return _reduce_memmap_backed(a, m)
class ArrayMemmapReducer(object):
    """Reducer callable to dump large arrays to memmap files.

    Parameters
    ----------
    max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created
        a folder.
    temp_folder: str
        Path of a folder where files for backing memmaped arrays are created.
    mmap_mode: 'r', 'r+' or 'c'
        Mode for the created memmap datastructure. See the documentation of
        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
        automatically to avoid zeroing the data on unpickling.
    verbose: int, optional, 0 by default
        If verbose > 0, memmap creations are logged.
        If verbose > 1, both memmap creations, reuse and array pickling are
        logged.
    prewarm: bool, optional, False by default.
        Force a read on newly memmaped array to make sure that OS pre-cache it
        memory. This can be useful to avoid concurrent disk access when the
        same data array is passed to different worker processes.
    """

    def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
                 context_id=None, prewarm=True):
        self._max_nbytes = max_nbytes
        self._temp_folder = temp_folder
        self._mmap_mode = mmap_mode
        self.verbose = int(verbose)
        self._prewarm = prewarm
        if context_id is not None:
            # context_id is accepted only for backward compatibility; the
            # value itself is never used
            warnings.warn('context_id is deprecated and ignored in joblib'
                          ' 0.9.4 and will be removed in 0.11',
                          DeprecationWarning)

    def __call__(self, a):
        # Reduce array `a` for pickling; large plain arrays are dumped to a
        # memmap file so workers can share the data instead of copying it.
        m = _get_backing_memmap(a)
        if m is not None:
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)

        if (not a.dtype.hasobject
                and self._max_nbytes is not None
                and a.nbytes > self._max_nbytes):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise e

            # Find a unique, concurrent safe filename for writing the
            # content of this array only once.
            basename = "%d-%d-%s.pkl" % (
                os.getpid(), id(threading.current_thread()), hash(a))
            filename = os.path.join(self._temp_folder, basename)

            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once

            # XXX: implement an explicit reference counting scheme to make it
            # possible to delete temporary files as soon as the workers are
            # done processing this data.
            if not os.path.exists(filename):
                if self.verbose > 0:
                    print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
                        a.shape, a.dtype, filename))
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)

                if self._prewarm:
                    # Warm up the data to avoid concurrent disk access in
                    # multiple children processes
                    load(filename, mmap_mode=self._mmap_mode).max()
            elif self.verbose > 1:
                print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
                    a.shape, a.dtype, filename))

            # The worker process will use joblib.load to memmap the data
            return (load, (filename, self._mmap_mode))
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            if self.verbose > 1:
                print("Pickling array (shape=%r, dtype=%s)." % (
                    a.shape, a.dtype))
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
    """Pickler that accepts custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for interprocess communication
    hence no backward compatibility is required.

    `reducers` is expected to be a dictionary with key/values
    being `(type, callable)` pairs where `callable` is a function that
    give an instance of `type` will return a tuple `(constructor,
    tuple_of_objects)` to rebuild an instance out of the pickled
    `tuple_of_objects` as would return a `__reduce__` method. See the
    standard library documentation on pickling for more details.
    """

    # We override the pure Python pickler as its the only way to be able to
    # customize the dispatch table without side effects in Python 2.7
    # to 3.2. For Python 3.3+ leverage the new dispatch_table
    # feature from http://bugs.python.org/issue14166 that makes it possible
    # to use the C implementation of the Pickler which is faster.

    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
        Pickler.__init__(self, writer, protocol=protocol)
        if hasattr(Pickler, 'dispatch'):
            # Python 2: copy the class-level registry so per-instance
            # registration does not leak into other picklers.
            self.dispatch = Pickler.dispatch.copy()
        else:
            # Python 3.3+: seed the instance dispatch_table from the
            # process-wide copyreg registry.
            self.dispatch_table = copyreg.dispatch_table.copy()
        for reduced_type, reduce_func in (reducers or {}).items():
            self.register(reduced_type, reduce_func)

    def register(self, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if hasattr(Pickler, 'dispatch'):
            # Python 2 pickler dispatching is not explicitly customizable;
            # wrap the reducer in a dispatch-style closure to work around it.
            def dispatcher(self, obj):
                self.save_reduce(obj=obj, *reduce_func(obj))
            self.dispatch[type] = dispatcher
        else:
            self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
    """Locked Pipe implementation that uses a customizable pickler.

    This class is an alternative to the multiprocessing implementation
    of SimpleQueue in order to make it possible to pass custom
    pickling reducers, for instance to avoid memory copy when passing
    memory mapped datastructures.

    `reducers` is expected to be a dict with key / values being
    `(type, callable)` pairs where `callable` is a function that, given an
    instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
    to rebuild an instance out of the pickled `tuple_of_objects` as would
    return a `__reduce__` method.

    See the standard library documentation on pickling for more details.
    """

    def __init__(self, context, reducers=None):
        self._reducers = reducers
        # unidirectional pipe: readers recv() from one end, writers send()
        # to the other
        self._reader, self._writer = context.Pipe(duplex=False)
        self._rlock = context.Lock()
        if sys.platform == 'win32':
            # writes to a message-oriented win32 pipe are atomic, so no
            # write lock is needed on that platform
            self._wlock = None
        else:
            self._wlock = context.Lock()
        self._make_methods()

    def __getstate__(self):
        # only picklable through the multiprocessing spawning machinery
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock,
                self._reducers)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock,
         self._reducers) = state
        # rebuild the bound get/put closures in the receiving process
        self._make_methods()

    def empty(self):
        # True when no message is pending on the reader end
        return not self._reader.poll()

    def _make_methods(self):
        # Bind get/put as closures over the pipe ends and the locks so the
        # hot path avoids repeated attribute lookups.
        self._recv = recv = self._reader.recv
        racquire, rrelease = self._rlock.acquire, self._rlock.release

        def get():
            racquire()
            try:
                return recv()
            finally:
                rrelease()

        self.get = get

        if self._reducers:
            # serialize with the reducer-aware pickler into an in-memory
            # buffer, then ship the raw bytes over the pipe
            def send(obj):
                buffer = BytesIO()
                CustomizablePickler(buffer, self._reducers).dump(obj)
                self._writer.send_bytes(buffer.getvalue())
            self._send = send
        else:
            self._send = send = self._writer.send

        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self.put = send
        else:
            wlock_acquire, wlock_release = (
                self._wlock.acquire, self._wlock.release)

            def put(obj):
                wlock_acquire()
                try:
                    return send(obj)
                finally:
                    wlock_release()

            self.put = put
class PicklingPool(Pool):
    """Pool implementation with customizable pickling reducers.

    This is useful to control how data is shipped between processes
    and makes it possible to use shared memory without useless
    copies induces by the default pickling methods of the original
    objects passed as arguments to dispatch.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return a
    tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
    pickled `tuple_of_objects` as would return a `__reduce__` method.

    See the standard library documentation about pickling for more details.
    """

    def __init__(self, processes=None, forward_reducers=None,
                 backward_reducers=None, **kwargs):
        self._forward_reducers = {} if forward_reducers is None else forward_reducers
        self._backward_reducers = {} if backward_reducers is None else backward_reducers
        poolargs = dict(processes=processes)
        poolargs.update(kwargs)
        super(PicklingPool, self).__init__(**poolargs)

    def _setup_queues(self):
        # Replace multiprocessing's SimpleQueues with reducer-aware ones.
        context = getattr(self, '_ctx', mp)
        self._inqueue = CustomizablePicklingQueue(context,
                                                  self._forward_reducers)
        self._outqueue = CustomizablePicklingQueue(context,
                                                   self._backward_reducers)
        self._quick_put = self._inqueue._send
        self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
    """Utility function to cleanup a temporary folder if still existing."""
    if not os.path.exists(folder_path):
        return
    try:
        shutil.rmtree(folder_path)
    except WindowsError:
        # best effort: a locked handle on Windows must not crash shutdown
        warnings.warn("Failed to clean temporary folder: %s" % folder_path)
class MemmapingPool(PicklingPool):
    """Process pool that shares large arrays to avoid memory copy.

    This drop-in replacement for `multiprocessing.pool.Pool` makes
    it possible to work efficiently with shared memory in a numpy
    context.

    Existing instances of numpy.memmap are preserved: the child
    suprocesses will have access to the same shared memory in the
    original mode except for the 'w+' mode that is automatically
    transformed as 'r+' to avoid zeroing the original data upon
    instantiation.

    Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem such as child
    processes to access their content via memmaping (file system
    backed shared memory).

    Note: it is important to call the terminate method to collect
    the temporary folder used by the pool.

    Parameters
    ----------
    processes: int, optional
        Number of worker processes running concurrently in the pool.
    initializer: callable, optional
        Callable executed on worker process creation.
    initargs: tuple, optional
        Arguments passed to the initializer callable.
    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:
        - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
        - /dev/shm if the folder exists and is writable: this is a RAMdisk
          filesystem available by default on modern Linux distributions,
        - the default system temporary folder that can be overridden
          with TMP, _TMPDIR or TEMP environment variables, typically /tmp
          under Unix operating systems.
    max_nbytes int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
        Use None to disable memmaping of large arrays.
    mmap_mode: {'r+', 'r', 'w+', 'c'}
        Memmapping mode for numpy arrays passed to workers.
        See 'max_nbytes' parameter documentation for more details.
    forward_reducers: dictionary, optional
        Reducers used to pickle objects passed from master to worker
        processes: see below.
    backward_reducers: dictionary, optional
        Reducers used to pickle return values from workers back to the
        master process.
    verbose: int, optional
        Make it possible to monitor how the communication of numpy arrays
        with the subprocess is handled (pickling or memmaping)
    prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmaped array to make sure that OS pre-
        cache it in memory. This can be useful to avoid concurrent disk access
        when the same data array is passed to different worker processes.
        If "auto" (by default), prewarm is set to True, unless the Linux shared
        memory partition /dev/shm is available and used as temp_folder.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that give an instance of `type` will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects` as would return a `__reduce__`
    method. See the standard library documentation on pickling for more
    details.
    """

    def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
                 mmap_mode='r', forward_reducers=None, backward_reducers=None,
                 verbose=0, context_id=None, prewarm=False, **kwargs):
        if forward_reducers is None:
            forward_reducers = dict()
        if backward_reducers is None:
            backward_reducers = dict()
        if context_id is not None:
            # context_id is accepted only for backward compatibility; the
            # value itself is never used
            warnings.warn('context_id is deprecated and ignored in joblib'
                          ' 0.9.4 and will be removed in 0.11',
                          DeprecationWarning)

        # Prepare a sub-folder name for the serialization of this particular
        # pool instance (do not create in advance to spare FS write access if
        # no array is to be dumped):
        pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
            os.getpid(), id(self))
        pool_folder, use_shared_mem = _get_temp_dir(pool_folder_name,
                                                    temp_folder)
        self._temp_folder = pool_folder

        # Register the garbage collector at program exit in case caller forgets
        # to call terminate explicitly: note we do not pass any reference to
        # self to ensure that this callback won't prevent garbage collection of
        # the pool instance and related file handler resources such as POSIX
        # semaphores and pipes
        pool_module_name = whichmodule(delete_folder, 'delete_folder')

        def _cleanup():
            # In some cases the Python runtime seems to set delete_folder to
            # None just before exiting when accessing the delete_folder
            # function from the closure namespace. So instead we reimport
            # the delete_folder function explicitly.
            # https://github.com/joblib/joblib/issues/328
            # We cannot just use from 'joblib.pool import delete_folder'
            # because joblib should only use relative imports to allow
            # easy vendoring.
            delete_folder = __import__(
                pool_module_name, fromlist=['delete_folder']).delete_folder
            delete_folder(pool_folder)

        atexit.register(_cleanup)

        if np is not None:
            # Register smart numpy.ndarray reducers that detects memmap backed
            # arrays and that is alse able to dump to memmap large in-memory
            # arrays over the max_nbytes threshold
            if prewarm == "auto":
                prewarm = not use_shared_mem
            forward_reduce_ndarray = ArrayMemmapReducer(
                max_nbytes, pool_folder, mmap_mode, verbose,
                prewarm=prewarm)
            forward_reducers[np.ndarray] = forward_reduce_ndarray
            forward_reducers[np.memmap] = reduce_memmap

            # Communication from child process to the parent process always
            # pickles in-memory numpy.ndarray without dumping them as memmap
            # to avoid confusing the caller and make it tricky to collect the
            # temporary folder
            backward_reduce_ndarray = ArrayMemmapReducer(
                None, pool_folder, mmap_mode, verbose)
            backward_reducers[np.ndarray] = backward_reduce_ndarray
            backward_reducers[np.memmap] = reduce_memmap

        poolargs = dict(
            processes=processes,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers)
        poolargs.update(kwargs)
        super(MemmapingPool, self).__init__(**poolargs)

    def terminate(self):
        # Terminate the workers, retrying a few times on Windows where a
        # transient "Access is denied" can occur, then collect the temp folder.
        n_retries = 10
        for i in range(n_retries):
            try:
                super(MemmapingPool, self).terminate()
                break
            except OSError as e:
                if isinstance(e, WindowsError):
                    # Workaround occasional "[Error 5] Access is denied" issue
                    # when trying to terminate a process under windows.
                    sleep(0.1)
                    if i + 1 == n_retries:
                        warnings.warn("Failed to terminate worker processes in"
                                      " multiprocessing pool: %r" % e)
        delete_folder(self._temp_folder)
|
"""Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: <NAME> <<EMAIL>>
# Copyright: 2012, <NAME>
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = type(None)
from pickle import whichmodule
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
from .backports import make_memmap
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# a nor its descendants do not have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def _get_temp_dir(pool_folder_name, temp_folder=None):
"""Get the full path to a subfolder inside the temporary folder.
Parameters
----------
pool_folder_name : str
Sub-folder name used for the serialization of a pool instance.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, _TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Returns
-------
pool_folder : str
full path to the temporary folder
use_shared_mem : bool
whether the temporary folder is written to tmpfs
"""
use_shared_mem = False
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
# Missing rights in the the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
return pool_folder, use_shared_mem
def has_shareable_memory(a):
    """Return True if a is backed by some mmap buffer directly or not."""
    backing = _get_backing_memmap(a)
    return backing is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
                         total_buffer_len):
    """Reconstruct an array view on a memory mapped file."""
    if mode == 'w+':
        # Never zero the original file when unpickling: reopen read/write.
        mode = 'r+'
    if strides is None:
        # Contiguous case: map exactly the requested shape.
        return make_memmap(filename, dtype=dtype, shape=shape, mode=mode,
                           offset=offset, order=order)
    # Non-contiguous case: map the whole enclosing buffer, then carve the
    # strided view out of it with the stride-tricks machinery.
    enclosing = make_memmap(filename, dtype=dtype, shape=total_buffer_len,
                            mode=mode, offset=offset, order=order)
    return as_strided(enclosing, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.

    Returns a ``(reconstructor, args)`` pair as a ``__reduce__`` method
    would: unpickling calls ``_strided_from_memmap(*args)`` to re-open the
    same backing file instead of copying the data.
    """
    # offset that comes from the striding differences between a and m
    a_start, a_end = np.byte_bounds(a)
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start
    # offset from the backing memmap
    offset += m.offset
    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'
    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # If the array is a contiguous view, no need to pass the strides
        strides = None
        total_buffer_len = None
    else:
        # Compute the total number of items to map from which the strided
        # view will be extracted.
        strides = a.strides
        total_buffer_len = (a_end - a_start) // a.itemsize
    return (_strided_from_memmap,
            (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
             total_buffer_len))
def reduce_memmap(a):
    """Pickle the descriptors of a memmap instance to reopen on same file."""
    backing = _get_backing_memmap(a)
    if backing is None:
        # This memmap instance is actually held by a regular in-memory
        # buffer: this can happen when using binary operators on
        # numpy.memmap instances.  Fall back to a plain pickle round-trip
        # of the array content.
        return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
    # Genuine mmap-backed array: reduce while preserving striding info.
    return _reduce_memmap_backed(a, backing)
class ArrayMemmapReducer(object):
    """Reducer callable to dump large arrays to memmap files.

    Parameters
    ----------
    max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created
        a folder.
    temp_folder: str
        Path of a folder where files for backing memmaped arrays are created.
    mmap_mode: 'r', 'r+' or 'c'
        Mode for the created memmap datastructure. See the documentation of
        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
        automatically to avoid zeroing the data on unpickling.
    verbose: int, optional, 0 by default
        If verbose > 0, memmap creations are logged.
        If verbose > 1, both memmap creations, reuse and array pickling are
        logged.
    prewarm: bool, optional, True by default.
        Force a read on newly memmaped array to make sure that OS pre-cache it
        memory. This can be useful to avoid concurrent disk access when the
        same data array is passed to different worker processes.
    """

    def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
                 context_id=None, prewarm=True):
        self._max_nbytes = max_nbytes
        self._temp_folder = temp_folder
        self._mmap_mode = mmap_mode
        self.verbose = int(verbose)
        self._prewarm = prewarm
        if context_id is not None:
            warnings.warn('context_id is deprecated and ignored in joblib'
                          ' 0.9.4 and will be removed in 0.11',
                          DeprecationWarning)

    def __call__(self, a):
        """Reduce array ``a``: reuse its memmap, dump it, or plain-pickle it.

        Returns a ``(reconstructor, args)`` pair usable as a ``__reduce__``
        result.
        """
        m = _get_backing_memmap(a)
        if m is not None:
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)
        if (not a.dtype.hasobject
                and self._max_nbytes is not None
                and a.nbytes > self._max_nbytes):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                # EEXIST means another thread/process created it first: fine.
                if e.errno != errno.EEXIST:
                    raise e
            # Find a unique, concurrent safe filename for writing the
            # content of this array only once.
            basename = "%d-%d-%s.pkl" % (
                os.getpid(), id(threading.current_thread()), hash(a))
            filename = os.path.join(self._temp_folder, basename)
            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once
            # XXX: implement an explicit reference counting scheme to make it
            # possible to delete temporary files as soon as the workers are
            # done processing this data.
            if not os.path.exists(filename):
                if self.verbose > 0:
                    print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
                        a.shape, a.dtype, filename))
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)
                if self._prewarm:
                    # Warm up the data to avoid concurrent disk access in
                    # multiple children processes
                    load(filename, mmap_mode=self._mmap_mode).max()
            elif self.verbose > 1:
                print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
                    a.shape, a.dtype, filename))
            # The worker process will use joblib.load to memmap the data
            return (load, (filename, self._mmap_mode))
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            if self.verbose > 1:
                print("Pickling array (shape=%r, dtype=%s)." % (
                    a.shape, a.dtype))
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
    """Pickler that accepts custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for interprocess communication
    hence no backward compatibility is required.

    `reducers` maps each type to a callable that, given an instance of that
    type, returns a `(constructor, tuple_of_objects)` pair to rebuild the
    instance from the pickled `tuple_of_objects`, exactly as a `__reduce__`
    method would. See the standard library pickling documentation.
    """

    # We override the pure Python pickler as its the only way to be able to
    # customize the dispatch table without side effects in Python 2.7
    # to 3.2. For Python 3.3+ leverage the new dispatch_table
    # feature from http://bugs.python.org/issue14166 that makes it possible
    # to use the C implementation of the Pickler which is faster.

    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
        Pickler.__init__(self, writer, protocol=protocol)
        if hasattr(Pickler, 'dispatch'):
            # Python 2: give this instance its own copy of the class-level
            # dispatch registry so registrations stay local.
            self.dispatch = Pickler.dispatch.copy()
        else:
            # Python 3: seed the per-instance dispatch table with the
            # default copyreg registry.
            self.dispatch_table = copyreg.dispatch_table.copy()
        for reduced_type, reduce_func in (reducers or {}).items():
            self.register(reduced_type, reduce_func)

    def register(self, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if hasattr(Pickler, 'dispatch'):
            # Python 2 pickler dispatching is not explicitly customizable.
            # Let us use a closure to workaround this limitation.
            def dispatcher(self, obj):
                self.save_reduce(obj=obj, *reduce_func(obj))
            self.dispatch[type] = dispatcher
        else:
            self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
    """Locked Pipe implementation that uses a customizable pickler.

    This class is an alternative to the multiprocessing implementation
    of SimpleQueue in order to make it possible to pass custom
    pickling reducers, for instance to avoid memory copy when passing
    memory mapped datastructures.

    `reducers` is expected to be a dict with key / values being
    `(type, callable)` pairs where `callable` is a function that, given an
    instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
    to rebuild an instance out of the pickled `tuple_of_objects` as would
    return a `__reduce__` method.

    See the standard library documentation on pickling for more details.
    """

    def __init__(self, context, reducers=None):
        self._reducers = reducers
        self._reader, self._writer = context.Pipe(duplex=False)
        self._rlock = context.Lock()
        if sys.platform == 'win32':
            # Writes to a message-oriented win32 pipe are atomic, so no
            # write lock is needed there (see _make_methods).
            self._wlock = None
        else:
            self._wlock = context.Lock()
        self._make_methods()

    def __getstate__(self):
        # Only allow pickling while spawning a child process; the pipe ends
        # and locks are inheritable, the bound methods are not.
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock,
                self._reducers)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock,
         self._reducers) = state
        # Rebuild get/put closures around the restored pipe ends and locks.
        self._make_methods()

    def empty(self):
        """Return True when no message is waiting in the pipe."""
        return not self._reader.poll()

    def _make_methods(self):
        """Bind ``self.get``/``self.put`` closures over the pipe and locks."""
        self._recv = recv = self._reader.recv
        racquire, rrelease = self._rlock.acquire, self._rlock.release

        def get():
            racquire()
            try:
                return recv()
            finally:
                rrelease()

        self.get = get

        if self._reducers:
            # Custom reducers: serialize with CustomizablePickler into an
            # in-memory buffer, then push the raw bytes through the pipe.
            def send(obj):
                buffer = BytesIO()
                CustomizablePickler(buffer, self._reducers).dump(obj)
                self._writer.send_bytes(buffer.getvalue())
            self._send = send
        else:
            # No custom reducers: use the pipe's default pickling directly.
            self._send = send = self._writer.send

        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self.put = send
        else:
            wlock_acquire, wlock_release = (
                self._wlock.acquire, self._wlock.release)

            def put(obj):
                wlock_acquire()
                try:
                    return send(obj)
                finally:
                    wlock_release()

            self.put = put
class PicklingPool(Pool):
    """Pool implementation with customizable pickling reducers.

    This is useful to control how data is shipped between processes
    and makes it possible to use shared memory without useless
    copies induces by the default pickling methods of the original
    objects passed as arguments to dispatch.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, will return a
    tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
    pickled `tuple_of_objects` as would return a `__reduce__` method.

    See the standard library documentation about pickling for more details.
    """

    def __init__(self, processes=None, forward_reducers=None,
                 backward_reducers=None, **kwargs):
        if forward_reducers is None:
            forward_reducers = dict()
        if backward_reducers is None:
            backward_reducers = dict()
        # Store the reducers before calling the parent constructor, since
        # Pool.__init__ triggers _setup_queues which reads them.
        self._forward_reducers = forward_reducers
        self._backward_reducers = backward_reducers
        poolargs = dict(processes=processes)
        poolargs.update(kwargs)
        super(PicklingPool, self).__init__(**poolargs)

    def _setup_queues(self):
        """Replace the default SimpleQueues with reducer-aware queues."""
        # '_ctx' only exists on Python versions where Pool is context-aware;
        # fall back to the multiprocessing module itself otherwise.
        context = getattr(self, '_ctx', mp)
        self._inqueue = CustomizablePicklingQueue(context,
                                                 self._forward_reducers)
        self._outqueue = CustomizablePicklingQueue(context,
                                                   self._backward_reducers)
        self._quick_put = self._inqueue._send
        self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
    """Utility function to cleanup a temporary folder if still existing."""
    try:
        if os.path.exists(folder_path):
            shutil.rmtree(folder_path)
    except WindowsError:
        # Best effort only: on Windows the folder can be locked by a
        # lingering worker process, so warn instead of failing.
        warnings.warn("Failed to clean temporary folder: %s" % folder_path)
class MemmapingPool(PicklingPool):
    """Process pool that shares large arrays to avoid memory copy.

    This drop-in replacement for `multiprocessing.pool.Pool` makes
    it possible to work efficiently with shared memory in a numpy
    context.

    Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
    original mode except for the 'w+' mode that is automatically
    transformed as 'r+' to avoid zeroing the original data upon
    instantiation.

    Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem such that child
    processes can access their content via memmaping (file system
    backed shared memory).

    Note: it is important to call the terminate method to collect
    the temporary folder used by the pool.

    Parameters
    ----------
    processes: int, optional
        Number of worker processes running concurrently in the pool.
    initializer: callable, optional
        Callable executed on worker process creation.
    initargs: tuple, optional
        Arguments passed to the initializer callable.
    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:
        - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
        - /dev/shm if the folder exists and is writable: this is a RAMdisk
          filesystem available by default on modern Linux distributions,
        - the default system temporary folder that can be overridden
          with TMP, TMPDIR or TEMP environment variables, typically /tmp
          under Unix operating systems.
    max_nbytes int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
        Use None to disable memmaping of large arrays.
    mmap_mode: {'r+', 'r', 'w+', 'c'}
        Memmapping mode for numpy arrays passed to workers.
        See 'max_nbytes' parameter documentation for more details.
    forward_reducers: dictionary, optional
        Reducers used to pickle objects passed from master to worker
        processes: see below.
    backward_reducers: dictionary, optional
        Reducers used to pickle return values from workers back to the
        master process.
    verbose: int, optional
        Make it possible to monitor how the communication of numpy arrays
        with the subprocess is handled (pickling or memmaping)
    prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmaped array to make sure that OS
        pre-cache it in memory. This can be useful to avoid concurrent disk
        access when the same data array is passed to different worker
        processes.
        If "auto" (by default), prewarm is set to True, unless the Linux
        shared memory partition /dev/shm is available and used as
        temp_folder.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that give an instance of `type` will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects` as would return a `__reduce__`
    method. See the standard library documentation on pickling for more
    details.
    """

    # NOTE(review): the docstring above says prewarm defaults to "auto" but
    # the signature default is False -- confirm which is intended.
    def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
                 mmap_mode='r', forward_reducers=None, backward_reducers=None,
                 verbose=0, context_id=None, prewarm=False, **kwargs):
        if forward_reducers is None:
            forward_reducers = dict()
        if backward_reducers is None:
            backward_reducers = dict()
        if context_id is not None:
            warnings.warn('context_id is deprecated and ignored in joblib'
                          ' 0.9.4 and will be removed in 0.11',
                          DeprecationWarning)
        # Prepare a sub-folder name for the serialization of this particular
        # pool instance (do not create in advance to spare FS write access if
        # no array is to be dumped):
        pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
            os.getpid(), id(self))
        pool_folder, use_shared_mem = _get_temp_dir(pool_folder_name,
                                                    temp_folder)
        self._temp_folder = pool_folder
        # Register the garbage collector at program exit in case caller forgets
        # to call terminate explicitly: note we do not pass any reference to
        # self to ensure that this callback won't prevent garbage collection of
        # the pool instance and related file handler resources such as POSIX
        # semaphores and pipes
        pool_module_name = whichmodule(delete_folder, 'delete_folder')

        def _cleanup():
            # In some cases the Python runtime seems to set delete_folder to
            # None just before exiting when accessing the delete_folder
            # function from the closure namespace. So instead we reimport
            # the delete_folder function explicitly.
            # https://github.com/joblib/joblib/issues/328
            # We cannot just use from 'joblib.pool import delete_folder'
            # because joblib should only use relative imports to allow
            # easy vendoring.
            delete_folder = __import__(
                pool_module_name, fromlist=['delete_folder']).delete_folder
            delete_folder(pool_folder)

        atexit.register(_cleanup)
        if np is not None:
            # Register smart numpy.ndarray reducers that detects memmap backed
            # arrays and that is also able to dump to memmap large in-memory
            # arrays over the max_nbytes threshold
            if prewarm == "auto":
                prewarm = not use_shared_mem
            forward_reduce_ndarray = ArrayMemmapReducer(
                max_nbytes, pool_folder, mmap_mode, verbose,
                prewarm=prewarm)
            forward_reducers[np.ndarray] = forward_reduce_ndarray
            forward_reducers[np.memmap] = reduce_memmap
            # Communication from child process to the parent process always
            # pickles in-memory numpy.ndarray without dumping them as memmap
            # to avoid confusing the caller and make it tricky to collect the
            # temporary folder
            backward_reduce_ndarray = ArrayMemmapReducer(
                None, pool_folder, mmap_mode, verbose)
            backward_reducers[np.ndarray] = backward_reduce_ndarray
            backward_reducers[np.memmap] = reduce_memmap
        poolargs = dict(
            processes=processes,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers)
        poolargs.update(kwargs)
        super(MemmapingPool, self).__init__(**poolargs)

    def terminate(self):
        """Terminate the workers and delete the pool's temporary folder.

        Termination is retried up to 10 times with a 0.1s pause to work
        around transient Windows access-denied errors; the temp folder is
        removed afterwards in every case.
        """
        n_retries = 10
        for i in range(n_retries):
            try:
                super(MemmapingPool, self).terminate()
                break
            except OSError as e:
                if isinstance(e, WindowsError):
                    # Workaround occasional "[Error 5] Access is denied" issue
                    # when trying to terminate a process under windows.
                    sleep(0.1)
                    if i + 1 == n_retries:
                        warnings.warn("Failed to terminate worker processes in"
                                      " multiprocessing pool: %r" % e)
        delete_folder(self._temp_folder)
|
en
| 0.821382
|
Custom implementation of multiprocessing.Pool with custom pickler. This module provides efficient ways of working with data stored in shared memory with numpy.memmap arrays without inducing any memory copy between the parent and child processes. This module should not be imported if multiprocessing is not available as it implements subclasses of multiprocessing Pool that uses a custom alternative to SimpleQueue. # Author: <NAME> <<EMAIL>> # Copyright: 2012, <NAME> # License: BSD 3 clause # Python 2 compat # Customizable pure Python pickler in Python 2 # customizable C-optimized pickler under Python 3.3+ # We need the class definition to derive from it not the multiprocessing.Pool # factory function # Some system have a ramdisk mounted by default, we can use it instead of /tmp # as the default folder to dump big arrays to share with subprocesses # Folder and file permissions to chmod temporary files generated by the # memmaping pool. Only the owner of the Python process can access the # temporary files and folder. ############################################################################### # Support for efficient transient pickling of numpy data structures Recursively look up the original np.memmap instance base if any. # TODO: check scipy sparse datastructure if scipy is installed # a nor its descendants do not have a memmap base # a is already a real memmap instance. # Recursive exploration of the base ancestry Get the full path to a subfolder inside the temporary folder. Parameters ---------- pool_folder_name : str Sub-folder name used for the serialization of a pool instance. temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. 
If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, _TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. Returns ------- pool_folder : str full path to the temporary folder use_shared_mem : bool whether the temporary folder is written to tmpfs # Missing rights in the the /dev/shm partition, # fallback to regular temp folder. # Fallback to the default tmp folder, typically /tmp Return True if a is backed by some mmap buffer directly or not. Reconstruct an array view on a memory mapped file. # Do not zero the original data when unpickling # Simple, contiguous memmap # For non-contiguous data, memmap the total enclosing buffer and then # extract the non-contiguous view with the stride-tricks API Pickling reduction for memmap backed arrays. a is expected to be an instance of np.ndarray (or np.memmap) m is expected to be an instance of np.memmap on the top of the ``base`` attribute ancestry of a. ``m.base`` should be the real python mmap object. # offset that comes from the striding differences between a and m # offset from the backing memmap # The backing memmap buffer is necessarily contiguous hence C if not # Fortran # If the array is a contiguous view, no need to pass the strides # Compute the total number of items to map from which the strided # view will be extracted. Pickle the descriptors of a memmap instance to reopen on same file. # m is a real mmap backed memmap instance, reduce a preserving striding # information # This memmap instance is actually backed by a regular in-memory # buffer: this can happen when using binary operators on numpy.memmap # instances Reducer callable to dump large arrays to memmap files. 
Parameters ---------- max_nbytes: int Threshold to trigger memmaping of large arrays to files created a folder. temp_folder: str Path of a folder where files for backing memmaped arrays are created. mmap_mode: 'r', 'r+' or 'c' Mode for the created memmap datastructure. See the documentation of numpy.memmap for more details. Note: 'w+' is coerced to 'r+' automatically to avoid zeroing the data on unpickling. verbose: int, optional, 0 by default If verbose > 0, memmap creations are logged. If verbose > 1, both memmap creations, reuse and array pickling are logged. prewarm: bool, optional, False by default. Force a read on newly memmaped array to make sure that OS pre-cache it memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. # a is already backed by a memmap file, let's reuse it directly # check that the folder exists (lazily create the pool temp folder # if required) # Find a unique, concurrent safe filename for writing the # content of this array only once. # In case the same array with the same content is passed several # times to the pool subprocess children, serialize it only once # XXX: implement an explicit reference counting scheme to make it # possible to delete temporary files as soon as the workers are # done processing this data. # Warm up the data to avoid concurrent disk access in # multiple children processes # The worker process will use joblib.load to memmap the data # do not convert a into memmap, let pickler do its usual copy with # the default system pickler ############################################################################### # Enable custom pickling in Pool queues Pickler that accepts custom reducers. HIGHEST_PROTOCOL is selected by default as this pickler is used to pickle ephemeral datastructures for interprocess communication hence no backward compatibility is required. 
`reducers` is expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. # We override the pure Python pickler as its the only way to be able to # customize the dispatch table without side effects in Python 2.7 # to 3.2. For Python 3.3+ leverage the new dispatch_table # feature from http://bugs.python.org/issue14166 that makes it possible # to use the C implementation of the Pickler which is faster. # Make the dispatch registry an instance level attribute instead of # a reference to the class dictionary under Python 2 # Under Python 3 initialize the dispatch table with a copy of the # default registry Attach a reducer function to a given type in the dispatch table. # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. Locked Pipe implementation that uses a customizable pickler. This class is an alternative to the multiprocessing implementation of SimpleQueue in order to make it possible to pass custom pickling reducers, for instance to avoid memory copy when passing memory mapped datastructures. `reducers` is expected to be a dict with key / values being `(type, callable)` pairs where `callable` is a function that, given an instance of `type`, will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. # writes to a message oriented win32 pipe are atomic Pool implementation with customizable pickling reducers. 
This is useful to control how data is shipped between processes and makes it possible to use shared memory without useless copies induces by the default pickling methods of the original objects passed as arguments to dispatch. `forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that, given an instance of `type`, will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation about pickling for more details. Utility function to cleanup a temporary folder if still existing. Process pool that shares large arrays to avoid memory copy. This drop-in replacement for `multiprocessing.pool.Pool` makes it possible to work efficiently with shared memory in a numpy context. Existing instances of numpy.memmap are preserved: the child suprocesses will have access to the same shared memory in the original mode except for the 'w+' mode that is automatically transformed as 'r+' to avoid zeroing the original data upon instantiation. Furthermore large arrays from the parent process are automatically dumped to a temporary folder on the filesystem such as child processes to access their content via memmaping (file system backed shared memory). Note: it is important to call the terminate method to collect the temporary folder used by the pool. Parameters ---------- processes: int, optional Number of worker processes running concurrently in the pool. initializer: callable, optional Callable executed on worker process creation. initargs: tuple, optional Arguments passed to the initializer callable. temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. 
If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, _TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. max_nbytes int or None, optional, 1e6 by default Threshold on the size of arrays passed to the workers that triggers automated memory mapping in temp_folder. Use None to disable memmaping of large arrays. mmap_mode: {'r+', 'r', 'w+', 'c'} Memmapping mode for numpy arrays passed to workers. See 'max_nbytes' parameter documentation for more details. forward_reducers: dictionary, optional Reducers used to pickle objects passed from master to worker processes: see below. backward_reducers: dictionary, optional Reducers used to pickle return values from workers back to the master process. verbose: int, optional Make it possible to monitor how the communication of numpy arrays with the subprocess is handled (pickling or memmaping) prewarm: bool or str, optional, "auto" by default. If True, force a read on newly memmaped array to make sure that OS pre- cache it in memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. If "auto" (by default), prewarm is set to True, unless the Linux shared memory partition /dev/shm is available and used as temp_folder. `forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
# Prepare a sub-folder name for the serialization of this particular # pool instance (do not create in advance to spare FS write access if # no array is to be dumped): # Register the garbage collector at program exit in case caller forgets # to call terminate explicitly: note we do not pass any reference to # self to ensure that this callback won't prevent garbage collection of # the pool instance and related file handler resources such as POSIX # semaphores and pipes # In some cases the Python runtime seems to set delete_folder to # None just before exiting when accessing the delete_folder # function from the closure namespace. So instead we reimport # the delete_folder function explicitly. # https://github.com/joblib/joblib/issues/328 # We cannot just use from 'joblib.pool import delete_folder' # because joblib should only use relative imports to allow # easy vendoring. # Register smart numpy.ndarray reducers that detects memmap backed # arrays and that is alse able to dump to memmap large in-memory # arrays over the max_nbytes threshold # Communication from child process to the parent process always # pickles in-memory numpy.ndarray without dumping them as memmap # to avoid confusing the caller and make it tricky to collect the # temporary folder # Workaround occasional "[Error 5] Access is denied" issue # when trying to terminate a process under windows.
| 2.749232
| 3
|
src/control/matrix.py
|
MartinLesser/Procedural-generation-of-3d-trees
| 2
|
6626379
|
import math
from model.constants import MIN_BRANCH_ROTATION
def mult(vector, float):
    """
    Scale a vector by a scalar factor and return the result as a new vector.

    :param vector: List with elements of the vector.
    :param float: scalar factor (note: the parameter name shadows the builtin).
    :return: new scaled vector
    """
    return [component * float for component in vector]
def multVectors(vector1, vector2):
    """
    Element-wise (Hadamard) product of two vectors, returned as a new vector.

    :param vector1: List (its length determines the result length)
    :param vector2: List
    :return: new vector
    """
    return [vector1[i] * vector2[i] for i in range(len(vector1))]
def matrixMultVec(matrix, vector):
    """
    Multiply a matrix (list of rows) with a vector and return a new vector.

    :param matrix: list of rows, each a list of coefficients
    :param vector: list of components
    :return: new vector with one entry per matrix row
    """
    # Each output component is the dot product of one row with the vector.
    return [
        sum(coeff * vector[col] for col, coeff in enumerate(row))
        for row in matrix
    ]
def add(vector1, vector2):
    """
    Component-wise sum of two vectors, returned as a new vector.

    :param vector1: vector (its length determines the result length)
    :param vector2: vector
    :return: new vector
    """
    size = len(vector1)
    return [vector1[i] + vector2[i] for i in range(size)]
def sub(vector1, vector2):
    """
    Component-wise difference vector1 - vector2, returned as a new vector.

    :param vector1: vector (its length determines the result length)
    :param vector2: vector
    :return: new vector
    """
    size = len(vector1)
    return [vector1[i] - vector2[i] for i in range(size)]
def rotate_left_z(vector, angle):
    """
    Rotate a vector counter-clockwise around the z-axis.

    :param vector: 3-component vector
    :param angle: rotation angle in degrees
    :return: rotated vector
    """
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    rotation_matrix = [
        [cos_t, -sin_t, 0],
        [sin_t,  cos_t, 0],
        [0,      0,     1],
    ]
    return matrixMultVec(rotation_matrix, vector)
def rotate_right_z(vector, angle):
    """Rotate a vector clockwise around the z-axis by delegating to the
    counter-clockwise rotation with a negated angle (degrees)."""
    return rotate_left_z(vector, -angle)
def rotate_left_x(vector, angle):
    """
    Rotate a vector counter-clockwise around the x-axis.

    :param vector: 3-component vector
    :param angle: rotation angle in degrees
    :return: rotated vector
    """
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    rotation_matrix = [
        [1, 0,      0     ],
        [0, cos_t, -sin_t ],
        [0, sin_t,  cos_t ],
    ]
    return matrixMultVec(rotation_matrix, vector)
def rotate_right_x(vector, angle):
    """Rotate a vector clockwise around the x-axis by delegating to the
    counter-clockwise rotation with a negated angle (degrees)."""
    return rotate_left_x(vector, -angle)
def rotate_left_y(vector, angle):
    """
    Rotate a vector counter-clockwise around the y-axis.

    :param vector: 3-component vector
    :param angle: rotation angle in degrees
    :return: rotated vector
    """
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    rotation_matrix = [
        [ cos_t, 0, sin_t],
        [ 0,     1, 0    ],
        [-sin_t, 0, cos_t],
    ]
    return matrixMultVec(rotation_matrix, vector)
def rotate_right_y(vector, angle):
    """Rotate a vector clockwise around the y-axis by delegating to the
    counter-clockwise rotation with a negated angle (degrees)."""
    return rotate_left_y(vector, -angle)
def rotate_left_z_local(vector, point, angle):
    """
    Rotate a vector around the z-axis in the local frame of *point*: the
    vector is translated so that *point* sits at the origin, rotated, and
    translated back.  Used for rotating segment ring points around the
    skeleton point they belong to when generating the tree mesh.

    :param vector: the new point which is calculated in a circle
    :param point: the original point from the skeleton tree
    :param angle: rotation angle in degrees
    :return: rotated vector
    """
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    rotation_z = [
        [cos_t, -sin_t, 0],
        [sin_t,  cos_t, 0],
        [0,      0,     1],
    ]
    offset = sub(vector, point)
    rotated = matrixMultVec(rotation_z, offset)
    return add(rotated, point)
def rotate_left_y_local(vector, point, angle):
    """
    Rotate a vector around the y-axis in the local frame of *point*: the
    vector is translated so that *point* sits at the origin, rotated, and
    translated back.  Used for rotating segment ring points around the
    skeleton point they belong to when generating the tree mesh.

    Bugfix: the previous implementation used the z-axis rotation matrix
    (copy-paste from rotate_left_z_local), so despite its name it rotated
    around z.  It now uses the proper y-axis rotation matrix, matching
    rotate_left_y.

    :param vector: the new point which is calculated in a circle
    :param point: the original point from the skeleton tree
    :param angle: float dictates how many degrees the vector is rotated
    :return: vector
    """
    rotation_y = [
        [ math.cos(math.radians(angle)), 0, math.sin(math.radians(angle))],
        [ 0,                             1, 0                            ],
        [-math.sin(math.radians(angle)), 0, math.cos(math.radians(angle))],
    ]
    v1 = matrixMultVec(rotation_y, sub(vector, point))
    v2 = add(v1, point)
    return v2
def rotate_left_x_local(vector, point, angle):
    """
    Rotate a vector around the x-axis in the local frame of *point*: the
    vector is translated so that *point* sits at the origin, rotated, and
    translated back.  Used for rotating segment ring points around the
    skeleton point they belong to when generating the tree mesh.

    Bugfix: the previous implementation used the y-axis rotation matrix
    (identical to rotate_left_y), so despite its name it rotated around y.
    It now uses the proper x-axis rotation matrix, matching rotate_left_x.

    :param vector: the new point which is calculated in a circle
    :param point: the original point from the skeleton tree
    :param angle: float dictates how many degrees the vector is rotated
    :return: vector
    """
    rotation_x = [
        [1, 0,                             0                            ],
        [0, math.cos(math.radians(angle)), -math.sin(math.radians(angle))],
        [0, math.sin(math.radians(angle)),  math.cos(math.radians(angle))],
    ]
    v1 = matrixMultVec(rotation_x, sub(vector, point))
    v2 = add(v1, point)
    return v2
|
import math
from model.constants import MIN_BRANCH_ROTATION
def mult(vector, float):
"""
Multiplies a vector with a float and returns the result in a new vector.
:param vector: List with elements of the vector.
:param float: float is factor.
:return: vector
"""
new_vector = []
for item in vector:
new_vector.append(item * float)
return new_vector
def multVectors(vector1, vector2):
"""
Multiplies two vectors and returns the result as a new vector.
:param vector1: List
:param vector2: List
:return: vector
"""
new_vector = []
for i in range(len(vector1)):
new_vector.append(vector1[i] * vector2[i])
return new_vector
def matrixMultVec(matrix, vector):
"""
Multiplies a matrix with a vector and returns the result as a new vector.
:param matrix: Matrix
:param vector: vector
:return: vector
"""
new_vector = []
x = 0
for row in matrix:
for index, number in enumerate(row):
x += number * vector[index]
new_vector.append(x)
x = 0
return new_vector
def add(vector1, vector2):
"""
Adds a vector to a vector and returns the result as a new vector.
:param vector1: vector
:param vector2: vector
:return: vector
"""
new_vector = []
for index, item in enumerate(vector1):
new_vector.append(vector1[index] + vector2[index])
return new_vector
def sub(vector1, vector2):
"""
Subtracts a vector from a vector and returns the result as a new vector.
:param vector1: vector
:param vector2: vector
:return: vector
"""
new_vector = []
for index, item in enumerate(vector1):
new_vector.append(vector1[index] - vector2[index])
return new_vector
def rotate_left_z(vector, angle):
"""
Multiplies a vector with a matrix to rotate the vector.
:param vector: vector
:param angle: float dictates how many degrees the vector is rotated
:return: vector
"""
rotation_matrix = [
[ math.cos(math.radians(angle)), -math.sin(math.radians(angle)), 0],
[ math.sin(math.radians(angle)), math.cos(math.radians(angle)), 0],
[ 0, 0, 1],
]
return matrixMultVec(rotation_matrix, vector)
def rotate_right_z(vector, angle):
return rotate_left_z(vector, -angle)
def rotate_left_x(vector, angle):
"""
Multiplies a vector with a matrix to rotate the vector.
:param vector: vector
:param angle: float dictates how many degrees the vector is rotated
:return: vector
"""
rotation_matrix = [
[ 1, 0, 0 ],
[ 0, math.cos(math.radians(angle)), -math.sin(math.radians(angle))],
[ 0, math.sin(math.radians(angle)), math.cos(math.radians(angle)) ],
]
return matrixMultVec(rotation_matrix, vector)
def rotate_right_x(vector, angle):
return rotate_left_x(vector, -angle)
def rotate_left_y(vector, angle):
"""
Multiplies a vector with a matrix to rotate the vector.
:param vector: vector
:param angle: float dictates how many degrees the vector is rotated
:return: vector
"""
rotation_matrix = [
[ math.cos(math.radians(angle)), 0, math.sin(math.radians(angle))],
[ 0, 1, 0],
[ -math.sin(math.radians(angle)), 0, math.cos(math.radians(angle))],
]
return matrixMultVec(rotation_matrix, vector)
def rotate_right_y(vector, angle):
return rotate_left_y(vector, -angle)
def rotate_left_z_local(vector, point, angle):
"""
Moves the vector to the coordinates origin, rotates it and moves it back. This is used for the rotation of the
segments when generating the tree mesh.
:param vector: vector is the new point which is calculated in a circle
:param point: vector is the original point from the skeleton tree
:param angle: float dictates how many degrees the vector is rotated
:return: vector
"""
rotation_z = [
[ math.cos(math.radians(angle)), -math.sin(math.radians(angle)), 0],
[ math.sin(math.radians(angle)), math.cos(math.radians(angle)), 0],
[ 0, 0, 1],
]
v1 = matrixMultVec(rotation_z, sub(vector,point))
v2 = add(v1, point)
return v2
def rotate_left_y_local(vector, point, angle):
    """
    Rotate a vector around the y-axis in the local frame of *point*: the
    vector is translated so that *point* sits at the origin, rotated, and
    translated back.  Used for rotating segment ring points around the
    skeleton point they belong to when generating the tree mesh.

    Bugfix: the previous implementation used the z-axis rotation matrix
    (copy-paste from rotate_left_z_local), so despite its name it rotated
    around z.  It now uses the proper y-axis rotation matrix, matching
    rotate_left_y.

    :param vector: the new point which is calculated in a circle
    :param point: the original point from the skeleton tree
    :param angle: float dictates how many degrees the vector is rotated
    :return: vector
    """
    rotation_y = [
        [ math.cos(math.radians(angle)), 0, math.sin(math.radians(angle))],
        [ 0,                             1, 0                            ],
        [-math.sin(math.radians(angle)), 0, math.cos(math.radians(angle))],
    ]
    v1 = matrixMultVec(rotation_y, sub(vector, point))
    v2 = add(v1, point)
    return v2
def rotate_left_x_local(vector, point, angle):
    """
    Rotate a vector around the x-axis in the local frame of *point*: the
    vector is translated so that *point* sits at the origin, rotated, and
    translated back.  Used for rotating segment ring points around the
    skeleton point they belong to when generating the tree mesh.

    Bugfix: the previous implementation used the y-axis rotation matrix
    (identical to rotate_left_y), so despite its name it rotated around y.
    It now uses the proper x-axis rotation matrix, matching rotate_left_x.

    :param vector: the new point which is calculated in a circle
    :param point: the original point from the skeleton tree
    :param angle: float dictates how many degrees the vector is rotated
    :return: vector
    """
    rotation_x = [
        [1, 0,                             0                            ],
        [0, math.cos(math.radians(angle)), -math.sin(math.radians(angle))],
        [0, math.sin(math.radians(angle)),  math.cos(math.radians(angle))],
    ]
    v1 = matrixMultVec(rotation_x, sub(vector, point))
    v2 = add(v1, point)
    return v2
|
en
| 0.888316
|
Multiplies a vector with a float and returns the result in a new vector. :param vector: List with elements of the vector. :param float: float is factor. :return: vector Multiplies two vectors and returns the result as a new vector. :param vector1: List :param vector2: List :return: vector Multiplies a matrix with a vector and returns the result as a new vector. :param matrix: Matrix :param vector: vector :return: vector Adds a vector to a vector and returns the result as a new vector. :param vector1: vector :param vector2: vector :return: vector Subtracts a vector from a vector and returns the result as a new vector. :param vector1: vector :param vector2: vector :return: vector Multiplies a vector with a matrix to rotate the vector. :param vector: vector :param angle: float dictates how many degrees the vector is rotated :return: vector Multiplies a vector with a matrix to rotate the vector. :param vector: vector :param angle: float dictates how many degrees the vector is rotated :return: vector Multiplies a vector with a matrix to rotate the vector. :param vector: vector :param angle: float dictates how many degrees the vector is rotated :return: vector Moves the vector to the coordinates origin, rotates it and moves it back. This is used for the rotation of the segments when generating the tree mesh. :param vector: vector is the new point which is calculated in a circle :param point: vector is the original point from the skeleton tree :param angle: float dictates how many degrees the vector is rotated :return: vector Moves the vector to the coordinates origin, rotates it and moves it back. This is used for the rotation of the segments when generating the tree mesh. :param vector: vector is the new point which is calculated in a circle :param point: vector is the original point from the skeleton tree :param angle: float dictates how many degrees the vector is rotated :return: vector Moves the vector to the coordinates origin, rotates it and moves it back. 
This is used for the rotation of the segments when generating the tree mesh. :param vector: vector is the new point which is calculated in a circle :param point: vector is the original point from the skeleton tree :param angle: float dictates how many degrees the vector is rotated :return: vector
| 3.826857
| 4
|
scripts/utils.py
|
max-simon/master-thesis
| 4
|
6626380
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Year: 2020
import os
import sys
import numpy as np
from scipy.interpolate import NearestNDInterpolator, LinearNDInterpolator, griddata
from scipy.ndimage.filters import uniform_filter1d
import datetime
import netCDF4 as nc
import cftime
def get_area_map(grid_data, interpolate_to_psi=False):
    """
    Compute the area of every grid cell in km^2 from the inverse metric
    factors ``pm``/``pn`` (presumably a ROMS-style grid — the rho/psi
    naming suggests so; confirm against the data source).

    :param grid_data: dataset providing ``pm``/``pn`` (and the rho/psi
        coordinate fields when interpolating)
    :param interpolate_to_psi: if True, linearly interpolate ``pm``/``pn``
        from the rho grid onto the psi grid first
    :return: array of cell areas in km^2
    """
    if interpolate_to_psi:
        # TODO: use interp.py
        rho_points = np.vstack((grid_data.lon_rho.values.reshape(-1),
                                grid_data.lat_rho.values.reshape(-1))).T
        lon_psi = grid_data.lon_psi.values
        lat_psi = grid_data.lat_psi.values
        pm = LinearNDInterpolator(rho_points, grid_data.pm.values.reshape(-1)
                                  )(lon_psi, lat_psi)
        pn = LinearNDInterpolator(rho_points, grid_data.pn.values.reshape(-1)
                                  )(lon_psi, lat_psi)
    else:
        pm = grid_data.pm.values
        pn = grid_data.pn.values
    # pm/pn are inverse grid spacings, so (1/pm)*(1/pn) is the cell area in m^2.
    area = (1 / pm) * (1 / pn)
    return area / (1000. * 1000.)
def parse_slice(val):
    """
    Convert a string with Python-like slice notation into a slice object.

    A plain integer becomes a single-element slice; ``-1`` maps to
    ``slice(-1, None)`` so the last element is selected.

    :param val: e.g. "5", "-1", "1:10", "1:10:2", ":"
    :return: slice
    """
    if ':' in val:
        parts = val.split(':')

        def as_int(text):
            return None if text == '' else int(text)

        start = as_int(parts[0])
        stop = as_int(parts[1])
        step = as_int(parts[2]) if len(parts) >= 3 else None
        return slice(start, stop, step)
    index = int(val)
    return slice(index, index + 1 if index != -1 else None)
def parse_datetime_string(date_string):
    """
    Parse *date_string* against a list of known formats.

    :param date_string: date/datetime string
    :return: tuple of (datetime object, the format string that matched)
    :raises ValueError: if none of the known formats matches
    """
    known_formats = ['%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d',
                     '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S.%f']
    for candidate in known_formats:
        try:
            return datetime.datetime.strptime(date_string, candidate), candidate
        except ValueError:
            continue
    raise ValueError('Could not find a suitable date format.')
def date_string_to_obj(date_string, sample_obj):
    """
    Convert a date string into the same date/time type as *sample_obj*.

    The target constructor must accept the usual datetime keyword
    arguments (year..second); this is especially useful with cftime.

    :param date_string: date/datetime string
    :param sample_obj: object whose type the result should have
    :return: new instance of ``type(sample_obj)``
    """
    parsed, _ = parse_datetime_string(date_string)
    target_type = type(sample_obj)
    return target_type(year=parsed.year, month=parsed.month, day=parsed.day,
                       hour=parsed.hour, minute=parsed.minute,
                       second=parsed.second)
def add_to_date_string(date_string, dt):
    """
    Add a timedelta to a date given as a string.

    :param date_string: parseable date/datetime string
    :param dt: datetime.timedelta to add
    :return: result formatted as '%Y-%m-%dT%H:%M:%S'; when the input had
        no time component only the date part is returned
    """
    start, _ = parse_datetime_string(date_string)
    # xarray-friendly ISO format
    end = (start + dt).strftime('%Y-%m-%dT%H:%M:%S')
    # bugfix: strftime strips leading zeros from the year on some platforms
    year_width = end.index('-')
    if year_width != 4:
        end = '0' * (4 - year_width) + end
    # mirror the input: only keep the time part if one was provided
    if ':' in date_string:
        return end
    return end.split('T')[0]
def get_lon_lat_dims(dataarray):
    """
    Derive the longitude/latitude coordinate names matching the grid a
    data array lives on (rho/u/v/...), based on its dimension names.

    Falls back to ('lon_rho', 'lat_rho') when no eta/xi (or lon/lat)
    dimension is present.

    :param dataarray: object exposing a ``dims`` sequence
    :return: tuple (lon_name, lat_name)
    """
    lon_name, lat_name = 'lon_rho', 'lat_rho'
    for dim in dataarray.dims:
        if dim.startswith('eta') or dim.startswith('lon'):
            lon_name = dim.replace('eta_', 'lon_')
        if dim.startswith('xi') or dim.startswith('lat'):
            lat_name = dim.replace('xi_', 'lat_')
    # Both names must refer to the same grid (rho, u, v, ...).
    assert lon_name.replace('lon_', '') == lat_name.replace('lat_', ''), 'Ey, lon_rho != lon_u altough eta_rho == eta_u'
    return lon_name, lat_name
def get_depth_dim(dataarray):
    """
    Return the name of the vertical dimension ('depth' or 's_rho'),
    or None when the array has no depth axis.

    :param dataarray: object exposing a ``dims`` sequence
    """
    for candidate in ('depth', 's_rho'):
        if candidate in dataarray.dims:
            return candidate
    return None
def check_output_path(output_path):
    """
    Guard against silently overwriting an existing output file.

    If a file already exists at *output_path*, the user is asked whether
    to overwrite ('o') or cancel ('c'); anything else is an error.
    Exits the process on cancel or unknown input; returns otherwise.

    :param output_path: path of the file about to be written
    """
    if not os.path.isfile(output_path):
        return
    print('WARNING: a file exist at the specified output path')
    action = input('Do you want to overwrite (o) or cancel (c)? ').strip()
    if action == 'c':
        sys.exit()
    if action != 'o':
        print('ERROR: unknown option.')
        sys.exit(1)
    # 'o': proceed as if the file did not exist
def get_num_days(dataset):
    """
    Number of days per year in the dataset's calendar.

    Parses the ``time:calendar`` attribute (e.g. '365_day'); datasets
    without a time coordinate fall back to the length of their 'doy'
    coordinate.

    :param dataset: mapping-like dataset
    :return: int
    """
    if "time" not in dataset:
        return len(dataset["doy"])
    calendar = dataset["time"].attrs['calendar']
    return int(calendar.replace("_day", ""))
def get_doys(t_obj, ds, days_around):
    """
    Day-of-year indices within ``days_around`` days of *t_obj*, wrapped
    at the calendar year boundary.

    :param t_obj: cftime-like object exposing ``dayofyr`` (1-based)
    :param ds: dataset used to determine the calendar length
    :param days_around: half-width of the window in days
    :return: np.ndarray of length ``2 * days_around + 1``
    """
    center = t_obj.dayofyr - 1
    year_length = get_num_days(ds)
    window = range(center - days_around, center + days_around + 1)
    doys = np.array([day % year_length for day in window])
    assert len(doys) == days_around * 2 + 1
    return doys
def get_doys_around_doy(doy, num_days, days_around):
    """
    Day-of-year indices within ``days_around`` days of *doy*, wrapped at
    the calendar year boundary.

    :param doy: 0-based day of year at the window center
    :param num_days: number of days in the calendar year
    :param days_around: half-width of the window in days
    :return: np.ndarray of length ``2 * days_around + 1``
    """
    window = range(doy - days_around, doy + days_around + 1)
    doys = np.array([day % num_days for day in window])
    assert len(doys) == days_around * 2 + 1
    return doys
def str_tobj(t_obj):
    """
    Pretty-print a time object: cftime 'noleap' datetimes are formatted
    as zero-padded YYYY-MM-DD, anything else falls back to str().

    :param t_obj: time object
    :return: str
    """
    if type(t_obj) is cftime._cftime.DatetimeNoLeap:
        return '{:04d}-{:02d}-{:02d}'.format(t_obj.year, t_obj.month, t_obj.day)
    return str(t_obj)
def get_triangular_weights(doys):
    """
    Normalised triangular (hat-shaped) weights over a window of days.

    The weights ramp linearly from 0 at both ends to 1 at the center and
    are normalised to sum to 1.

    :param doys: 1D array whose length sets the window size
    :return: np.ndarray of weights summing to 1
    """
    n = doys.shape[0]
    width = n // 2 + 1
    ramp = np.linspace(0, 1, width)
    weights = np.zeros(n, dtype=float)
    if n % 2 == 0:
        # even window: the peak value is shared by the two middle points
        weights[:width - 1] = ramp[:-1]
        weights[width - 1:] = ramp[::-1][1:]
    else:
        weights[:width] = ramp
        weights[width:] = ramp[::-1][1:]
    return weights / np.sum(weights)
def np_rolling_mean(data, num_points, mode="reflect", axis=0):
    """
    Rolling (uniform) mean over a numpy array along *axis*.

    Delegates to scipy's ``uniform_filter1d``; *mode* controls how the
    boundaries are padded.

    :param data: np.ndarray
    :param num_points: window size
    :return: np.ndarray of the same shape
    """
    return uniform_filter1d(data, size=num_points, mode=mode, axis=axis)
def p(fn, *args, **kwargs):
    """
    Freeze a call: return a zero-argument callable that executes
    ``fn(*args, **kwargs)`` when invoked.

    Used together with `cache` to defer expensive computations.
    """
    def deferred():
        return fn(*args, **kwargs)
    return deferred
def cache(path, *args, invalidate=False):
    """
    Memoise the results of several zero-argument callables on disk.

    The callables run only when ``<path>.npz`` does not exist (or when
    ``invalidate`` is set); otherwise their stored results are loaded.

    :param path: cache file path without the '.npz' suffix
    :param args: zero-argument callables producing numpy-storable values
    :param invalidate: force recomputation even if a cache file exists
    :return: list with one result per callable, in order
    """
    keys = ['{:d}'.format(i) for i in range(len(args))]
    npz_path = path + '.npz'
    if os.path.isfile(npz_path) and not invalidate:
        print('Load cache')
        data = np.load(npz_path, allow_pickle=True)
    else:
        data = {key: compute() for key, compute in zip(keys, args)}
        np.savez(npz_path, **data)
    return [data[key] for key in keys]
def mean_with_error(x, dx, axis=None):
    """
    NaN-ignoring mean of *x* together with the propagated uncertainty.

    For f(x) = (x1 + ... + xN) / N the propagated error is
    df = sqrt(dx1**2 + ... + dxN**2) / N, where N counts the non-NaN
    entries of *dx*.

    :param x: values
    :param dx: per-value uncertainties
    :param axis: axis to reduce over (None reduces everything)
    :return: tuple (mean, propagated error)
    """
    mean = np.nanmean(x, axis=axis)
    n_valid = np.count_nonzero(~np.isnan(dx), axis=axis)
    propagated = np.sqrt(np.nansum(dx ** 2, axis=axis)) / n_valid
    return mean, propagated
def ratio_with_error(x, y, dx, dy):
    """
    Ratio x/y with first-order Gaussian error propagation.

    d(x/y) = sqrt((dx/y)**2 + (x*dy/y**2)**2)

    :param x: numerator value(s)
    :param y: denominator value(s)
    :param dx: uncertainty of x
    :param dy: uncertainty of y
    :return: tuple (ratio, propagated error)
    """
    ratio = x / y
    term_x = (dx / y) ** 2
    term_y = (dy * x / (y ** 2)) ** 2
    return ratio, np.sqrt(term_x + term_y)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Year: 2020
import os
import sys
import numpy as np
from scipy.interpolate import NearestNDInterpolator, LinearNDInterpolator, griddata
from scipy.ndimage.filters import uniform_filter1d
import datetime
import netCDF4 as nc
import cftime
def get_area_map(grid_data, interpolate_to_psi=False):
"""
Calculate the area of grid cells
"""
pm = None
pn = None
if interpolate_to_psi:
# TODO: use interp.py
coords = np.vstack((grid_data.lon_rho.values.reshape(-1),
grid_data.lat_rho.values.reshape(-1))).T
pm = LinearNDInterpolator(coords, grid_data.pm.values.reshape(-1)
)(grid_data.lon_psi.values, grid_data.lat_psi.values)
pn = LinearNDInterpolator(coords, grid_data.pn.values.reshape(-1)
)(grid_data.lon_psi.values, grid_data.lat_psi.values)
else:
pm = grid_data.pm.values
pn = grid_data.pn.values
area = (1/pm) * (1/pn)
return area / (1000.*1000.)
def parse_slice(val):
"""
Convert a string with a Python-like slice notation to a slice object.
"""
if ':' not in val:
value = int(val)
stop_value = value + 1 if value != -1 else None
return slice(value, stop_value)
else:
value = val.split(':')
start = None if value[0] == '' else int(value[0])
stop = None if value[1] == '' else int(value[1])
step = None if len(value) < 3 or value[2] == '' else int(value[2])
return slice(start, stop, step)
def parse_datetime_string(date_string):
"""
Parse a string to a datetime object by checking different formats. Also returns the format.
"""
date = None
date_f = None
for date_format in ['%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S.%f']:
try:
date = datetime.datetime.strptime(date_string, date_format)
date_f = date_format
break
except ValueError:
pass
if date is None:
raise ValueError('Could not find a suitable date format.')
return date, date_f
def date_string_to_obj(date_string, sample_obj):
"""
Parse a string to an object given by sample_obj. The constructor must accept common datetime attributes (see code). This is especially useful when working with cftime.
"""
dt_obj, _ = parse_datetime_string(date_string)
return type(sample_obj)(year=dt_obj.year, month=dt_obj.month, day=dt_obj.day, hour=dt_obj.hour, minute=dt_obj.minute, second=dt_obj.second)
def add_to_date_string(date_string, dt):
"""
Add a timedelta object to a date string.
"""
# parse the date string
date_start, _ = parse_datetime_string(date_string)
# format it correctly for xarray
date_end = (date_start + dt).strftime('%Y-%m-%dT%H:%M:%S')
# bugfix: strftime strips leading zeros
first_idx = date_end.index('-')
if first_idx != 4:
date_end = '0'*(4 - first_idx) + date_end
# if not time was provided in initial string, just return the date part
if ':' in date_string:
return date_end
else:
return date_end.split('T')[0]
def get_lon_lat_dims(dataarray):
"""
Get the name of lon and lat corresponding to an dataarray (based on the dimensions of the dataarray).
"""
# get correct grid
dims = dataarray.dims
lon_name = 'lon_rho'
lat_name = 'lat_rho'
for dim in dims:
if dim.startswith('eta') or dim.startswith('lon'):
lon_name = dim.replace('eta_', 'lon_')
if dim.startswith('xi') or dim.startswith('lat'):
lat_name = dim.replace('xi_', 'lat_')
assert lon_name.replace('lon_', '') == lat_name.replace('lat_', ''), 'Ey, lon_rho != lon_u altough eta_rho == eta_u'
return lon_name, lat_name
def get_depth_dim(dataarray):
"""
Filter the depth dimension of a data array.
"""
if 'depth' in dataarray.dims:
return 'depth'
if 's_rho' in dataarray.dims:
return 's_rho'
return None
def check_output_path(output_path):
"""
Check that a file does not exist yet at an output path and ask the user what to do if it exists.
"""
if os.path.isfile(output_path):
print('WARNING: a file exist at the specified output path')
action = input('Do you want to overwrite (o) or cancel (c)? ')
if action.strip() == 'c':
sys.exit()
elif action.strip() == 'o':
# do same as it would not exist
pass
else:
print('ERROR: unknown option.')
sys.exit(1)
def get_num_days(dataset):
"""
Parse the time:calendar attribute of a dataset and get the number of days a year has
"""
if "time" in dataset:
# get the max days from calendar
calendar = dataset["time"].attrs['calendar']
max_days = int(calendar.replace("_day", ""))
return max_days
else:
return len(dataset["doy"])
def get_doys(t_obj, ds, days_around):
"""
Get an array of all doys which are `days_around` days around a time object.
"""
doy = t_obj.dayofyr - 1
num_days = get_num_days(ds)
doys = np.array([i % num_days for i in range(doy - days_around, doy + days_around + 1)])
assert len(doys) == days_around*2 + 1
return doys
def get_doys_around_doy(doy, num_days, days_around):
"""
Get an array of all doys which are `days_around` days around a doy.
"""
doys = np.array([i % num_days for i in range(doy - days_around, doy + days_around + 1)])
assert len(doys) == days_around*2 + 1
return doys
def str_tobj(t_obj):
"""
Pretty pring a time object (cftime)
"""
if type(t_obj) is not cftime._cftime.DatetimeNoLeap:
return str(t_obj)
else:
return '{:04d}-{:02d}-{:02d}'.format(t_obj.year, t_obj.month, t_obj.day)
def get_triangular_weights(doys):
"""
Get an array of weights for triangular weighting.
"""
weights = np.zeros(doys.shape[0]).astype(float)
width = doys.shape[0] // 2 + 1
half_weights = np.linspace(0, 1, width)
if doys.shape[0] % 2 == 0:
weights[:width-1] = half_weights[:-1]
weights[width-1:] = half_weights[::-1][1:]
else:
weights[:width] = half_weights
weights[width:] = half_weights[::-1][1:]
return weights / np.sum(weights)
def np_rolling_mean(data, num_points, mode="reflect", axis=0):
"""
Calculating a rolling mean on a numpy array.
"""
return uniform_filter1d(data, size=num_points, axis=axis, mode=mode)
def p(fn, *args, **kwargs):
"""
Get a callable which - when executed - executes a function with given arguments and keyword-arguments.
This is used in the context of `cache`.
"""
def s():
return fn(*args, **kwargs)
return s
def cache(path, *args, invalidate=False):
"""
Cache the result of a list of callables. The callables are only executed when the provided path does not exist.
"""
data = None
args_keys = ['{:d}'.format(i) for i in range(len(args))]
# load cache
if os.path.isfile(path+'.npz') and not invalidate:
print('Load cache')
data = np.load(path+'.npz', allow_pickle=True)
# execute all callables and save results to numpy
else:
data = {
args_keys[i]: args[i]() for i in range(len(args))
}
np.savez(path+'.npz', **data)
return [data[key] for key in args_keys]
def mean_with_error(x, dx, axis=None):
"""
Calculate an average and propagate the error accordingly.
"""
# calculate mean: f(x) = 1/N * (x1 + x2 + ...)
mean = np.nanmean(x, axis=axis)
num_nonnan = np.count_nonzero(~np.isnan(dx), axis=axis)
# error propagation: df(x) = 1/N * sqrt(dx1**2 + dx2**2 + ...)
dk = np.sqrt(
np.nansum(dx**2, axis=axis)
) / num_nonnan
return mean, dk
def ratio_with_error(x, y, dx, dy):
"""
Calculate a ratio and propagate the errors accordingly.
"""
# f(x, y) = x/y
rel = x/y
# df(x, y) = sqrt( (dx/y)**2 + (dy*x/(y**2))**2 )
d_rel = np.sqrt(
((dx/y)**2) +
((dy*x/(y**2))**2)
)
return rel, d_rel
|
en
| 0.791488
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Author: <NAME> # Year: 2020 Calculate the area of grid cells # TODO: use interp.py Convert a string with a Python-like slice notation to a slice object. Parse a string to a datetime object by checking different formats. Also returns the format. Parse a string to an object given by sample_obj. The constructor must accept common datetime attributes (see code). This is especially useful when working with cftime. Add a timedelta object to a date string. # parse the date string # format it correctly for xarray # bugfix: strftime strips leading zeros # if not time was provided in initial string, just return the date part Get the name of lon and lat corresponding to an dataarray (based on the dimensions of the dataarray). # get correct grid Filter the depth dimension of a data array. Check that a file does not exist yet at an output path and ask the user what to do if it exists. # do same as it would not exist Parse the time:calendar attribute of a dataset and get the number of days a year has # get the max days from calendar Get an array of all doys which are `days_around` days around a time object. Get an array of all doys which are `days_around` days around a doy. Pretty pring a time object (cftime) Get an array of weights for triangular weighting. Calculating a rolling mean on a numpy array. Get a callable which - when executed - executes a function with given arguments and keyword-arguments. This is used in the context of `cache`. Cache the result of a list of callables. The callables are only executed when the provided path does not exist. # load cache # execute all callables and save results to numpy Calculate an average and propagate the error accordingly. # calculate mean: f(x) = 1/N * (x1 + x2 + ...) # error propagation: df(x) = 1/N * sqrt(dx1**2 + dx2**2 + ...) Calculate a ratio and propagate the errors accordingly. # f(x, y) = x/y # df(x, y) = sqrt( (dx/y)**2 + (dy*x/(y**2))**2 )
| 2.718734
| 3
|
tools/generate_taint_models/get_globals.py
|
GreyElaina/pyre-check
| 0
|
6626381
|
<reponame>GreyElaina/pyre-check
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import ast
import glob
import logging
import os
from typing import Callable, Iterable, Optional, Set, Tuple, Union
from typing_extensions import Final
from .model import AssignmentModel, FunctionDefinitionModel, Model
from .model_generator import ModelGenerator, qualifier
from .module_loader import find_all_paths, load_module
LOG: logging.Logger = logging.getLogger(__name__)
FunctionDefinition = Union[ast.FunctionDef, ast.AsyncFunctionDef]
class GlobalModelGenerator(ModelGenerator[Model]):
def __init__(
self,
root: str,
stub_root: Optional[str] = None,
blacklisted_globals: Optional[Set[str]] = None,
blacklisted_global_directories: Optional[Set[str]] = None,
) -> None:
self.root: str = root
self.stub_root: Final[Optional[str]] = stub_root
self.blacklisted_globals: Set[str] = (blacklisted_globals or set())
self.blacklisted_global_directories: Set[str] = (
blacklisted_global_directories or set()
)
def _globals(self, root: str, path: str) -> Iterable[Model]:
globals = set()
# The parent of the property needs to be stored as well, as we only store the
# module qualifier.
cached_properties: Set[Tuple[Optional[str], FunctionDefinition]] = set()
module = load_module(path)
if not module:
return globals
class NameVisitor(ast.NodeVisitor):
def __init__(self, globals: Set) -> None:
self.globals = globals
self.blacklist: Optional[Set[str]] = None
self.parent: Optional[str] = None
def visit_Name(self, name: ast.Name) -> None:
blacklist = self.blacklist
if blacklist is not None and name.id in blacklist:
return
parent = self.parent
if parent is not None:
name_to_register = f"{parent}.__class__.{name.id}"
else:
name_to_register = name.id
self.globals.add(name_to_register)
# Ensure that we stop recursing when we're in a complex assign, such as
# a.b = ... or a[b] = ... .
def visit_Attribute(self, attribute: ast.Attribute) -> None:
return
def visit_Subscript(self, subscript: ast.Subscript) -> None:
return
visitor: NameVisitor = NameVisitor(globals)
def visit_assignment(target: ast.expr, value: ast.expr) -> None:
if value is not None:
# namedtuples get preprocessed out by Pyre, and shouldn't be added
# as globals.
if isinstance(value, ast.Call):
callee = value.func
if (
isinstance(callee, ast.Attribute)
and callee.attr == "namedtuple"
):
return
if isinstance(callee, ast.Name) and callee.id == "namedtuple":
return
# Omit pure aliases of the form `x = alias`.
if isinstance(value, ast.Name) or isinstance(value, ast.Attribute):
return
# x = lambda: _ can safely be avoided, as the models confuse our taint
# analysis.
if isinstance(value, ast.Lambda):
return
visitor.visit(target)
def should_visit_class(class_definition: ast.ClassDef) -> bool:
# Ensure that we don't visit nested classes for now.
if visitor.parent is not None:
return False
# TypedDicts use top-level attribute declarations to declare attributes.
for base in class_definition.bases:
base_name = None
if isinstance(base, ast.Name):
base_name = base.id
if isinstance(base, ast.Attribute):
base_name = base.attr
if base_name == "TypedDict":
return False
def is_dataclass_decorator(expression: ast.expr) -> bool:
if isinstance(expression, ast.Call):
return is_dataclass_decorator(expression.func)
if isinstance(expression, ast.Name):
return expression.id == "dataclass"
if isinstance(expression, ast.Attribute):
base = expression.value
if isinstance(base, ast.Name) and base.id == "dataclasses":
return expression.attr == "dataclass"
return False
for decorator in class_definition.decorator_list:
# Skip visiting dataclasses, as they use class variables to generate
# instance variables. They can have one of the following forms:
# @dataclass(args), @dataclass, or `@dataclasses.dataclass(args)`.
if is_dataclass_decorator(decorator):
return False
return True
def all_attributes(class_definition: ast.ClassDef) -> Set[str]:
attributes = set()
for statement in class_definition.body:
if not isinstance(statement, ast.FunctionDef):
continue
for assignment in statement.body:
if isinstance(assignment, ast.Assign):
for target in assignment.targets:
attribute = _get_self_attribute(target)
if attribute is not None:
attributes.add(attribute)
elif isinstance(assignment, ast.AnnAssign):
attribute = _get_self_attribute(assignment.target)
if attribute is not None:
attributes.add(attribute)
return attributes
def visit_statement(statement: ast.stmt) -> None:
if isinstance(statement, ast.Assign):
# Omit pure aliases of the form `x = alias`.
for target in statement.targets:
visit_assignment(target, statement.value)
elif isinstance(statement, ast.AugAssign):
visitor.visit(statement.target)
# Don't attempt to register statements of the form `x: int`.
elif isinstance(statement, ast.AnnAssign):
value = statement.value
if value is not None:
visit_assignment(statement.target, value)
elif isinstance(statement, ast.FunctionDef) or isinstance(
statement, ast.AsyncFunctionDef
):
for decorator in statement.decorator_list:
if _is_cached_property_decorator(decorator):
cached_properties.add((visitor.parent, statement))
elif isinstance(statement, ast.ClassDef) and should_visit_class(statement):
visitor.parent = statement.name
visitor.blacklist = all_attributes(statement)
for toplevel_statement in statement.body:
visit_statement(toplevel_statement)
visitor.parent = None
visitor.blacklist = None
for statement in module.body:
visit_statement(statement)
module_qualifier = qualifier(root, path)
models = set()
for target in globals:
if target == "__all__":
continue
qualified_target = f"{module_qualifier}.{target}"
if qualified_target in self.blacklisted_globals:
continue
try:
generated = AssignmentModel(
annotation="TaintSink[Global]", target=qualified_target
)
models.add(generated)
except ValueError:
pass
for (parent, function_definition) in cached_properties:
is_class_property = any(
(
_is_class_property_decorator(decorator)
for decorator in function_definition.decorator_list
)
)
if is_class_property:
returns = "TaintSink[Global, Via[cached_class_property]]"
else:
returns = "TaintSink[Global, Via[cached_property]]"
if parent is not None:
function_qualifier = f"{module_qualifier}.{parent}"
else:
function_qualifier = module_qualifier
try:
function_definition_model = FunctionDefinitionModel(
qualifier=function_qualifier,
definition=function_definition,
returns=returns,
)
models.add(function_definition_model)
except ValueError:
pass
return models
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
    """Return no callables: this generator works by scanning source files,
    not by introspecting functions."""
    return []
def compute_models(
    self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[Model]:
    """Scan every source under ``self.root`` (plus ``.pyi`` stubs under
    ``self.stub_root``, when set) and return the union of the per-file
    global-sink models produced by ``self._globals``.

    ``functions_to_model`` is unused by this generator (see
    ``gather_functions_to_model``, which returns an empty list).
    """
    sinks: Set[Model] = set()
    for path in find_all_paths(self.root):
        relative_path = os.path.relpath(path, self.root)
        # Skip any file whose root-relative path starts with a blacklisted
        # directory prefix.
        should_skip = any(
            (
                relative_path.startswith(blacklisted)
                for blacklisted in self.blacklisted_global_directories
            )
        )
        if should_skip:
            LOG.info("Skipping %s", os.path.relpath(path, self.root))
        else:
            sinks = sinks.union(self._globals(self.root, path))
    stub_root = self.stub_root
    if stub_root is not None:
        stub_root = os.path.abspath(stub_root)
        # Stub files are discovered recursively by extension; the directory
        # blacklist above is not applied to the stub tree.
        paths = glob.glob(stub_root + "/**/*.pyi", recursive=True)
        for path in paths:
            sinks = sinks.union(self._globals(stub_root, path))
    return sinks
def _get_self_attribute(target: ast.expr) -> Optional[str]:
if isinstance(target, ast.Attribute):
value = target.value
if isinstance(value, ast.Name) and value.id == "self":
return target.attr
return None
def _is_cached_property_decorator(decorator: ast.expr) -> bool:
if isinstance(decorator, ast.Name):
name = decorator.id
elif isinstance(decorator, ast.Attribute):
name = decorator.attr
else:
name = None
if name is None:
return False
return "cached" in name and "property" in name
def _is_class_property_decorator(decorator: ast.expr) -> bool:
if isinstance(decorator, ast.Name):
name = decorator.id
elif isinstance(decorator, ast.Attribute):
name = decorator.attr
else:
name = None
if name is None:
return False
return "class" in name and "property" in name
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import ast
import glob
import logging
import os
from typing import Callable, Iterable, Optional, Set, Tuple, Union
from typing_extensions import Final
from .model import AssignmentModel, FunctionDefinitionModel, Model
from .model_generator import ModelGenerator, qualifier
from .module_loader import find_all_paths, load_module
LOG: logging.Logger = logging.getLogger(__name__)
FunctionDefinition = Union[ast.FunctionDef, ast.AsyncFunctionDef]
class GlobalModelGenerator(ModelGenerator[Model]):
    """Generate taint models marking module/class-level globals as sinks.

    Scans every Python file under ``root`` (plus ``.pyi`` stubs under
    ``stub_root``) and emits ``TaintSink[Global]`` assignment models for
    mutable module- and class-level targets, as well as sink models for
    functions decorated as (class) cached properties.
    """

    def __init__(
        self,
        root: str,
        stub_root: Optional[str] = None,
        blacklisted_globals: Optional[Set[str]] = None,
        blacklisted_global_directories: Optional[Set[str]] = None,
    ) -> None:
        # root: directory tree to scan for sources.
        # stub_root: optional directory scanned recursively for .pyi stubs.
        # blacklisted_globals: fully qualified targets to never model.
        # blacklisted_global_directories: root-relative path prefixes to skip.
        self.root: str = root
        self.stub_root: Final[Optional[str]] = stub_root
        self.blacklisted_globals: Set[str] = (blacklisted_globals or set())
        self.blacklisted_global_directories: Set[str] = (
            blacklisted_global_directories or set()
        )

    def _globals(self, root: str, path: str) -> Iterable[Model]:
        """Collect global-sink models for the single source file at *path*."""
        globals = set()
        # The parent of the property needs to be stored as well, as we only store the
        # module qualifier.
        cached_properties: Set[Tuple[Optional[str], FunctionDefinition]] = set()
        module = load_module(path)
        if not module:
            # Unparseable / unloadable file: no models.
            return globals

        class NameVisitor(ast.NodeVisitor):
            """Registers assignment-target names into ``globals``."""

            def __init__(self, globals: Set) -> None:
                self.globals = globals
                # Names to ignore (instance attributes of the current class).
                self.blacklist: Optional[Set[str]] = None
                # Name of the enclosing class while visiting a class body.
                self.parent: Optional[str] = None

            def visit_Name(self, name: ast.Name) -> None:
                blacklist = self.blacklist
                if blacklist is not None and name.id in blacklist:
                    return
                parent = self.parent
                if parent is not None:
                    # Class-level attributes are modeled on the class object.
                    name_to_register = f"{parent}.__class__.{name.id}"
                else:
                    name_to_register = name.id
                self.globals.add(name_to_register)

            # Ensure that we stop recursing when we're in a complex assign, such as
            # a.b = ... or a[b] = ... .
            def visit_Attribute(self, attribute: ast.Attribute) -> None:
                return

            def visit_Subscript(self, subscript: ast.Subscript) -> None:
                return

        visitor: NameVisitor = NameVisitor(globals)

        def visit_assignment(target: ast.expr, value: ast.expr) -> None:
            """Register *target* unless the assigned *value* is exempt."""
            if value is not None:
                # namedtuples get preprocessed out by Pyre, and shouldn't be added
                # as globals.
                if isinstance(value, ast.Call):
                    callee = value.func
                    if (
                        isinstance(callee, ast.Attribute)
                        and callee.attr == "namedtuple"
                    ):
                        return
                    if isinstance(callee, ast.Name) and callee.id == "namedtuple":
                        return
                # Omit pure aliases of the form `x = alias`.
                if isinstance(value, ast.Name) or isinstance(value, ast.Attribute):
                    return
                # x = lambda: _ can safely be avoided, as the models confuse our taint
                # analysis.
                if isinstance(value, ast.Lambda):
                    return
                visitor.visit(target)

        def should_visit_class(class_definition: ast.ClassDef) -> bool:
            """Whether class-level assignments in this class should be modeled."""
            # Ensure that we don't visit nested classes for now.
            if visitor.parent is not None:
                return False
            # TypedDicts use top-level attribute declarations to declare attributes.
            for base in class_definition.bases:
                base_name = None
                if isinstance(base, ast.Name):
                    base_name = base.id
                if isinstance(base, ast.Attribute):
                    base_name = base.attr
                if base_name == "TypedDict":
                    return False

            def is_dataclass_decorator(expression: ast.expr) -> bool:
                if isinstance(expression, ast.Call):
                    return is_dataclass_decorator(expression.func)
                if isinstance(expression, ast.Name):
                    return expression.id == "dataclass"
                if isinstance(expression, ast.Attribute):
                    base = expression.value
                    if isinstance(base, ast.Name) and base.id == "dataclasses":
                        return expression.attr == "dataclass"
                return False

            for decorator in class_definition.decorator_list:
                # Skip visiting dataclasses, as they use class variables to generate
                # instance variables. They can have one of the following forms:
                # @dataclass(args), @dataclass, or `@dataclasses.dataclass(args)`.
                if is_dataclass_decorator(decorator):
                    return False
            return True

        def all_attributes(class_definition: ast.ClassDef) -> Set[str]:
            """Collect ``self.<attr>`` names assigned anywhere in the class's
            methods, used to blacklist instance attributes."""
            attributes = set()
            for statement in class_definition.body:
                if not isinstance(statement, ast.FunctionDef):
                    continue
                for assignment in statement.body:
                    if isinstance(assignment, ast.Assign):
                        for target in assignment.targets:
                            attribute = _get_self_attribute(target)
                            if attribute is not None:
                                attributes.add(attribute)
                    elif isinstance(assignment, ast.AnnAssign):
                        attribute = _get_self_attribute(assignment.target)
                        if attribute is not None:
                            attributes.add(attribute)
            return attributes

        def visit_statement(statement: ast.stmt) -> None:
            """Dispatch on one top-level (or class-body) statement."""
            if isinstance(statement, ast.Assign):
                # Omit pure aliases of the form `x = alias`.
                for target in statement.targets:
                    visit_assignment(target, statement.value)
            elif isinstance(statement, ast.AugAssign):
                visitor.visit(statement.target)
            # Don't attempt to register statements of the form `x: int`.
            elif isinstance(statement, ast.AnnAssign):
                value = statement.value
                if value is not None:
                    visit_assignment(statement.target, value)
            elif isinstance(statement, ast.FunctionDef) or isinstance(
                statement, ast.AsyncFunctionDef
            ):
                for decorator in statement.decorator_list:
                    if _is_cached_property_decorator(decorator):
                        cached_properties.add((visitor.parent, statement))
            elif isinstance(statement, ast.ClassDef) and should_visit_class(statement):
                # Recurse into the class body with parent/blacklist set.
                visitor.parent = statement.name
                visitor.blacklist = all_attributes(statement)
                for toplevel_statement in statement.body:
                    visit_statement(toplevel_statement)
                visitor.parent = None
                visitor.blacklist = None

        # Phase 1: walk the module collecting targets and cached properties.
        for statement in module.body:
            visit_statement(statement)
        # Phase 2: turn the collected names into models.
        module_qualifier = qualifier(root, path)
        models = set()
        for target in globals:
            # Never model __all__.
            if target == "__all__":
                continue
            qualified_target = f"{module_qualifier}.{target}"
            if qualified_target in self.blacklisted_globals:
                continue
            try:
                generated = AssignmentModel(
                    annotation="TaintSink[Global]", target=qualified_target
                )
                models.add(generated)
            except ValueError:
                # Skip targets the model class rejects.
                pass
        for (parent, function_definition) in cached_properties:
            is_class_property = any(
                (
                    _is_class_property_decorator(decorator)
                    for decorator in function_definition.decorator_list
                )
            )
            if is_class_property:
                returns = "TaintSink[Global, Via[cached_class_property]]"
            else:
                returns = "TaintSink[Global, Via[cached_property]]"
            if parent is not None:
                function_qualifier = f"{module_qualifier}.{parent}"
            else:
                function_qualifier = module_qualifier
            try:
                function_definition_model = FunctionDefinitionModel(
                    qualifier=function_qualifier,
                    definition=function_definition,
                    returns=returns,
                )
                models.add(function_definition_model)
            except ValueError:
                # Skip definitions the model class rejects.
                pass
        return models

    def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
        """This generator derives models from source scanning, not callables."""
        return []

    def compute_models(
        self, functions_to_model: Iterable[Callable[..., object]]
    ) -> Iterable[Model]:
        """Scan root (and stub_root, if set) and union all per-file models.

        ``functions_to_model`` is unused by this generator.
        """
        sinks: Set[Model] = set()
        for path in find_all_paths(self.root):
            relative_path = os.path.relpath(path, self.root)
            # Skip files under blacklisted directories (relative-prefix match).
            should_skip = any(
                (
                    relative_path.startswith(blacklisted)
                    for blacklisted in self.blacklisted_global_directories
                )
            )
            if should_skip:
                LOG.info("Skipping %s", os.path.relpath(path, self.root))
            else:
                sinks = sinks.union(self._globals(self.root, path))
        stub_root = self.stub_root
        if stub_root is not None:
            stub_root = os.path.abspath(stub_root)
            # Stubs are discovered recursively by extension.
            paths = glob.glob(stub_root + "/**/*.pyi", recursive=True)
            for path in paths:
                sinks = sinks.union(self._globals(stub_root, path))
        return sinks
def _get_self_attribute(target: ast.expr) -> Optional[str]:
    """Return ``attr`` when *target* is a ``self.attr`` expression, else None.

    Used to collect instance attributes so they are not mistaken for
    class-level globals.
    """
    if isinstance(target, ast.Attribute):
        value = target.value
        if isinstance(value, ast.Name) and value.id == "self":
            return target.attr
    return None
def _is_cached_property_decorator(decorator: ast.expr) -> bool:
    """Heuristically detect cached-property decorators.

    Matches any bare or dotted decorator whose final name contains both
    "cached" and "property" (e.g. ``cached_property``,
    ``functools.cached_property``). Call-form decorators are not matched.
    """
    if isinstance(decorator, ast.Name):
        name = decorator.id
    elif isinstance(decorator, ast.Attribute):
        name = decorator.attr
    else:
        # e.g. an ast.Call (decorator used with arguments).
        name = None
    if name is None:
        return False
    return "cached" in name and "property" in name
def _is_class_property_decorator(decorator: ast.expr) -> bool:
    """Heuristically detect class-property decorators.

    Matches any bare or dotted decorator whose final name contains both
    "class" and "property" (e.g. ``classproperty``). Call-form decorators
    are not matched.
    """
    if isinstance(decorator, ast.Name):
        name = decorator.id
    elif isinstance(decorator, ast.Attribute):
        name = decorator.attr
    else:
        # e.g. an ast.Call (decorator used with arguments).
        name = None
    if name is None:
        return False
    return "class" in name and "property" in name
|
en
| 0.895691
|
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict # The parent of the property needs to be stored as well, as we only store the # module qualifier. # Ensure that we stop recursing when we're in a complex assign, such as # a.b = ... or a[b] = ... . # namedtuples get preprocessed out by Pyre, and shouldn't be added # as globals. # Omit pure aliases of the form `x = alias`. # x = lambda: _ can safely be avoided, as the models confuse our taint # analysis. # Ensure that we don't visit nested classes for now. # TypedDicts use top-level attribute declarations to declare attributes. # Skip visiting dataclasses, as they use class variables to generate # instance variables. They can have one of the following forms: # @dataclass(args), @dataclass, or `@dataclasses.dataclass(args)`. # Omit pure aliases of the form `x = alias`. # Don't attempt to register statements of the form `x: int`.
| 1.727123
| 2
|
igibson/controllers/multi_finger_gripper_controller.py
|
StanfordVL/InteractiveGibsonEnv
| 51
|
6626382
|
import numpy as np
from igibson.controllers import ControlType, ManipulationController
from igibson.utils.python_utils import assert_valid_key
# Command-interpretation modes accepted by MultiFingerGripperController.
VALID_MODES = {
    "binary",
    "ternary",
}
class MultiFingerGripperController(ManipulationController):
    """
    Controller class for **discrete** multi finger gripper control. This either interprets an input as a binary
    command (open / close), or ternary (open / stay at current position / close). Ternary mode can only be used as a
    position controller.

    **For continuous gripper control, the JointController should be used instead.**

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2a. Convert command into gripper joint control signals
        2b. Clips the resulting control by the motor limits
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        joint_idx,
        command_input_limits="default",
        inverted=False,
        mode="binary",
        limit_tolerance=0.001,
    ):
        """
        :param control_freq: int, controller loop frequency
        :param motor_type: str, low-level actuation type; validated (case-insensitively)
            against ControlType.VALID_TYPES_STR
        :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted
            control signal. Should specify per-actuator type limits, i.e.:
            "position": [[min], [max]]
            "velocity": [[min], [max]]
            "torque": [[min], [max]]
            "has_limit": [...bool...]
            Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
        :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring
            controller-relevant values during control computations
        :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]],
            if set, is the min/max acceptable inputted command. Values outside of this range will be clipped.
            If None, no clipping will be used. If "default", range will be set to (-1, 1)
        :param inverted: bool, whether or not the command direction (grasp is negative) and the control direction are
            inverted, e.g. if True, to grasp you need to apply commands in the positive direction.
        :param mode: str, mode for this controller. Valid options are:
            "binary": 1D command, if preprocessed value > 0 is interpreted as an max open
                (send max pos / vel / tor signal), otherwise send max close control signals
            "ternary": 1D command, if preprocessed value > 0.33, is interpreted as max open (send max position) signal.
                if -0.33 < value < 0.33, the value is interpreted as "keep still", where position control to current
                position is sent. If value < -0.33, maximum close signal is sent.
        :param limit_tolerance: float, sets the tolerance from the joint limit ends, below which controls will be zeroed
            out if the control is using velocity or torque control
        """
        # Store arguments
        assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
        self.motor_type = motor_type.lower()
        assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for multi finger gripper")
        self.inverted = inverted  # also forwarded to the base class below
        self.mode = mode
        self.limit_tolerance = limit_tolerance
        # Ternary's middle band holds the current joint position, which only a
        # position controller can express (see _command_to_control).
        assert not (
            self.mode == "ternary" and self.motor_type != "position"
        ), "MultiFingerGripperController's ternary mode only works with position control."
        # Run super init. Commands are normalized to [-1, 1] regardless of the
        # configured input limits.
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            joint_idx=joint_idx,
            command_input_limits=command_input_limits,
            command_output_limits=(-1.0, 1.0),
            inverted=inverted,
        )

    def reset(self):
        # No-op: this controller keeps no per-episode state.
        pass

    def _command_to_control(self, command, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper
        joint control signal

        :param command: Array[float], desired (already preprocessed) command to convert into control signals.
            This should always be 2D command for each gripper joint
        :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped
            states necessary for controller computation. Must include the following keys:
                joint_position: Array of current joint positions
        :return: Array[float], outputted (non-clipped!) control signal to deploy
        """
        joint_pos = control_dict["joint_position"][self.joint_idx]
        # Choose what to do based on control mode
        if self.mode == "binary":
            # Non-negative command -> max limit (open); negative -> min limit (close).
            u = (
                self.control_limits[ControlType.get_type(self.motor_type)][1][self.joint_idx]
                if command[0] >= 0.0
                else self.control_limits[ControlType.get_type(self.motor_type)][0][self.joint_idx]
            )
        else:  # Ternary mode
            if command[0] > 0.33:  # Closer to 1
                u = self.control_limits[ControlType.get_type(self.motor_type)][1][self.joint_idx]
            elif command[0] > -0.33:  # Closer to 0
                u = joint_pos  # This is why ternary mode only works with position control.
            else:  # Closer to -1
                u = self.control_limits[ControlType.get_type(self.motor_type)][0][self.joint_idx]
        # If we're near the joint limits and we're using velocity / torque control, we zero out the action
        if self.motor_type in {"velocity", "torque"}:
            violate_upper_limit = (
                joint_pos > self.control_limits[ControlType.POSITION][1][self.joint_idx] - self.limit_tolerance
            )
            violate_lower_limit = (
                joint_pos < self.control_limits[ControlType.POSITION][0][self.joint_idx] + self.limit_tolerance
            )
            # A joint "violates" when it is at/over a position limit AND the
            # control would push it further past that limit.
            violation = np.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0))
            u *= ~violation
        # Return control
        return u

    @property
    def control_type(self):
        # Control type matches the configured motor type.
        return ControlType.get_type(type_str=self.motor_type)

    @property
    def command_dim(self):
        # A single scalar open/close command.
        return 1
|
import numpy as np
from igibson.controllers import ControlType, ManipulationController
from igibson.utils.python_utils import assert_valid_key
# Command-interpretation modes accepted by MultiFingerGripperController.
VALID_MODES = {
    "binary",
    "ternary",
}
class MultiFingerGripperController(ManipulationController):
    """
    Discrete open/close controller for a multi-fingered gripper.

    The (already normalized) scalar command is mapped onto per-joint control
    signals in one of two modes:

        "binary":  a command >= 0 drives every gripper joint to its maximum
                   control limit (open); otherwise to its minimum (close).
        "ternary": command > 0.33 opens, command < -0.33 closes, and anything
                   in between holds the current joint positions -- which is
                   why this mode requires position control.

    **For continuous gripper control, the JointController should be used instead.**

    Each controller step:
        1. clips + scales the command per @command_input_limits / @command_output_limits, then
        2. converts the command into per-joint gripper controls, which are
           subsequently clipped by the motor limits.
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        joint_idx,
        command_input_limits="default",
        inverted=False,
        mode="binary",
        limit_tolerance=0.001,
    ):
        """
        :param control_freq: int, controller loop frequency
        :param motor_type: str, low-level actuation type; validated (case-insensitively)
            against ControlType.VALID_TYPES_STR
        :param control_limits: Dict[str, Tuple[Array[float], Array[float]]], per-actuator-type
            min/max limits ("position" / "velocity" / "torque") plus "has_limit" flags
        :param joint_idx: Array[int], joint indices governed by this controller
        :param command_input_limits: None / "default" / 2-tuple, acceptable input command range;
            "default" means (-1, 1), None disables clipping
        :param inverted: bool, whether the command and control directions are inverted
        :param mode: str, "binary" or "ternary" (see class docstring)
        :param limit_tolerance: float, distance from a position limit below which
            velocity / torque controls are zeroed out
        """
        # Validate and record configuration.
        lowered_motor_type = motor_type.lower()
        assert_valid_key(key=lowered_motor_type, valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
        self.motor_type = lowered_motor_type
        assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for multi finger gripper")
        self.inverted = inverted
        self.mode = mode
        self.limit_tolerance = limit_tolerance
        # Holding the current position (ternary middle band) is only
        # expressible with a position controller.
        assert (
            self.mode != "ternary" or self.motor_type == "position"
        ), "MultiFingerGripperController's ternary mode only works with position control."
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            joint_idx=joint_idx,
            command_input_limits=command_input_limits,
            command_output_limits=(-1.0, 1.0),
            inverted=inverted,
        )

    def reset(self):
        """No internal controller state to reset."""

    def _command_to_control(self, command, control_dict):
        """
        Map the preprocessed scalar @command onto per-joint gripper controls.

        :param command: Array[float], preprocessed command; only command[0] is read
        :param control_dict: Dict[str, Any], must contain "joint_position"
            (array of current joint positions)
        :return: Array[float], non-clipped control signal for the gripper joints
        """
        current_positions = control_dict["joint_position"][self.joint_idx]
        limits = self.control_limits[ControlType.get_type(self.motor_type)]
        lower_limit = limits[0][self.joint_idx]
        upper_limit = limits[1][self.joint_idx]
        if self.mode == "binary":
            # Open on non-negative commands, close otherwise.
            target = upper_limit if command[0] >= 0.0 else lower_limit
        elif command[0] > 0.33:  # ternary: open
            target = upper_limit
        elif command[0] > -0.33:  # ternary: hold the current position
            target = current_positions
        else:  # ternary: close
            target = lower_limit
        # Zero out velocity / torque commands that would push a joint further
        # past a position limit (within self.limit_tolerance of the limit).
        if self.motor_type in {"velocity", "torque"}:
            position_limits = self.control_limits[ControlType.POSITION]
            at_upper = current_positions > position_limits[1][self.joint_idx] - self.limit_tolerance
            at_lower = current_positions < position_limits[0][self.joint_idx] + self.limit_tolerance
            blocked = np.logical_or(at_upper * (target > 0), at_lower * (target < 0))
            target = target * ~blocked
        return target

    @property
    def control_type(self):
        """Control type matching the configured motor type."""
        return ControlType.get_type(type_str=self.motor_type)

    @property
    def command_dim(self):
        """A single scalar open/close command."""
        return 1
|
en
| 0.746798
|
Controller class for **discrete** multi finger gripper control. This either interprets an input as a binary command (open / close), or ternary (open / stay at current position / close). Ternary mode can only be used as a position controller. **For continuous gripper control, the JointController should be used instead.** Each controller step consists of the following: 1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits 2a. Convert command into gripper joint control signals 2b. Clips the resulting control by the motor limits :param control_freq: int, controller loop frequency :param control_limits: Dict[str, Tuple[Array[float], Array[float]]]: The min/max limits to the outputted control signal. Should specify per-actuator type limits, i.e.: "position": [[min], [max]] "velocity": [[min], [max]] "torque": [[min], [max]] "has_limit": [...bool...] Values outside of this range will be clipped, if the corresponding joint index in has_limit is True. :param joint_idx: Array[int], specific joint indices controlled by this robot. Used for inferring controller-relevant values during control computations :param command_input_limits: None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]], if set, is the min/max acceptable inputted command. Values outside of this range will be clipped. If None, no clipping will be used. If "default", range will be set to (-1, 1) :param inverted: bool, whether or not the command direction (grasp is negative) and the control direction are inverted, e.g. if True, to grasp you need to apply commands in the positive direction. :param mode: str, mode for this controller. Valid options are: "binary": 1D command, if preprocessed value > 0 is interpreted as an max open (send max pos / vel / tor signal), otherwise send max close control signals "ternary": 1D command, if preprocessed value > 0.33, is interpreted as max open (send max position) signal. 
if -0.33 < value < 0.33, the value is interpreted as "keep still", where position control to current position is sent. If value < -0.33, maximum close signal is sent. :param limit_tolerance: float, sets the tolerance from the joint limit ends, below which controls will be zeroed out if the control is using velocity or torque control # Store arguments # Run super init # No-op Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper joint control signal :param command: Array[float], desired (already preprocessed) command to convert into control signals. This should always be 2D command for each gripper joint :param control_dict: Dict[str, Any], dictionary that should include any relevant keyword-mapped states necessary for controller computation. Must include the following keys: joint_position: Array of current joint positions :return: Array[float], outputted (non-clipped!) control signal to deploy # Choose what to do based on control mode # Ternary mode # Closer to 1 # Closer to 0 # This is why ternary mode only works with position control. # Closer to -1 # If we're near the joint limits and we're using velocity / torque control, we zero out the action # Return control
| 3.058588
| 3
|
scipy/misc/__init__.py
|
Ennosigaeon/scipy
| 353
|
6626383
|
<reponame>Ennosigaeon/scipy<filename>scipy/misc/__init__.py<gh_stars>100-1000
"""
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
Various utilities that don't have another home.
.. autosummary::
:toctree: generated/
ascent - Get example image for processing
central_diff_weights - Weights for an n-point central mth derivative
derivative - Find the nth derivative of a function at a point
face - Get example image for processing
electrocardiogram - Load an example of a 1-D signal.
"""
from . import doccer
from .common import *
__all__ = ['doccer']
from . import common
__all__ += common.__all__
del common
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
"""
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
Various utilities that don't have another home.
.. autosummary::
:toctree: generated/
ascent - Get example image for processing
central_diff_weights - Weights for an n-point central mth derivative
derivative - Find the nth derivative of a function at a point
face - Get example image for processing
electrocardiogram - Load an example of a 1-D signal.
"""
from . import doccer
from .common import *
__all__ = ['doccer']
from . import common
__all__ += common.__all__
del common
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
en
| 0.729744
|
========================================== Miscellaneous routines (:mod:`scipy.misc`) ========================================== .. currentmodule:: scipy.misc Various utilities that don't have another home. .. autosummary:: :toctree: generated/ ascent - Get example image for processing central_diff_weights - Weights for an n-point central mth derivative derivative - Find the nth derivative of a function at a point face - Get example image for processing electrocardiogram - Load an example of a 1-D signal.
| 2.223475
| 2
|
td3/td3_networks.py
|
AABL-Lab/Standard_ML_Library
| 0
|
6626384
|
"""
This code mainly follows a td3 YouTube tutorial found at:
https://www.youtube.com/watch?v=ZhFO8EWADmY&t=1895s
Channel name: Machine Learning with Phil
Any modifiations are made by the AABL Lab.
"""
import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class CriticNetwork(nn.Module):
    """State-action value network Q(s, a) for TD3.

    The state and action are concatenated and passed through two ReLU hidden
    layers to produce a single scalar Q-value per sample.
    """

    def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions,
                 name, chkpt_dir="tmp/td3", device=None):
        """
        :param beta: learning rate for the Adam optimizer
        :param input_dims: observation dimensions, e.g. ``(obs_dim,)``
        :param fc1_dims: width of the first hidden layer
        :param fc2_dims: width of the second hidden layer
        :param n_actions: dimensionality of the action space
        :param name: network name used to build the checkpoint file name
        :param chkpt_dir: directory where checkpoints are stored
        :param device: torch device to place the network on; when None the
            device is auto-selected (CUDA if available, else CPU)
        """
        super(CriticNetwork, self).__init__()
        self.input_dims = input_dims
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions
        self.name = name
        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3")
        # The critic consumes the concatenated (state, action) vector.
        self.fc1 = nn.Linear(self.input_dims[0] + n_actions, self.fc1_dims)
        self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
        self.q1 = nn.Linear(self.fc2_dims, 1)
        self.optimizer = optim.Adam(self.parameters(), lr=beta)
        # Bug fix: the original condition was inverted -- it auto-selected a
        # device when one was explicitly passed, and stored None otherwise.
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.to(self.device)

    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        q1_action_value = self.fc1(torch.cat([state, action], dim=1))
        q1_action_value = F.relu(q1_action_value)
        q1_action_value = self.fc2(q1_action_value)
        q1_action_value = F.relu(q1_action_value)
        q1 = self.q1(q1_action_value)
        return q1

    def save_checkpoint(self):
        """Serialize the network weights to self.checkpoint_file."""
        print("saving checkpoint")
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        """Restore the network weights from self.checkpoint_file."""
        # Bug fix: the original printed a message but never loaded the weights.
        print("loading checkpoint")
        self.load_state_dict(torch.load(self.checkpoint_file))
class ActorNetwork(nn.Module):
    """Deterministic policy network for TD3.

    Maps a state through two ReLU hidden layers to an action vector squashed
    into (-1, 1) by tanh.
    """

    def __init__(self, alpha, input_dims, fc1_dims, fc2_dims,
                 n_actions, name, chkpt_dir="tmp/td3", device=None):
        """
        :param alpha: learning rate for the Adam optimizer
        :param input_dims: observation dimensions, e.g. ``(obs_dim,)``
        :param fc1_dims: width of the first hidden layer
        :param fc2_dims: width of the second hidden layer
        :param n_actions: dimensionality of the action space
        :param name: network name used to build the checkpoint file name
        :param chkpt_dir: directory where checkpoints are stored.
            (Bug fix: the original default was the typo "t,p/td3".)
        :param device: torch device to place the network on; when None the
            device is auto-selected (CUDA if available, else CPU)
        """
        super(ActorNetwork, self).__init__()
        self.input_dims = input_dims
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions
        self.name = name
        self.checkpoint_dir = chkpt_dir
        # Consistency fix: use the same "_td3" suffix as CriticNetwork
        # (the original used name+"td3"; no saved checkpoints could have been
        # loaded anyway, since load_checkpoint never loaded).
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3")
        # the * is for tuple unpacking
        self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
        self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
        self.mu = nn.Linear(self.fc2_dims, self.n_actions)
        self.optimizer = optim.Adam(self.parameters(), lr=alpha)
        # Bug fix: the original condition was inverted -- it auto-selected a
        # device when one was explicitly passed, and stored None otherwise.
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.to(self.device)

    def forward(self, state):
        """Return the action mu(state) in (-1, 1), shape (batch, n_actions)."""
        prob = self.fc1(state)
        prob = F.relu(prob)
        prob = self.fc2(prob)
        prob = F.relu(prob)
        mu = torch.tanh(self.mu(prob))
        return mu

    def save_checkpoint(self):
        """Serialize the network weights to self.checkpoint_file."""
        print("saving checkpoint")
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        """Restore the network weights from self.checkpoint_file."""
        # Bug fix: the original printed a message but never loaded the weights.
        print("loading checkpoint")
        self.load_state_dict(torch.load(self.checkpoint_file))
|
"""
This code mainly follows a td3 YouTube tutorial found at:
https://www.youtube.com/watch?v=ZhFO8EWADmY&t=1895s
Channel name: Machine Learning with Phil
Any modifiations are made by the AABL Lab.
"""
import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class CriticNetwork(nn.Module):
    """State-action value (Q) network for TD3.

    Maps a (state, action) pair to a scalar Q-value through two hidden
    ReLU layers.

    Args:
        beta: Adam learning rate for the critic parameters.
        input_dims: Observation shape, e.g. ``[obs_size]``; only the first
            entry is used because state and action are concatenated.
        fc1_dims: Width of the first hidden layer.
        fc2_dims: Width of the second hidden layer.
        n_actions: Dimensionality of the action vector.
        name: Identifier used to build the checkpoint file name.
        chkpt_dir: Directory in which checkpoints are stored.
        device: Optional torch device; when ``None`` (the default) CUDA is
            used if available, otherwise the CPU.
    """
    def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions,
            name, chkpt_dir="tmp/td3", device=None):
        super(CriticNetwork, self).__init__()
        self.input_dims = input_dims
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions
        self.name = name
        self.checkpoint_dir = chkpt_dir
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3")
        # The critic consumes state and action concatenated along dim 1.
        self.fc1 = nn.Linear(self.input_dims[0] + n_actions, self.fc1_dims)
        self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
        self.q1 = nn.Linear(self.fc2_dims, 1)
        self.optimizer = optim.Adam(self.parameters(), lr=beta)
        # Bug fix: the original condition was inverted ("if not device is
        # None"), which ignored an explicitly passed device and left
        # self.device as None when no device was given.
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.to(self.device)

    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        q1_action_value = self.fc1(torch.cat([state, action], dim=1))
        q1_action_value = F.relu(q1_action_value)
        q1_action_value = self.fc2(q1_action_value)
        q1_action_value = F.relu(q1_action_value)
        q1 = self.q1(q1_action_value)
        return q1

    def save_checkpoint(self):
        """Serialize the network weights to ``self.checkpoint_file``."""
        print("saving checkpoint")
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        """Restore the network weights from ``self.checkpoint_file``.

        Bug fix: the original implementation only printed a message and
        never loaded anything.
        """
        print("loading checkpoint")
        self.load_state_dict(torch.load(self.checkpoint_file))
class ActorNetwork(nn.Module):
    """Deterministic policy network for TD3.

    Maps a state to an action vector in [-1, 1] via two hidden ReLU
    layers followed by a tanh-squashed output layer.

    Args:
        alpha: Adam learning rate for the actor parameters.
        input_dims: Observation shape, e.g. ``[obs_size]``.
        fc1_dims: Width of the first hidden layer.
        fc2_dims: Width of the second hidden layer.
        n_actions: Dimensionality of the action vector.
        name: Identifier used to build the checkpoint file name.
        chkpt_dir: Directory in which checkpoints are stored.
        device: Optional torch device; when ``None`` (the default) CUDA is
            used if available, otherwise the CPU.
    """
    def __init__(self, alpha, input_dims, fc1_dims, fc2_dims,
            n_actions, name, chkpt_dir="tmp/td3", device=None):
        # Bug fix: default chkpt_dir was "t,p/td3", an obvious typo for
        # "tmp/td3" (the default used by CriticNetwork).
        super(ActorNetwork, self).__init__()
        self.input_dims = input_dims
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions
        self.name = name
        self.checkpoint_dir = chkpt_dir
        # Consistency fix: use the same "_td3" suffix as CriticNetwork
        # (was name+"td3").
        self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3")
        # the * is for tuple unpacking
        self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
        self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
        self.mu = nn.Linear(self.fc2_dims, self.n_actions)
        self.optimizer = optim.Adam(self.parameters(), lr=alpha)
        # Bug fix: the original condition was inverted ("if not device is
        # None"), which ignored an explicitly passed device and left
        # self.device as None when no device was given.
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.to(self.device)

    def forward(self, state):
        """Return the tanh-squashed action for *state*, shape (batch, n_actions)."""
        prob = self.fc1(state)
        prob = F.relu(prob)
        prob = self.fc2(prob)
        prob = F.relu(prob)
        mu = torch.tanh(self.mu(prob))
        return mu

    def save_checkpoint(self):
        """Serialize the network weights to ``self.checkpoint_file``."""
        print("saving checkpoint")
        torch.save(self.state_dict(), self.checkpoint_file)

    def load_checkpoint(self):
        """Restore the network weights from ``self.checkpoint_file``.

        Bug fix: the original implementation only printed a message and
        never loaded anything.
        """
        print("loading checkpoint")
        self.load_state_dict(torch.load(self.checkpoint_file))
|
en
| 0.857323
|
This code mainly follows a td3 YouTube tutorial found at: https://www.youtube.com/watch?v=ZhFO8EWADmY&t=1895s Channel name: Machine Learning with Phil Any modifiations are made by the AABL Lab. # the * is for tuple unpacking
| 2.664901
| 3
|
venv/Lib/site-packages/wsproto/connection.py
|
gilbertekalea/booking.com_crawler
| 179
|
6626385
|
<reponame>gilbertekalea/booking.com_crawler
"""
wsproto/connection
~~~~~~~~~~~~~~~~~~
An implementation of a WebSocket connection.
"""
from collections import deque
from enum import Enum
from typing import Deque, Generator, List, Optional
from .events import (
BytesMessage,
CloseConnection,
Event,
Message,
Ping,
Pong,
TextMessage,
)
from .extensions import Extension
from .frame_protocol import CloseReason, FrameProtocol, Opcode, ParseFailed
from .utilities import LocalProtocolError
class ConnectionState(Enum):
    """
    The state of the WebSocket connection's open/close lifecycle.

    RFC 6455, Section 4 - Opening Handshake
    """
    #: The opening handshake is in progress.
    CONNECTING = 0
    #: The opening handshake is complete.
    OPEN = 1
    #: The remote WebSocket has initiated a connection close.
    REMOTE_CLOSING = 2
    #: The local WebSocket (i.e. this instance) has initiated a connection close.
    LOCAL_CLOSING = 3
    #: The closing handshake has completed.
    CLOSED = 4
    #: The connection was rejected during the opening handshake.
    REJECTING = 5
class ConnectionType(Enum):
    """An enumeration of connection types."""
    #: This connection will act as client and talk to a remote server
    CLIENT = 1
    #: This connection will act as server and wait for client connections
    SERVER = 2
# Module-level shorthand aliases for the two connection types.
CLIENT = ConnectionType.CLIENT
SERVER = ConnectionType.SERVER
class Connection:
    """
    A low-level WebSocket connection object.
    This wraps two other protocol objects, an HTTP/1.1 protocol object used
    to do the initial HTTP upgrade handshake and a WebSocket frame protocol
    object used to exchange messages and other control frames.
    :param conn_type: Whether this object is on the client- or server-side of
        a connection. To initialise as a client pass ``CLIENT`` otherwise
        pass ``SERVER``.
    :type conn_type: ``ConnectionType``
    """
    def __init__(
        self,
        connection_type: ConnectionType,
        extensions: Optional[List[Extension]] = None,
        trailing_data: bytes = b"",
    ) -> None:
        # True when this side acts as the WebSocket client.
        self.client = connection_type is ConnectionType.CLIENT
        # Events already parsed and waiting to be drained by events().
        self._events: Deque[Event] = deque()
        self._proto = FrameProtocol(self.client, extensions or [])
        # The opening handshake is assumed to have completed already
        # (it is handled by the wrapping HTTP/1.1 protocol object).
        self._state = ConnectionState.OPEN
        # Any bytes received together with the handshake are fed in now.
        self.receive_data(trailing_data)
    @property
    def state(self) -> ConnectionState:
        # Read-only view of the close-handshake state machine.
        return self._state
    def send(self, event: Event) -> bytes:
        """Serialize *event* into the bytes to write to the network.

        :raises LocalProtocolError: if a close is attempted in a state
            other than OPEN/REMOTE_CLOSING, or the event type is not
            sendable.
        """
        data = b""
        if isinstance(event, Message):
            data += self._proto.send_data(event.data, event.message_finished)
        elif isinstance(event, Ping):
            data += self._proto.ping(event.payload)
        elif isinstance(event, Pong):
            data += self._proto.pong(event.payload)
        elif isinstance(event, CloseConnection):
            if self.state not in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:
                raise LocalProtocolError(
                    "Connection cannot be closed in state %s" % self.state
                )
            data += self._proto.close(event.code, event.reason)
            # Replying to a remote close completes the handshake;
            # otherwise we initiated the close and must await the reply.
            if self.state == ConnectionState.REMOTE_CLOSING:
                self._state = ConnectionState.CLOSED
            else:
                self._state = ConnectionState.LOCAL_CLOSING
        else:
            raise LocalProtocolError(f"Event {event} cannot be sent.")
        return data
    def receive_data(self, data: Optional[bytes]) -> None:
        """
        Pass some received data to the connection for handling.
        A list of events that the remote peer triggered by sending this data can
        be retrieved with :meth:`~wsproto.connection.Connection.events`.
        :param data: The data received from the remote peer on the network.
        :type data: ``bytes``
        """
        if data is None:
            # "If _The WebSocket Connection is Closed_ and no Close control
            # frame was received by the endpoint (such as could occur if the
            # underlying transport connection is lost), _The WebSocket
            # Connection Close Code_ is considered to be 1006."
            self._events.append(CloseConnection(code=CloseReason.ABNORMAL_CLOSURE))
            self._state = ConnectionState.CLOSED
            return
        if self.state in (ConnectionState.OPEN, ConnectionState.LOCAL_CLOSING):
            self._proto.receive_bytes(data)
        elif self.state is ConnectionState.CLOSED:
            raise LocalProtocolError("Connection already closed.")
        else:
            # Remaining state (REMOTE_CLOSING): incoming data is ignored.
            pass  # pragma: no cover
    def events(self) -> Generator[Event, None, None]:
        """
        Return a generator that provides any events that have been generated
        by protocol activity.
        :returns: generator of :class:`Event <wsproto.events.Event>` subclasses
        """
        # Drain events queued directly (e.g. by receive_data(None)) first.
        while self._events:
            yield self._events.popleft()
        try:
            for frame in self._proto.received_frames():
                if frame.opcode is Opcode.PING:
                    # Pings arrive whole (asserted) with a bytes payload.
                    assert frame.frame_finished and frame.message_finished
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield Ping(payload=frame.payload)
                elif frame.opcode is Opcode.PONG:
                    assert frame.frame_finished and frame.message_finished
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield Pong(payload=frame.payload)
                elif frame.opcode is Opcode.CLOSE:
                    assert isinstance(frame.payload, tuple)
                    code, reason = frame.payload
                    # A close frame either completes our own close
                    # handshake or starts one initiated by the peer.
                    if self.state is ConnectionState.LOCAL_CLOSING:
                        self._state = ConnectionState.CLOSED
                    else:
                        self._state = ConnectionState.REMOTE_CLOSING
                    yield CloseConnection(code=code, reason=reason)
                elif frame.opcode is Opcode.TEXT:
                    assert isinstance(frame.payload, str)
                    yield TextMessage(
                        data=frame.payload,
                        frame_finished=frame.frame_finished,
                        message_finished=frame.message_finished,
                    )
                elif frame.opcode is Opcode.BINARY:
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield BytesMessage(
                        data=frame.payload,
                        frame_finished=frame.frame_finished,
                        message_finished=frame.message_finished,
                    )
                else:
                    pass  # pragma: no cover
        except ParseFailed as exc:
            # Protocol violations surface as a close event carrying the
            # parser-provided close code.
            yield CloseConnection(code=exc.code, reason=str(exc))
|
"""
wsproto/connection
~~~~~~~~~~~~~~~~~~
An implementation of a WebSocket connection.
"""
from collections import deque
from enum import Enum
from typing import Deque, Generator, List, Optional
from .events import (
BytesMessage,
CloseConnection,
Event,
Message,
Ping,
Pong,
TextMessage,
)
from .extensions import Extension
from .frame_protocol import CloseReason, FrameProtocol, Opcode, ParseFailed
from .utilities import LocalProtocolError
class ConnectionState(Enum):
    """
    The state of the WebSocket connection's open/close lifecycle.

    RFC 6455, Section 4 - Opening Handshake
    """
    #: The opening handshake is in progress.
    CONNECTING = 0
    #: The opening handshake is complete.
    OPEN = 1
    #: The remote WebSocket has initiated a connection close.
    REMOTE_CLOSING = 2
    #: The local WebSocket (i.e. this instance) has initiated a connection close.
    LOCAL_CLOSING = 3
    #: The closing handshake has completed.
    CLOSED = 4
    #: The connection was rejected during the opening handshake.
    REJECTING = 5
class ConnectionType(Enum):
    """An enumeration of connection types."""
    #: This connection will act as client and talk to a remote server
    CLIENT = 1
    #: This connection will act as server and wait for client connections
    SERVER = 2
# Module-level shorthand aliases for the two connection types.
CLIENT = ConnectionType.CLIENT
SERVER = ConnectionType.SERVER
class Connection:
    """
    A low-level WebSocket connection object.
    This wraps two other protocol objects, an HTTP/1.1 protocol object used
    to do the initial HTTP upgrade handshake and a WebSocket frame protocol
    object used to exchange messages and other control frames.
    :param conn_type: Whether this object is on the client- or server-side of
        a connection. To initialise as a client pass ``CLIENT`` otherwise
        pass ``SERVER``.
    :type conn_type: ``ConnectionType``
    """
    def __init__(
        self,
        connection_type: ConnectionType,
        extensions: Optional[List[Extension]] = None,
        trailing_data: bytes = b"",
    ) -> None:
        # True when this side acts as the WebSocket client.
        self.client = connection_type is ConnectionType.CLIENT
        # Events already parsed and waiting to be drained by events().
        self._events: Deque[Event] = deque()
        self._proto = FrameProtocol(self.client, extensions or [])
        # The opening handshake is assumed to have completed already
        # (it is handled by the wrapping HTTP/1.1 protocol object).
        self._state = ConnectionState.OPEN
        # Any bytes received together with the handshake are fed in now.
        self.receive_data(trailing_data)
    @property
    def state(self) -> ConnectionState:
        # Read-only view of the close-handshake state machine.
        return self._state
    def send(self, event: Event) -> bytes:
        """Serialize *event* into the bytes to write to the network.

        :raises LocalProtocolError: if a close is attempted in a state
            other than OPEN/REMOTE_CLOSING, or the event type is not
            sendable.
        """
        data = b""
        if isinstance(event, Message):
            data += self._proto.send_data(event.data, event.message_finished)
        elif isinstance(event, Ping):
            data += self._proto.ping(event.payload)
        elif isinstance(event, Pong):
            data += self._proto.pong(event.payload)
        elif isinstance(event, CloseConnection):
            if self.state not in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:
                raise LocalProtocolError(
                    "Connection cannot be closed in state %s" % self.state
                )
            data += self._proto.close(event.code, event.reason)
            # Replying to a remote close completes the handshake;
            # otherwise we initiated the close and must await the reply.
            if self.state == ConnectionState.REMOTE_CLOSING:
                self._state = ConnectionState.CLOSED
            else:
                self._state = ConnectionState.LOCAL_CLOSING
        else:
            raise LocalProtocolError(f"Event {event} cannot be sent.")
        return data
    def receive_data(self, data: Optional[bytes]) -> None:
        """
        Pass some received data to the connection for handling.
        A list of events that the remote peer triggered by sending this data can
        be retrieved with :meth:`~wsproto.connection.Connection.events`.
        :param data: The data received from the remote peer on the network.
        :type data: ``bytes``
        """
        if data is None:
            # "If _The WebSocket Connection is Closed_ and no Close control
            # frame was received by the endpoint (such as could occur if the
            # underlying transport connection is lost), _The WebSocket
            # Connection Close Code_ is considered to be 1006."
            self._events.append(CloseConnection(code=CloseReason.ABNORMAL_CLOSURE))
            self._state = ConnectionState.CLOSED
            return
        if self.state in (ConnectionState.OPEN, ConnectionState.LOCAL_CLOSING):
            self._proto.receive_bytes(data)
        elif self.state is ConnectionState.CLOSED:
            raise LocalProtocolError("Connection already closed.")
        else:
            # Remaining state (REMOTE_CLOSING): incoming data is ignored.
            pass  # pragma: no cover
    def events(self) -> Generator[Event, None, None]:
        """
        Return a generator that provides any events that have been generated
        by protocol activity.
        :returns: generator of :class:`Event <wsproto.events.Event>` subclasses
        """
        # Drain events queued directly (e.g. by receive_data(None)) first.
        while self._events:
            yield self._events.popleft()
        try:
            for frame in self._proto.received_frames():
                if frame.opcode is Opcode.PING:
                    # Pings arrive whole (asserted) with a bytes payload.
                    assert frame.frame_finished and frame.message_finished
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield Ping(payload=frame.payload)
                elif frame.opcode is Opcode.PONG:
                    assert frame.frame_finished and frame.message_finished
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield Pong(payload=frame.payload)
                elif frame.opcode is Opcode.CLOSE:
                    assert isinstance(frame.payload, tuple)
                    code, reason = frame.payload
                    # A close frame either completes our own close
                    # handshake or starts one initiated by the peer.
                    if self.state is ConnectionState.LOCAL_CLOSING:
                        self._state = ConnectionState.CLOSED
                    else:
                        self._state = ConnectionState.REMOTE_CLOSING
                    yield CloseConnection(code=code, reason=reason)
                elif frame.opcode is Opcode.TEXT:
                    assert isinstance(frame.payload, str)
                    yield TextMessage(
                        data=frame.payload,
                        frame_finished=frame.frame_finished,
                        message_finished=frame.message_finished,
                    )
                elif frame.opcode is Opcode.BINARY:
                    assert isinstance(frame.payload, (bytes, bytearray))
                    yield BytesMessage(
                        data=frame.payload,
                        frame_finished=frame.frame_finished,
                        message_finished=frame.message_finished,
                    )
                else:
                    pass  # pragma: no cover
        except ParseFailed as exc:
            # Protocol violations surface as a close event carrying the
            # parser-provided close code.
            yield CloseConnection(code=exc.code, reason=str(exc))
|
en
| 0.896737
|
wsproto/connection ~~~~~~~~~~~~~~~~~~ An implementation of a WebSocket connection. RFC 6455, Section 4 - Opening Handshake #: The opening handshake is in progress. #: The opening handshake is complete. #: The remote WebSocket has initiated a connection close. #: The local WebSocket (i.e. this instance) has initiated a connection close. #: The closing handshake has completed. #: The connection was rejected during the opening handshake. An enumeration of connection types. #: This connection will act as client and talk to a remote server #: This connection will as as server and waits for client connections A low-level WebSocket connection object. This wraps two other protocol objects, an HTTP/1.1 protocol object used to do the initial HTTP upgrade handshake and a WebSocket frame protocol object used to exchange messages and other control frames. :param conn_type: Whether this object is on the client- or server-side of a connection. To initialise as a client pass ``CLIENT`` otherwise pass ``SERVER``. :type conn_type: ``ConnectionType`` Pass some received data to the connection for handling. A list of events that the remote peer triggered by sending this data can be retrieved with :meth:`~wsproto.connection.Connection.events`. :param data: The data received from the remote peer on the network. :type data: ``bytes`` # "If _The WebSocket Connection is Closed_ and no Close control # frame was received by the endpoint (such as could occur if the # underlying transport connection is lost), _The WebSocket # Connection Close Code_ is considered to be 1006." # pragma: no cover Return a generator that provides any events that have been generated by protocol activity. :returns: generator of :class:`Event <wsproto.events.Event>` subclasses # pragma: no cover
| 2.938201
| 3
|
client/commands/v2/tests/statistics_test.py
|
ekmixon/pyre-check
| 5,975
|
6626386
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import textwrap
from pathlib import Path
import testslide
from .... import configuration, command_arguments
from ....tests import setup
from ..statistics import (
find_roots,
find_paths_to_parse,
parse_text_to_module,
parse_path_to_module,
collect_statistics,
aggregate_statistics,
AggregatedStatisticsData,
)
class StatisticsTest(testslide.TestCase):
    """Tests for the `pyre statistics` helpers: root discovery, source
    collection, parsing, and statistics aggregation."""

    def test_find_roots__filter_path_duplicate(self) -> None:
        # Duplicated filter paths should be reported only once.
        self.assertCountEqual(
            find_roots(
                configuration.Configuration(
                    project_root="/root", dot_pyre_directory=Path("/irrelevant")
                ),
                command_arguments.StatisticsArguments(
                    filter_paths=["/root/foo.py", "/root/bar.py", "/root/foo.py"]
                ),
            ),
            [Path("/root/foo.py"), Path("/root/bar.py")],
        )

    def test_find_roots__filter_path_expand(self) -> None:
        # Relative filter paths are resolved against the working directory.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            with setup.switch_working_directory(root_path):
                self.assertCountEqual(
                    find_roots(
                        configuration.Configuration(
                            project_root="/root", dot_pyre_directory=Path("/irrelevant")
                        ),
                        command_arguments.StatisticsArguments(
                            filter_paths=["foo.py", "bar.py"]
                        ),
                    ),
                    [root_path / "foo.py", root_path / "bar.py"],
                )

    def test_find_roots__local_root(self) -> None:
        # With no filter paths, a configured local root is the only root.
        self.assertCountEqual(
            find_roots(
                configuration.Configuration(
                    project_root="/root",
                    dot_pyre_directory=Path("/irrelevant"),
                    relative_local_root="local",
                ),
                command_arguments.StatisticsArguments(filter_paths=[]),
            ),
            [Path("/root/local")],
        )

    def test_find_roots__current_working_directory(self) -> None:
        # No filter paths and no local root: fall back to the CWD.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            with setup.switch_working_directory(root_path):
                self.assertCountEqual(
                    find_roots(
                        configuration.Configuration(
                            project_root="/root", dot_pyre_directory=Path("/irrelevant")
                        ),
                        command_arguments.StatisticsArguments(filter_paths=[]),
                    ),
                    [root_path],
                )

    def test_find_paths_to_parse(self) -> None:
        # Only `.py` files are collected; directories are searched
        # recursively, non-Python files are dropped, and `b/__s5.py` is
        # never reported (presumably underscore-prefixed files are
        # skipped — see find_paths_to_parse).
        pyre_configuration = configuration.Configuration(
            project_root="/root", dot_pyre_directory=Path("/irrelevant")
        )
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(
                root_path,
                ["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
            )
            setup.ensure_directories_exists(root_path, ["b/d"])
            self.assertCountEqual(
                find_paths_to_parse(
                    pyre_configuration,
                    [
                        root_path / "a/s1.py",
                        root_path / "b/s2.py",
                        root_path / "b/s4.txt",
                    ],
                ),
                [
                    root_path / "a/s1.py",
                    root_path / "b/s2.py",
                ],
            )
            self.assertCountEqual(
                find_paths_to_parse(pyre_configuration, [root_path]),
                [
                    root_path / "s0.py",
                    root_path / "a/s1.py",
                    root_path / "b/s2.py",
                    root_path / "b/c/s3.py",
                ],
            )
        # Paths matching the configuration's `excludes` patterns are
        # filtered out (here every "*2.py" file).
        pyre_configuration = configuration.Configuration(
            project_root="/root",
            dot_pyre_directory=Path("/irrelevant"),
            excludes=[r".*2\.py"],
        )
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(
                root_path,
                ["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
            )
            setup.ensure_directories_exists(root_path, ["b/d"])
            self.assertCountEqual(
                find_paths_to_parse(
                    pyre_configuration,
                    [
                        root_path / "a/s1.py",
                        root_path / "b/s2.py",
                        root_path / "b/s4.txt",
                    ],
                ),
                [
                    root_path / "a/s1.py",
                ],
            )
            self.assertCountEqual(
                find_paths_to_parse(pyre_configuration, [root_path]),
                [
                    root_path / "s0.py",
                    root_path / "a/s1.py",
                    root_path / "b/c/s3.py",
                ],
            )

    def test_parse_text_to_module(self) -> None:
        # Valid source parses to a module; a syntax error yields None.
        self.assertIsNotNone(
            parse_text_to_module(
                textwrap.dedent(
                    """
                    def foo() -> int:
                        pass
                    """
                )
            )
        )
        self.assertIsNone(
            parse_text_to_module(
                textwrap.dedent(
                    """
                    def foo() ->
                    """
                )
            )
        )

    def test_parse_path_to_module(self) -> None:
        # Existing files parse to a module; missing files yield None.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            source_path = root_path / "source.py"
            source_path.write_text("reveal_type(42)")
            self.assertIsNotNone(parse_path_to_module(source_path))
            self.assertIsNone(parse_path_to_module(root_path / "nonexistent.py"))

    def test_collect_statistics(self) -> None:
        # Every input path receives an entry in each statistics category.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(root_path, ["foo.py", "bar.py"])
            foo_path = root_path / "foo.py"
            bar_path = root_path / "bar.py"
            data = collect_statistics([foo_path, bar_path], strict_default=False)
            self.assertIn(str(foo_path), data.annotations)
            self.assertIn(str(foo_path), data.fixmes)
            self.assertIn(str(foo_path), data.ignores)
            self.assertIn(str(foo_path), data.strict)
            self.assertIn(str(bar_path), data.annotations)
            self.assertIn(str(bar_path), data.fixmes)
            self.assertIn(str(bar_path), data.ignores)
            self.assertIn(str(bar_path), data.strict)

    def test_aggregate_statistics__single_file(self) -> None:
        # A single pyre-unsafe file with one unannotated function.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            a_path = root_path / "a.py"
            a_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-unsafe
                    def foo():
                        return 1
                    """.rstrip()
                )
            )
            self.assertEqual(
                aggregate_statistics(
                    collect_statistics([a_path], strict_default=False)
                ),
                AggregatedStatisticsData(
                    annotations={
                        "return_count": 1,
                        "annotated_return_count": 0,
                        "globals_count": 0,
                        "annotated_globals_count": 0,
                        "parameter_count": 0,
                        "annotated_parameter_count": 0,
                        "attribute_count": 0,
                        "annotated_attribute_count": 0,
                        "partially_annotated_function_count": 0,
                        "fully_annotated_function_count": 0,
                        "line_count": 6,
                    },
                    fixmes=0,
                    ignores=0,
                    strict=0,
                    unsafe=1,
                ),
            )

    def test_aggregate_statistics__multiple_files(self) -> None:
        # Counts from several files are summed; strict/unsafe are counted
        # per file (one of each here).
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            a_path = root_path / "a.py"
            b_path = root_path / "b.py"
            a_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-unsafe
                    def foo():
                        return 1
                    """.rstrip()
                )
            )
            b_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-strict
                    def foo(x: int) -> int:
                        return 1
                    """.rstrip()
                )
            )
            self.assertEqual(
                aggregate_statistics(
                    collect_statistics([a_path, b_path], strict_default=False)
                ),
                AggregatedStatisticsData(
                    annotations={
                        "return_count": 2,
                        "annotated_return_count": 1,
                        "globals_count": 0,
                        "annotated_globals_count": 0,
                        "parameter_count": 1,
                        "annotated_parameter_count": 1,
                        "attribute_count": 0,
                        "annotated_attribute_count": 0,
                        "partially_annotated_function_count": 0,
                        "fully_annotated_function_count": 1,
                        "line_count": 12,
                    },
                    fixmes=0,
                    ignores=0,
                    strict=1,
                    unsafe=1,
                ),
            )
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import textwrap
from pathlib import Path
import testslide
from .... import configuration, command_arguments
from ....tests import setup
from ..statistics import (
find_roots,
find_paths_to_parse,
parse_text_to_module,
parse_path_to_module,
collect_statistics,
aggregate_statistics,
AggregatedStatisticsData,
)
class StatisticsTest(testslide.TestCase):
    """Tests for the `pyre statistics` helpers: root discovery, source
    collection, parsing, and statistics aggregation."""

    def test_find_roots__filter_path_duplicate(self) -> None:
        # Duplicated filter paths should be reported only once.
        self.assertCountEqual(
            find_roots(
                configuration.Configuration(
                    project_root="/root", dot_pyre_directory=Path("/irrelevant")
                ),
                command_arguments.StatisticsArguments(
                    filter_paths=["/root/foo.py", "/root/bar.py", "/root/foo.py"]
                ),
            ),
            [Path("/root/foo.py"), Path("/root/bar.py")],
        )

    def test_find_roots__filter_path_expand(self) -> None:
        # Relative filter paths are resolved against the working directory.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            with setup.switch_working_directory(root_path):
                self.assertCountEqual(
                    find_roots(
                        configuration.Configuration(
                            project_root="/root", dot_pyre_directory=Path("/irrelevant")
                        ),
                        command_arguments.StatisticsArguments(
                            filter_paths=["foo.py", "bar.py"]
                        ),
                    ),
                    [root_path / "foo.py", root_path / "bar.py"],
                )

    def test_find_roots__local_root(self) -> None:
        # With no filter paths, a configured local root is the only root.
        self.assertCountEqual(
            find_roots(
                configuration.Configuration(
                    project_root="/root",
                    dot_pyre_directory=Path("/irrelevant"),
                    relative_local_root="local",
                ),
                command_arguments.StatisticsArguments(filter_paths=[]),
            ),
            [Path("/root/local")],
        )

    def test_find_roots__current_working_directory(self) -> None:
        # No filter paths and no local root: fall back to the CWD.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            with setup.switch_working_directory(root_path):
                self.assertCountEqual(
                    find_roots(
                        configuration.Configuration(
                            project_root="/root", dot_pyre_directory=Path("/irrelevant")
                        ),
                        command_arguments.StatisticsArguments(filter_paths=[]),
                    ),
                    [root_path],
                )

    def test_find_paths_to_parse(self) -> None:
        # Only `.py` files are collected; directories are searched
        # recursively, non-Python files are dropped, and `b/__s5.py` is
        # never reported (presumably underscore-prefixed files are
        # skipped — see find_paths_to_parse).
        pyre_configuration = configuration.Configuration(
            project_root="/root", dot_pyre_directory=Path("/irrelevant")
        )
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(
                root_path,
                ["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
            )
            setup.ensure_directories_exists(root_path, ["b/d"])
            self.assertCountEqual(
                find_paths_to_parse(
                    pyre_configuration,
                    [
                        root_path / "a/s1.py",
                        root_path / "b/s2.py",
                        root_path / "b/s4.txt",
                    ],
                ),
                [
                    root_path / "a/s1.py",
                    root_path / "b/s2.py",
                ],
            )
            self.assertCountEqual(
                find_paths_to_parse(pyre_configuration, [root_path]),
                [
                    root_path / "s0.py",
                    root_path / "a/s1.py",
                    root_path / "b/s2.py",
                    root_path / "b/c/s3.py",
                ],
            )
        # Paths matching the configuration's `excludes` patterns are
        # filtered out (here every "*2.py" file).
        pyre_configuration = configuration.Configuration(
            project_root="/root",
            dot_pyre_directory=Path("/irrelevant"),
            excludes=[r".*2\.py"],
        )
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(
                root_path,
                ["s0.py", "a/s1.py", "b/s2.py", "b/c/s3.py", "b/s4.txt", "b/__s5.py"],
            )
            setup.ensure_directories_exists(root_path, ["b/d"])
            self.assertCountEqual(
                find_paths_to_parse(
                    pyre_configuration,
                    [
                        root_path / "a/s1.py",
                        root_path / "b/s2.py",
                        root_path / "b/s4.txt",
                    ],
                ),
                [
                    root_path / "a/s1.py",
                ],
            )
            self.assertCountEqual(
                find_paths_to_parse(pyre_configuration, [root_path]),
                [
                    root_path / "s0.py",
                    root_path / "a/s1.py",
                    root_path / "b/c/s3.py",
                ],
            )

    def test_parse_text_to_module(self) -> None:
        # Valid source parses to a module; a syntax error yields None.
        self.assertIsNotNone(
            parse_text_to_module(
                textwrap.dedent(
                    """
                    def foo() -> int:
                        pass
                    """
                )
            )
        )
        self.assertIsNone(
            parse_text_to_module(
                textwrap.dedent(
                    """
                    def foo() ->
                    """
                )
            )
        )

    def test_parse_path_to_module(self) -> None:
        # Existing files parse to a module; missing files yield None.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            source_path = root_path / "source.py"
            source_path.write_text("reveal_type(42)")
            self.assertIsNotNone(parse_path_to_module(source_path))
            self.assertIsNone(parse_path_to_module(root_path / "nonexistent.py"))

    def test_collect_statistics(self) -> None:
        # Every input path receives an entry in each statistics category.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            setup.ensure_files_exist(root_path, ["foo.py", "bar.py"])
            foo_path = root_path / "foo.py"
            bar_path = root_path / "bar.py"
            data = collect_statistics([foo_path, bar_path], strict_default=False)
            self.assertIn(str(foo_path), data.annotations)
            self.assertIn(str(foo_path), data.fixmes)
            self.assertIn(str(foo_path), data.ignores)
            self.assertIn(str(foo_path), data.strict)
            self.assertIn(str(bar_path), data.annotations)
            self.assertIn(str(bar_path), data.fixmes)
            self.assertIn(str(bar_path), data.ignores)
            self.assertIn(str(bar_path), data.strict)

    def test_aggregate_statistics__single_file(self) -> None:
        # A single pyre-unsafe file with one unannotated function.
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            a_path = root_path / "a.py"
            a_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-unsafe
                    def foo():
                        return 1
                    """.rstrip()
                )
            )
            self.assertEqual(
                aggregate_statistics(
                    collect_statistics([a_path], strict_default=False)
                ),
                AggregatedStatisticsData(
                    annotations={
                        "return_count": 1,
                        "annotated_return_count": 0,
                        "globals_count": 0,
                        "annotated_globals_count": 0,
                        "parameter_count": 0,
                        "annotated_parameter_count": 0,
                        "attribute_count": 0,
                        "annotated_attribute_count": 0,
                        "partially_annotated_function_count": 0,
                        "fully_annotated_function_count": 0,
                        "line_count": 6,
                    },
                    fixmes=0,
                    ignores=0,
                    strict=0,
                    unsafe=1,
                ),
            )

    def test_aggregate_statistics__multiple_files(self) -> None:
        # Counts from several files are summed; strict/unsafe are counted
        # per file (one of each here).
        with tempfile.TemporaryDirectory() as root:
            root_path = Path(root)
            a_path = root_path / "a.py"
            b_path = root_path / "b.py"
            a_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-unsafe
                    def foo():
                        return 1
                    """.rstrip()
                )
            )
            b_path.write_text(
                textwrap.dedent(
                    """
                    # pyre-strict
                    def foo(x: int) -> int:
                        return 1
                    """.rstrip()
                )
            )
            self.assertEqual(
                aggregate_statistics(
                    collect_statistics([a_path, b_path], strict_default=False)
                ),
                AggregatedStatisticsData(
                    annotations={
                        "return_count": 2,
                        "annotated_return_count": 1,
                        "globals_count": 0,
                        "annotated_globals_count": 0,
                        "parameter_count": 1,
                        "annotated_parameter_count": 1,
                        "attribute_count": 0,
                        "annotated_attribute_count": 0,
                        "partially_annotated_function_count": 0,
                        "fully_annotated_function_count": 1,
                        "line_count": 12,
                    },
                    fixmes=0,
                    ignores=0,
                    strict=1,
                    unsafe=1,
                ),
            )
|
en
| 0.875524
|
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. def foo() -> int: pass def foo() -> # pyre-unsafe def foo(): return 1 # pyre-unsafe def foo(): return 1 # pyre-strict def foo(x: int) -> int: return 1
| 2.27162
| 2
|
zerver/tests/test_middleware.py
|
alexandraciobica/zulip
| 4
|
6626387
|
import time
from typing import List
from bs4 import BeautifulSoup
from django.test import override_settings
from unittest.mock import Mock, patch
from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.test_classes import ZulipTestCase
from zerver.middleware import is_slow_query, write_log_line
from zerver.models import get_realm
class SlowQueryTest(ZulipTestCase):
    """Tests for slow-query detection and reporting in zerver.middleware."""

    SLOW_QUERY_TIME = 10
    # Template request-log data consumed by write_log_line(); individual
    # tests overwrite 'time_started' to simulate a slow request.  NOTE:
    # this is a shared class-level dict, so tests mutate the same object.
    log_data = {'extra': '[transport=websocket]',
                'time_started': 0,
                'bugdown_requests_start': 0,
                'bugdown_time_start': 0,
                'remote_cache_time_start': 0,
                'remote_cache_requests_start': 0}

    def test_is_slow_query(self) -> None:
        """is_slow_query() applies per-URL-pattern time thresholds."""
        self.assertFalse(is_slow_query(1.1, '/some/random/url'))
        self.assertTrue(is_slow_query(2, '/some/random/url'))
        self.assertTrue(is_slow_query(5.1, '/activity'))
        self.assertFalse(is_slow_query(2, '/activity'))
        self.assertFalse(is_slow_query(2, '/json/report/error'))
        self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
        self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
        self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
        self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
        self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))

    @override_settings(SLOW_QUERY_LOGS_STREAM="logs")
    @patch('logging.info')
    def test_slow_query_log(self, mock_logging_info: Mock) -> None:
        """A slow request posts a message to the configured logs stream."""
        self.log_data['time_started'] = time.time() - self.SLOW_QUERY_TIME
        write_log_line(self.log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        last_message = self.get_last_message()
        self.assertEqual(last_message.sender.email, "<EMAIL>")
        self.assertIn("logs", str(last_message.recipient))
        self.assertEqual(last_message.topic_name(), "testserver: slow queries")
        # Fix: assertRegexpMatches is a deprecated unittest alias; use
        # the supported assertRegex instead.
        self.assertRegex(last_message.content,
                         r"123\.456\.789\.012 SOCKET 200 10\.\ds .*")

    @override_settings(ERROR_BOT=None)
    @patch('logging.info')
    @patch('zerver.lib.actions.internal_send_message')
    def test_slow_query_log_without_error_bot(self, mock_internal_send_message: Mock,
                                              mock_logging_info: Mock) -> None:
        """With no error bot configured, no message is sent for slow queries."""
        self.log_data['time_started'] = time.time() - self.SLOW_QUERY_TIME
        write_log_line(self.log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        mock_internal_send_message.assert_not_called()
class OpenGraphTest(ZulipTestCase):
    """Verify the Open Graph <meta> tags (og:title, og:description,
    og:image, og:url) rendered on help-center, API-docs, and login
    pages."""
    def check_title_and_description(self, path: str, title: str,
                                    in_description: List[str],
                                    not_in_description: List[str],
                                    status_code: int=200) -> None:
        """Fetch *path*, assert the response status, that og:title equals
        *title*, and that og:description contains every string in
        *in_description* and none of the strings in *not_in_description*."""
        response = self.client_get(path)
        self.assertEqual(response.status_code, status_code)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_title = bs.select_one('meta[property="og:title"]').get('content')
        self.assertEqual(open_graph_title, title)
        open_graph_description = bs.select_one('meta[property="og:description"]').get('content')
        for substring in in_description:
            self.assertIn(substring, open_graph_description)
        for substring in not_in_description:
            self.assertNotIn(substring, open_graph_description)
    def test_admonition_and_link(self) -> None:
        # disable-message-edit-history starts with an {!admin-only.md!}, and has a link
        # in the first paragraph.
        self.check_title_and_description(
            '/help/disable-message-edit-history',
            "Disable message edit history (Zulip Help Center)",
            ["By default, Zulip displays messages",
             "users can view the edit history of a message. | To remove the",
             "best to delete the message entirely. "],
            ["Disable message edit history", "feature is only available", "Related articles",
             "Restrict message editing"]
        )
    def test_double_quotes(self) -> None:
        # night-mode has a quoted string "night mode"
        self.check_title_and_description(
            '/help/night-mode',
            "Night mode (Zulip Help Center)",
            ['By default, Zulip has a white background. ',
             'Zulip also provides a "night mode", which is great for working in a dark space.'],
            []
        )
    def test_settings_tab(self) -> None:
        # deactivate-your-account starts with {settings_tab|your-account}
        self.check_title_and_description(
            '/help/deactivate-your-account',
            "Deactivate your account (Zulip Help Center)",
            ["Any bots that you maintain will be disabled. | Deactivating "],
            ["Confirm by clicking", "  ", "\n"])
    def test_tabs(self) -> None:
        # logging-out starts with {start_tabs}
        self.check_title_and_description(
            '/help/logging-out',
            "Logging out (Zulip Help Center)",
            # Ideally we'd do something better here
            ["We're here to help! Email us at <EMAIL> with questions, feedback, or " +
             "feature requests."],
            ["Click on the gear"])
    def test_index_pages(self) -> None:
        self.check_title_and_description(
            '/help/',
            "Zulip Help Center",
            [("Zulip is a group chat app. Its most distinctive characteristic is that "
              "conversation within an organization is divided into “streams” and further ")], [])
        self.check_title_and_description(
            '/api/',
            "Zulip API Documentation",
            [("Zulip's APIs allow you to integrate other services with Zulip. This "
              "guide should help you find the API you need:")], [])
    def test_nonexistent_page(self) -> None:
        self.check_title_and_description(
            '/help/not-a-real-page',
            # Probably we should make this "Zulip Help Center"
            "No such article. (Zulip Help Center)",
            ["No such article. | We're here to help!",
             "Email us at <EMAIL> with questions, feedback, or feature requests."],
            [],
            # Test that our open graph logic doesn't throw a 500
            404)
    def test_login_page_simple_description(self) -> None:
        name = 'Zulip Dev'
        description = "The Zulip development environment default organization. It's great for testing!"
        self.check_title_and_description(
            '/login/',
            name,
            [description],
            [])
    def test_login_page_markdown_description(self) -> None:
        """Markdown in the realm description is flattened to plain text."""
        realm = get_realm('zulip')
        description = ("Welcome to **Clojurians Zulip** - the place where the Clojure community meets.\n\n"
                       "Before you signup/login:\n\n"
                       "* note-1\n"
                       "* note-2\n"
                       "* note-3\n\n"
                       "Enjoy!")
        realm.description = description
        realm.save(update_fields=['description'])
        self.check_title_and_description(
            '/login/',
            'Zulip Dev',
            ['Welcome to Clojurians Zulip - the place where the Clojure community meets',
             '* note-1 * note-2 * note-3 | Enjoy!'],
            [])
    def test_login_page_realm_icon(self) -> None:
        """og:image is the realm icon URL, made absolute with realm.uri."""
        realm = get_realm('zulip')
        realm.icon_source = 'U'
        realm.save(update_fields=['icon_source'])
        realm_icon = get_realm_icon_url(realm)
        response = self.client_get('/login/')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_image = bs.select_one('meta[property="og:image"]').get('content')
        self.assertEqual(open_graph_image, '%s%s' % (realm.uri, realm_icon))
    def test_login_page_realm_icon_absolute_url(self) -> None:
        """An already-absolute icon URL (e.g. S3-hosted) is used verbatim."""
        realm = get_realm('zulip')
        realm.icon_source = 'U'
        realm.save(update_fields=['icon_source'])
        icon_url = "https://foo.s3.amazonaws.com/%s/realm/icon.png?version=%s" % (realm.id, 1)
        with patch('zerver.lib.realm_icon.upload_backend.get_realm_icon_url', return_value=icon_url):
            response = self.client_get('/login/')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_image = bs.select_one('meta[property="og:image"]').get('content')
        self.assertEqual(open_graph_image, icon_url)
    def test_no_realm_api_page_og_url(self) -> None:
        """og:url on the realm-less /api/ page still points at /api/."""
        response = self.client_get('/api/', subdomain='')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_url = bs.select_one('meta[property="og:url"]').get('content')
        self.assertTrue(open_graph_url.endswith('/api/'))
|
import time
from typing import List
from bs4 import BeautifulSoup
from django.test import override_settings
from unittest.mock import Mock, patch
from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.test_classes import ZulipTestCase
from zerver.middleware import is_slow_query, write_log_line
from zerver.models import get_realm
class SlowQueryTest(ZulipTestCase):
    """Tests for the slow-query detection and reporting middleware."""
    SLOW_QUERY_TIME = 10
    # Template for the middleware's log_data dict.  Tests copy it rather
    # than mutating it: it is a class attribute, so in-place mutation
    # would leak state between tests.
    log_data = {'extra': '[transport=websocket]',
                'time_started': 0,
                'bugdown_requests_start': 0,
                'bugdown_time_start': 0,
                'remote_cache_time_start': 0,
                'remote_cache_requests_start': 0}
    def test_is_slow_query(self) -> None:
        """Generic URLs count as slow at ~2s; known-expensive endpoints get
        a higher threshold and error-report endpoints are never flagged."""
        self.assertFalse(is_slow_query(1.1, '/some/random/url'))
        self.assertTrue(is_slow_query(2, '/some/random/url'))
        self.assertTrue(is_slow_query(5.1, '/activity'))
        self.assertFalse(is_slow_query(2, '/activity'))
        self.assertFalse(is_slow_query(2, '/json/report/error'))
        self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
        self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
        self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
        self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
        self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))
    @override_settings(SLOW_QUERY_LOGS_STREAM="logs")
    @patch('logging.info')
    def test_slow_query_log(self, mock_logging_info: Mock) -> None:
        """A request slower than SLOW_QUERY_TIME is reported as a message
        to the configured slow-query stream."""
        # Work on a copy so the shared class-level template stays pristine.
        log_data = dict(self.log_data, time_started=time.time() - self.SLOW_QUERY_TIME)
        write_log_line(log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        last_message = self.get_last_message()
        self.assertEqual(last_message.sender.email, "<EMAIL>")
        self.assertIn("logs", str(last_message.recipient))
        self.assertEqual(last_message.topic_name(), "testserver: slow queries")
        # assertRegexpMatches is a deprecated alias (removed in Python 3.12).
        self.assertRegex(last_message.content,
                         r"123\.456\.789\.012 SOCKET 200 10\.\ds .*")
    @override_settings(ERROR_BOT=None)
    @patch('logging.info')
    @patch('zerver.lib.actions.internal_send_message')
    def test_slow_query_log_without_error_bot(self, mock_internal_send_message: Mock,
                                              mock_logging_info: Mock) -> None:
        """With no ERROR_BOT configured, nothing is sent for slow queries."""
        log_data = dict(self.log_data, time_started=time.time() - self.SLOW_QUERY_TIME)
        write_log_line(log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        mock_internal_send_message.assert_not_called()
class OpenGraphTest(ZulipTestCase):
    """Verify the Open Graph <meta> tags (og:title, og:description,
    og:image, og:url) rendered on help-center, API-docs, and login
    pages."""
    def check_title_and_description(self, path: str, title: str,
                                    in_description: List[str],
                                    not_in_description: List[str],
                                    status_code: int=200) -> None:
        """Fetch *path*, assert the response status, that og:title equals
        *title*, and that og:description contains every string in
        *in_description* and none of the strings in *not_in_description*."""
        response = self.client_get(path)
        self.assertEqual(response.status_code, status_code)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_title = bs.select_one('meta[property="og:title"]').get('content')
        self.assertEqual(open_graph_title, title)
        open_graph_description = bs.select_one('meta[property="og:description"]').get('content')
        for substring in in_description:
            self.assertIn(substring, open_graph_description)
        for substring in not_in_description:
            self.assertNotIn(substring, open_graph_description)
    def test_admonition_and_link(self) -> None:
        # disable-message-edit-history starts with an {!admin-only.md!}, and has a link
        # in the first paragraph.
        self.check_title_and_description(
            '/help/disable-message-edit-history',
            "Disable message edit history (Zulip Help Center)",
            ["By default, Zulip displays messages",
             "users can view the edit history of a message. | To remove the",
             "best to delete the message entirely. "],
            ["Disable message edit history", "feature is only available", "Related articles",
             "Restrict message editing"]
        )
    def test_double_quotes(self) -> None:
        # night-mode has a quoted string "night mode"
        self.check_title_and_description(
            '/help/night-mode',
            "Night mode (Zulip Help Center)",
            ['By default, Zulip has a white background. ',
             'Zulip also provides a "night mode", which is great for working in a dark space.'],
            []
        )
    def test_settings_tab(self) -> None:
        # deactivate-your-account starts with {settings_tab|your-account}
        self.check_title_and_description(
            '/help/deactivate-your-account',
            "Deactivate your account (Zulip Help Center)",
            ["Any bots that you maintain will be disabled. | Deactivating "],
            ["Confirm by clicking", "  ", "\n"])
    def test_tabs(self) -> None:
        # logging-out starts with {start_tabs}
        self.check_title_and_description(
            '/help/logging-out',
            "Logging out (Zulip Help Center)",
            # Ideally we'd do something better here
            ["We're here to help! Email us at <EMAIL> with questions, feedback, or " +
             "feature requests."],
            ["Click on the gear"])
    def test_index_pages(self) -> None:
        self.check_title_and_description(
            '/help/',
            "Zulip Help Center",
            [("Zulip is a group chat app. Its most distinctive characteristic is that "
              "conversation within an organization is divided into “streams” and further ")], [])
        self.check_title_and_description(
            '/api/',
            "Zulip API Documentation",
            [("Zulip's APIs allow you to integrate other services with Zulip. This "
              "guide should help you find the API you need:")], [])
    def test_nonexistent_page(self) -> None:
        self.check_title_and_description(
            '/help/not-a-real-page',
            # Probably we should make this "Zulip Help Center"
            "No such article. (Zulip Help Center)",
            ["No such article. | We're here to help!",
             "Email us at <EMAIL> with questions, feedback, or feature requests."],
            [],
            # Test that our open graph logic doesn't throw a 500
            404)
    def test_login_page_simple_description(self) -> None:
        name = 'Zulip Dev'
        description = "The Zulip development environment default organization. It's great for testing!"
        self.check_title_and_description(
            '/login/',
            name,
            [description],
            [])
    def test_login_page_markdown_description(self) -> None:
        """Markdown in the realm description is flattened to plain text."""
        realm = get_realm('zulip')
        description = ("Welcome to **Clojurians Zulip** - the place where the Clojure community meets.\n\n"
                       "Before you signup/login:\n\n"
                       "* note-1\n"
                       "* note-2\n"
                       "* note-3\n\n"
                       "Enjoy!")
        realm.description = description
        realm.save(update_fields=['description'])
        self.check_title_and_description(
            '/login/',
            'Zulip Dev',
            ['Welcome to Clojurians Zulip - the place where the Clojure community meets',
             '* note-1 * note-2 * note-3 | Enjoy!'],
            [])
    def test_login_page_realm_icon(self) -> None:
        """og:image is the realm icon URL, made absolute with realm.uri."""
        realm = get_realm('zulip')
        realm.icon_source = 'U'
        realm.save(update_fields=['icon_source'])
        realm_icon = get_realm_icon_url(realm)
        response = self.client_get('/login/')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_image = bs.select_one('meta[property="og:image"]').get('content')
        self.assertEqual(open_graph_image, '%s%s' % (realm.uri, realm_icon))
    def test_login_page_realm_icon_absolute_url(self) -> None:
        """An already-absolute icon URL (e.g. S3-hosted) is used verbatim."""
        realm = get_realm('zulip')
        realm.icon_source = 'U'
        realm.save(update_fields=['icon_source'])
        icon_url = "https://foo.s3.amazonaws.com/%s/realm/icon.png?version=%s" % (realm.id, 1)
        with patch('zerver.lib.realm_icon.upload_backend.get_realm_icon_url', return_value=icon_url):
            response = self.client_get('/login/')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_image = bs.select_one('meta[property="og:image"]').get('content')
        self.assertEqual(open_graph_image, icon_url)
    def test_no_realm_api_page_og_url(self) -> None:
        """og:url on the realm-less /api/ page still points at /api/."""
        response = self.client_get('/api/', subdomain='')
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        bs = BeautifulSoup(decoded, features='lxml')
        open_graph_url = bs.select_one('meta[property="og:url"]').get('content')
        self.assertTrue(open_graph_url.endswith('/api/'))
|
en
| 0.861931
|
# disable-message-edit-history starts with an {!admin-only.md!}, and has a link # in the first paragraph. # night-mode has a quoted string "night mode" # deactivate-your-account starts with {settings_tab|your-account} # logging-out starts with {start_tabs} # Ideally we'd do something better here # Probably we should make this "Zulip Help Center" # Test that our open graph logic doesn't throw a 500
| 2.039517
| 2
|
2-1.TextCNN/train_eval.py
|
techthiyanes/nlp-notebook
| 136
|
6626388
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import optuna
from sklearn import metrics
from optuna.trial import TrialState
from model import TextCNN
from load_data import train_iter, val_iter, id2vocab
EPOCHS = 10  # training epochs per Optuna trial
CLS = 2  # number of target classes
# Train on GPU when one is available.
device = "cuda" if torch.cuda.is_available() else 'cpu'
def objective(trial):
    """Optuna objective: build a TextCNN with trial-suggested optimizer and
    learning rate, train for EPOCHS epochs, report validation accuracy to
    the trial each epoch (allowing pruning), and return the final accuracy.
    """
    model = TextCNN(trial, len(id2vocab), CLS)
    model.to(device)
    # Hyper-parameters sampled by Optuna for this trial.
    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)
    criterion = nn.NLLLoss()
    for epoch in range(EPOCHS):
        model.train()
        epoch_loss= []
        for batch in train_iter:
            # batch.text is transposed in place (t_()) before the model call;
            # presumably (seq_len, batch) -> (batch, seq_len) — TODO confirm.
            text_idx_batch, label_idx_batch = batch.text.t_().to(device), batch.label.to(device)
            model.zero_grad()
            out = model(text_idx_batch)
            loss = criterion(out, label_idx_batch)
            loss.backward()
            epoch_loss.append(loss.item())
            optimizer.step()
        #print(f'Epoch[{epoch}] - Loss:{sum(epoch_loss)/len(epoch_loss)}')
        # Evaluate on the validation set without gradients.
        model.eval()
        predict_all = np.array([], dtype=int)
        labels_all = np.array([], dtype=int)
        with torch.no_grad():
            for batch in val_iter:
                text_idx_batch, label_idx_batch = batch.text.t_().to(device), batch.label
                pred = model(text_idx_batch)
                pred = torch.max(pred.data, 1)[1].cpu().numpy()
                predict_all = np.append(predict_all, pred)
                truth = label_idx_batch.cpu().numpy()
                labels_all = np.append(labels_all, truth)
        acc = metrics.accuracy_score(labels_all, predict_all)
        # Report intermediate accuracy so Optuna can prune poor trials.
        trial.report(acc, epoch)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return acc
if __name__ == '__main__':
    # Maximize validation accuracy over 8 hyper-parameter trials.
    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=8)
    pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
    complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
    # Summarize the study and print the best trial's hyper-parameters.
    print("Study statistics: ")
    print("  Number of finished trials: ", len(study.trials))
    print("  Number of pruned trials: ", len(pruned_trials))
    print("  Number of complete trials: ", len(complete_trials))
    print("Best trial:")
    trial = study.best_trial
    print("  Value: ", trial.value)
    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import optuna
from sklearn import metrics
from optuna.trial import TrialState
from model import TextCNN
from load_data import train_iter, val_iter, id2vocab
EPOCHS = 10  # training epochs per Optuna trial
CLS = 2  # number of target classes
# Train on GPU when one is available.
device = "cuda" if torch.cuda.is_available() else 'cpu'
def objective(trial):
    """Optuna objective: build a TextCNN with trial-suggested optimizer and
    learning rate, train for EPOCHS epochs, report validation accuracy to
    the trial each epoch (allowing pruning), and return the final accuracy.
    """
    model = TextCNN(trial, len(id2vocab), CLS)
    model.to(device)
    # Hyper-parameters sampled by Optuna for this trial.
    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)
    criterion = nn.NLLLoss()
    for epoch in range(EPOCHS):
        model.train()
        epoch_loss= []
        for batch in train_iter:
            # batch.text is transposed in place (t_()) before the model call;
            # presumably (seq_len, batch) -> (batch, seq_len) — TODO confirm.
            text_idx_batch, label_idx_batch = batch.text.t_().to(device), batch.label.to(device)
            model.zero_grad()
            out = model(text_idx_batch)
            loss = criterion(out, label_idx_batch)
            loss.backward()
            epoch_loss.append(loss.item())
            optimizer.step()
        #print(f'Epoch[{epoch}] - Loss:{sum(epoch_loss)/len(epoch_loss)}')
        # Evaluate on the validation set without gradients.
        model.eval()
        predict_all = np.array([], dtype=int)
        labels_all = np.array([], dtype=int)
        with torch.no_grad():
            for batch in val_iter:
                text_idx_batch, label_idx_batch = batch.text.t_().to(device), batch.label
                pred = model(text_idx_batch)
                pred = torch.max(pred.data, 1)[1].cpu().numpy()
                predict_all = np.append(predict_all, pred)
                truth = label_idx_batch.cpu().numpy()
                labels_all = np.append(labels_all, truth)
        acc = metrics.accuracy_score(labels_all, predict_all)
        # Report intermediate accuracy so Optuna can prune poor trials.
        trial.report(acc, epoch)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return acc
if __name__ == '__main__':
    # Maximize validation accuracy over 8 hyper-parameter trials.
    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=8)
    pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
    complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
    # Summarize the study and print the best trial's hyper-parameters.
    print("Study statistics: ")
    print("  Number of finished trials: ", len(study.trials))
    print("  Number of pruned trials: ", len(pruned_trials))
    print("  Number of complete trials: ", len(complete_trials))
    print("Best trial:")
    trial = study.best_trial
    print("  Value: ", trial.value)
    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))
|
en
| 0.35356
|
# -*- coding: utf-8 -*- #print(f'Epoch[{epoch}] - Loss:{sum(epoch_loss)/len(epoch_loss)}')
| 2.527859
| 3
|
MAF.py
|
Fan-Han/Population-analysis-with-pooled-data
| 1
|
6626389
|
#! /usr/bin/python3
# Calculate the allele frequency for particular populations
import sys
import argparse
# Command-line interface: one or more --pop sample-list files plus the
# allelic-depth (AD) table.
parser = argparse.ArgumentParser(description = "Calculate the minor allele frequency for particular group of samples")
parser.add_argument("--pop", type = str, help = "Population list that the MAF will calculated in. This flag can be specified multiple times", required = True, action = "append")
parser.add_argument("--AD", type = str, help = "File of allelic depth at each site of each pool", required = True)
args = vars(parser.parse_args())
AD = args["AD"]
pop = args["pop"]
# read all samples into a list
def read_sample(my_list):
    """Collect the first tab-separated field of every line in every file
    named in *my_list*, in file-then-line order."""
    samples = []
    for path in my_list:
        with open(path) as handle:
            samples.extend(line.strip().split("\t")[0] for line in handle)
    return samples
samples = read_sample(pop)
# header
print("CHR","POS","MAF", sep="\t")
# Go through AD file
with open(AD) as ad:
    header = ad.readline()
    header = header.strip().split("\t")
    # Column indices (in the AD table) of the requested samples.
    sample_index = []
    for i in range(len(header)):
        if header[i] in samples:
            sample_index.append(i)
    # check if all samples are in the AD file
    #print(len(sample_index))
    if len(sample_index) != len(samples):
        sys.exit("Not all samples are in the AD file!")
    # read allelic depth
    for l in ad:
        l = l.strip().split("\t")
        CHR = l[0]
        POS = l[1]
        ref = 0
        alt = 0
        # Pool REF/ALT depths over the selected sample columns; a bare ","
        # marks a sample with no depth call at this site and is skipped.
        for x in range(len(l)):
            if x in sample_index and l[x] != ",":
                # NOTE(review): rebinding the loop variable `x` to the parsed
                # [ref, alt] pair works (the range iterator is unaffected)
                # but is easy to misread.
                x = [int(n) for n in l[x].split(",")]
                ref += x[0]
                alt += x[1]
        # MAF
        if ref + alt != 0:
            MAF = round(min(ref, alt)/(ref + alt),3)
            if MAF > 0:
                print(CHR, POS, MAF, sep="\t")
|
#! /usr/bin/python3
# Calculate the allele frequency for particular populations
import sys
import argparse
# Command-line interface: one or more --pop sample-list files plus the
# allelic-depth (AD) table.
parser = argparse.ArgumentParser(description = "Calculate the minor allele frequency for particular group of samples")
parser.add_argument("--pop", type = str, help = "Population list that the MAF will calculated in. This flag can be specified multiple times", required = True, action = "append")
parser.add_argument("--AD", type = str, help = "File of allelic depth at each site of each pool", required = True)
args = vars(parser.parse_args())
AD = args["AD"]
pop = args["pop"]
# read all samples into a list
def read_sample(my_list):
    """Collect the first tab-separated field of every line in every file
    named in *my_list*, in file-then-line order."""
    samples = []
    for path in my_list:
        with open(path) as handle:
            samples.extend(line.strip().split("\t")[0] for line in handle)
    return samples
samples = read_sample(pop)
# header
print("CHR","POS","MAF", sep="\t")
# Go through AD file
with open(AD) as ad:
    header = ad.readline()
    header = header.strip().split("\t")
    # Column indices (in the AD table) of the requested samples.
    sample_index = []
    for i in range(len(header)):
        if header[i] in samples:
            sample_index.append(i)
    # check if all samples are in the AD file
    #print(len(sample_index))
    if len(sample_index) != len(samples):
        sys.exit("Not all samples are in the AD file!")
    # read allelic depth
    for l in ad:
        l = l.strip().split("\t")
        CHR = l[0]
        POS = l[1]
        ref = 0
        alt = 0
        # Pool REF/ALT depths over the selected sample columns; a bare ","
        # marks a sample with no depth call at this site and is skipped.
        for x in range(len(l)):
            if x in sample_index and l[x] != ",":
                # NOTE(review): rebinding the loop variable `x` to the parsed
                # [ref, alt] pair works (the range iterator is unaffected)
                # but is easy to misread.
                x = [int(n) for n in l[x].split(",")]
                ref += x[0]
                alt += x[1]
        # MAF
        if ref + alt != 0:
            MAF = round(min(ref, alt)/(ref + alt),3)
            if MAF > 0:
                print(CHR, POS, MAF, sep="\t")
|
en
| 0.717474
|
#! /usr/bin/python3 # Calculate the allele frequency for particular populations # read all samples into a list # header # Go through AD file # check if all samples are in the AD file #print(len(sample_index)) # read allelic depth # MAF
| 3.15875
| 3
|
models/m2_transformer/encoders.py
|
lmfethan/RSTNet
| 0
|
6626390
|
<gh_stars>0
from torch.nn import functional as F
from models.m2_transformer.utils import PositionWiseFeedForward
import torch
from torch import nn
from models.m2_transformer.attention import MultiHeadAttention
class EncoderLayer(nn.Module):
    """A single transformer encoder layer: multi-head self-attention
    followed by a position-wise feed-forward sublayer."""
    def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, identity_map_reordering=False,
                 attention_module=None, attention_module_kwargs=None):
        super().__init__()
        self.identity_map_reordering = identity_map_reordering
        # Attention sublayer (optionally backed by a custom attention module).
        self.mhatt = MultiHeadAttention(
            d_model, d_k, d_v, h, dropout,
            identity_map_reordering=identity_map_reordering,
            attention_module=attention_module,
            attention_module_kwargs=attention_module_kwargs)
        # Position-wise feed-forward sublayer.
        self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout,
                                            identity_map_reordering=identity_map_reordering)
    def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
        """Apply attention over (queries, keys, values), then the FFN."""
        attended = self.mhatt(queries, keys, values, attention_mask, attention_weights)
        return self.pwff(attended)
class MultiLevelEncoder(nn.Module):
    """A stack of N EncoderLayers whose forward pass returns every layer's
    output (stacked along dim 1) together with the padding mask."""
    def __init__(self, N, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1,
                 identity_map_reordering=False, attention_module=None, attention_module_kwargs=None):
        super().__init__()
        self.d_model = d_model
        self.dropout = dropout
        def make_layer():
            # One encoder layer with the shared hyper-parameters.
            return EncoderLayer(d_model, d_k, d_v, h, d_ff, dropout,
                                identity_map_reordering=identity_map_reordering,
                                attention_module=attention_module,
                                attention_module_kwargs=attention_module_kwargs)
        self.layers = nn.ModuleList([make_layer() for _ in range(N)])
        self.padding_idx = padding_idx
    def forward(self, input, attention_weights=None):
        """input: (b_s, seq_len, d_in).  Returns (outs, attention_mask),
        where outs stacks each layer's output along dim 1 and the mask is
        (b_s, 1, 1, seq_len)."""
        # A position counts as padding when its feature vector sums to
        # padding_idx.
        attention_mask = (torch.sum(input, -1) == self.padding_idx).unsqueeze(1).unsqueeze(1)
        per_layer = []
        out = input
        for layer in self.layers:
            out = layer(out, out, out, attention_mask, attention_weights)
            per_layer.append(out.unsqueeze(1))
        return torch.cat(per_layer, 1), attention_mask
class MemoryAugmentedEncoder(MultiLevelEncoder):
    """MultiLevelEncoder preceded by an input projection d_in -> d_model
    (linear + ReLU + dropout + layer norm)."""
    def __init__(self, N, padding_idx, d_in=2048, **kwargs):
        super().__init__(N, padding_idx, **kwargs)
        self.fc = nn.Linear(d_in, self.d_model)
        # NOTE(review): rebinds self.dropout (a float stored by the parent)
        # to a Dropout module; the attribute name is kept for checkpoint
        # compatibility.
        self.dropout = nn.Dropout(p=self.dropout)
        self.layer_norm = nn.LayerNorm(self.d_model)
    def forward(self, input, attention_weights=None):
        """Project the raw features, then run the multi-level encoder."""
        projected = self.layer_norm(self.dropout(F.relu(self.fc(input))))
        return super().forward(projected, attention_weights=attention_weights)
|
from torch.nn import functional as F
from models.m2_transformer.utils import PositionWiseFeedForward
import torch
from torch import nn
from models.m2_transformer.attention import MultiHeadAttention
class EncoderLayer(nn.Module):
    """A single transformer encoder layer: multi-head self-attention
    followed by a position-wise feed-forward sublayer."""
    def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, identity_map_reordering=False,
                 attention_module=None, attention_module_kwargs=None):
        super().__init__()
        self.identity_map_reordering = identity_map_reordering
        # Attention sublayer (optionally backed by a custom attention module).
        self.mhatt = MultiHeadAttention(
            d_model, d_k, d_v, h, dropout,
            identity_map_reordering=identity_map_reordering,
            attention_module=attention_module,
            attention_module_kwargs=attention_module_kwargs)
        # Position-wise feed-forward sublayer.
        self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout,
                                            identity_map_reordering=identity_map_reordering)
    def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
        """Apply attention over (queries, keys, values), then the FFN."""
        attended = self.mhatt(queries, keys, values, attention_mask, attention_weights)
        return self.pwff(attended)
class MultiLevelEncoder(nn.Module):
    """A stack of N EncoderLayers whose forward pass returns every layer's
    output (stacked along dim 1) together with the padding mask."""
    def __init__(self, N, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1,
                 identity_map_reordering=False, attention_module=None, attention_module_kwargs=None):
        super().__init__()
        self.d_model = d_model
        self.dropout = dropout
        def make_layer():
            # One encoder layer with the shared hyper-parameters.
            return EncoderLayer(d_model, d_k, d_v, h, d_ff, dropout,
                                identity_map_reordering=identity_map_reordering,
                                attention_module=attention_module,
                                attention_module_kwargs=attention_module_kwargs)
        self.layers = nn.ModuleList([make_layer() for _ in range(N)])
        self.padding_idx = padding_idx
    def forward(self, input, attention_weights=None):
        """input: (b_s, seq_len, d_in).  Returns (outs, attention_mask),
        where outs stacks each layer's output along dim 1 and the mask is
        (b_s, 1, 1, seq_len)."""
        # A position counts as padding when its feature vector sums to
        # padding_idx.
        attention_mask = (torch.sum(input, -1) == self.padding_idx).unsqueeze(1).unsqueeze(1)
        per_layer = []
        out = input
        for layer in self.layers:
            out = layer(out, out, out, attention_mask, attention_weights)
            per_layer.append(out.unsqueeze(1))
        return torch.cat(per_layer, 1), attention_mask
class MemoryAugmentedEncoder(MultiLevelEncoder):
    """MultiLevelEncoder preceded by an input projection d_in -> d_model
    (linear + ReLU + dropout + layer norm)."""
    def __init__(self, N, padding_idx, d_in=2048, **kwargs):
        super().__init__(N, padding_idx, **kwargs)
        self.fc = nn.Linear(d_in, self.d_model)
        # NOTE(review): rebinds self.dropout (a float stored by the parent)
        # to a Dropout module; the attribute name is kept for checkpoint
        # compatibility.
        self.dropout = nn.Dropout(p=self.dropout)
        self.layer_norm = nn.LayerNorm(self.d_model)
    def forward(self, input, attention_weights=None):
        """Project the raw features, then run the multi-level encoder."""
        projected = self.layer_norm(self.dropout(F.relu(self.fc(input))))
        return super().forward(projected, attention_weights=attention_weights)
|
en
| 0.163706
|
# input (b_s, seq_len, d_in) # (b_s, 1, 1, seq_len)
| 2.356951
| 2
|
tourboxneo/menu.py
|
AlexandraAlter/tourboxneo
| 0
|
6626391
|
import logging
import toml
import time
import tkinter as tk
from tkinter import ttk
from threading import *
logger = logging.getLogger(__name__)
FONT = ('Verdana', 12)
NORM_FONT = ('Helvetica', 10)
SMALL_FONT = ('Helvetica', 8)
gui_thread = None
def start_tk(tk):
    # Run the Tk event loop; `tk` here is a Tk root instance (it shadows
    # the module alias `tk` imported above — NOTE(review): confirm intended).
    tk.mainloop()
class Menu(tk.Tk):
    """Borderless popup menu window placed at the mouse pointer.

    Closes itself when it loses keyboard focus or when its close button
    is pressed."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # basic setup: no window-manager decorations
        self.overrideredirect(True)
        self.wm_title('TourBoxNeo Menu')
        # callbacks: dismiss the popup when focus leaves it
        self.bind('<FocusIn>', self.focus_in)
        self.bind('<FocusOut>', self.focus_out)
        close = ttk.Button(self, text='x', command=self.destroy)
        close.pack(side='right')
        label = ttk.Label(self, text='foo', font=FONT)
        label.pack(side='top', fill='x', pady=10)
        # position: geometry('+x+y') expects absolute screen coordinates.
        # The original subtracted winfo_rootx/rooty, which gives the pointer
        # position *relative to this window*, not a screen position; use the
        # pointer's screen coordinates directly so the popup opens under the
        # mouse.
        abs_x = self.winfo_pointerx()
        abs_y = self.winfo_pointery()
        self.geometry('+{0}+{1}'.format(abs_x, abs_y))
        self.update_idletasks()
    def focus_in(self, event):
        # Use the module-level logger instead of bare print (logger was
        # defined at module scope but previously unused).
        logger.debug('focus in')
    def focus_out(self, event):
        # Losing focus dismisses the popup.
        logger.debug('focus out')
        self.destroy()
|
import logging
import toml
import time
import tkinter as tk
from tkinter import ttk
from threading import *
logger = logging.getLogger(__name__)
FONT = ('Verdana', 12)
NORM_FONT = ('Helvetica', 10)
SMALL_FONT = ('Helvetica', 8)
gui_thread = None
def start_tk(tk):
    # Run the Tk event loop; `tk` here is a Tk root instance (it shadows
    # the module alias `tk` imported above — NOTE(review): confirm intended).
    tk.mainloop()
class Menu(tk.Tk):
    """Borderless popup menu window placed at the mouse pointer.

    Closes itself when it loses keyboard focus or when its close button
    is pressed."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # basic setup: no window-manager decorations
        self.overrideredirect(True)
        self.wm_title('TourBoxNeo Menu')
        # callbacks: dismiss the popup when focus leaves it
        self.bind('<FocusIn>', self.focus_in)
        self.bind('<FocusOut>', self.focus_out)
        close = ttk.Button(self, text='x', command=self.destroy)
        close.pack(side='right')
        label = ttk.Label(self, text='foo', font=FONT)
        label.pack(side='top', fill='x', pady=10)
        # position: geometry('+x+y') expects absolute screen coordinates.
        # The original subtracted winfo_rootx/rooty, which gives the pointer
        # position *relative to this window*, not a screen position; use the
        # pointer's screen coordinates directly so the popup opens under the
        # mouse.
        abs_x = self.winfo_pointerx()
        abs_y = self.winfo_pointery()
        self.geometry('+{0}+{1}'.format(abs_x, abs_y))
        self.update_idletasks()
    def focus_in(self, event):
        # Use the module-level logger instead of bare print (logger was
        # defined at module scope but previously unused).
        logger.debug('focus in')
    def focus_out(self, event):
        # Losing focus dismisses the popup.
        logger.debug('focus out')
        self.destroy()
|
en
| 0.585941
|
# basic setup # callbacks # position
| 2.761093
| 3
|
misago/project_template/project_name/settings.py
|
HenryChenV/iJiangNan
| 1
|
6626392
|
<gh_stars>1-10
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Define placeholder gettext function
# This function will mark strings in settings visible to makemessages
# without need for Django's i18n features be initialized first.
_ = lambda x: x
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# A list of strings representing the host/domain names that this Django site can serve.
# If you are unsure, just enter here your domain name, eg. ['mysite.com', 'www.mysite.com']
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
# Misago requires PostgreSQL to run
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': 5432,
}
}
# Caching
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/cache/#setting-up-the-cache
CACHES = {
'default': {
# Misago doesn't run well with LocMemCache in production environments
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
'OPTIONS': {
'user_attributes': ['username', 'email'],
}
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 7,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
# User uploads (Avatars, Attachments, files uploaded in other Django apps, etc.)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
MEDIA_URL = '/media/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#static-root
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Absolute filesystem path to the directory that will hold user-uploaded files.
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder
# is enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view.
# https://docs.djangoproject.com/en/1.10/ref/settings/#staticfiles-dirs
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'theme', 'static'),
]
# Email configuration
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#email-backend
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
# If either of these settings is empty, Django won't attempt authentication.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# Default email address to use for various automated correspondence from the site manager(s).
DEFAULT_FROM_EMAIL = 'Forums <%s>' % EMAIL_HOST_USER
# Application definition
AUTH_USER_MODEL = 'misago_users.User'
AUTHENTICATION_BACKENDS = [
'misago.users.authbackends.MisagoBackend',
]
CSRF_FAILURE_VIEW = 'misago.core.errorpages.csrf_failure'
INSTALLED_APPS = [
# Misago overrides for Django core feature
'misago',
'misago.users',
# Django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.postgres',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps used by Misago
'debug_toolbar',
'crispy_forms',
'mptt',
'rest_framework',
# Misago apps
'misago.admin',
'misago.acl',
'misago.core',
'misago.conf',
'misago.markup',
'misago.legal',
'misago.categories',
'misago.threads',
'misago.readtracker',
'misago.search',
'misago.faker',
]
INTERNAL_IPS = [
'127.0.0.1'
]
LOGIN_REDIRECT_URL = 'misago:index'
LOGIN_URL = 'misago:login'
LOGOUT_URL = 'misago:logout'
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'misago.users.middleware.RealIPMiddleware',
'misago.core.middleware.frontendcontext.FrontendContextMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'misago.users.middleware.UserMiddleware',
'misago.core.middleware.exceptionhandler.ExceptionHandlerMiddleware',
'misago.users.middleware.OnlineTrackerMiddleware',
'misago.admin.middleware.AdminAuthMiddleware',
'misago.threads.middleware.UnreadThreadsCountMiddleware',
'misago.core.middleware.threadstore.ThreadStoreMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'theme', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'misago.core.context_processors.site_address',
'misago.core.context_processors.momentjs_locale',
'misago.conf.context_processors.settings',
'misago.users.context_processors.user_links',
'misago.legal.context_processors.legal_links',
# Data preloaders
'misago.conf.context_processors.preload_settings_json',
'misago.core.context_processors.current_link',
'misago.markup.context_processors.preload_api_url',
'misago.threads.context_processors.preload_threads_urls',
'misago.users.context_processors.preload_user_json',
# Note: keep frontend_context processor last for previous processors
# to be able to expose data UI app via request.frontend_context
'misago.core.context_processors.frontend_context',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Django Crispy Forms
#http://django-crispy-forms.readthedocs.io/en/latest/install.html
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.io/en/stable/configuration.html
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'misago.acl.panels.MisagoACLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
]
# Django Rest Framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'misago.core.rest_permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
],
'EXCEPTION_HANDLER': 'misago.core.exceptionhandler.handle_api_exception',
'UNAUTHENTICATED_USER': 'misago.users.models.AnonymousUser',
'URL_FORMAT_OVERRIDE': None,
}
# Misago specific settings
# https://misago.readthedocs.io/en/latest/developers/settings.html
# PostgreSQL text search configuration to use in searches
# Defaults to "simple", for list of installed configurations run "\dF" in "psql"
# Standard configs as of PostgreSQL 9.5 are: dutch, english, finnish, french,
# german, hungarian, italian, norwegian, portuguese, romanian, russian, simple,
# spanish, swedish and turkish
# Example on adding custom language can be found here: https://github.com/lemonskyjwt/plpstgrssearch
MISAGO_SEARCH_CONFIG = 'simple'
# Path to directory containing avatar galleries
# Those galleries can be loaded by running loadavatargallery command
MISAGO_AVATAR_GALLERY = os.path.join(BASE_DIR, 'avatargallery')
# Profile fields
MISAGO_PROFILE_FIELDS = [
{
'name': _("Personal"),
'fields': [
'misago.users.profilefields.default.FullNameField',
'misago.users.profilefields.default.GenderField',
'misago.users.profilefields.default.BioField',
'misago.users.profilefields.default.LocationField',
],
},
{
'name': _("Contact"),
'fields': [
'misago.users.profilefields.default.TwitterHandleField',
'misago.users.profilefields.default.SkypeIdField',
'misago.users.profilefields.default.WebsiteField',
],
},
{
'name': _("IP address"),
'fields': [
'misago.users.profilefields.default.JoinIpField',
'misago.users.profilefields.default.LastIpField',
],
},
]
|
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Define placeholder gettext function
# This function will mark strings in settings visible to makemessages
# without need for Django's i18n features be initialized first.
_ = lambda x: x
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# A list of strings representing the host/domain names that this Django site can serve.
# If you are unsure, just enter here your domain name, eg. ['mysite.com', 'www.mysite.com']
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
# Misago requires PostgreSQL to run
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': 5432,
}
}
# Caching
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/cache/#setting-up-the-cache
CACHES = {
'default': {
# Misago doesn't run well with LocMemCache in production environments
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
'OPTIONS': {
'user_attributes': ['username', 'email'],
}
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 7,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
# User uploads (Avatars, Attachments, files uploaded in other Django apps, etc.)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
MEDIA_URL = '/media/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#static-root
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Absolute filesystem path to the directory that will hold user-uploaded files.
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder
# is enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view.
# https://docs.djangoproject.com/en/1.10/ref/settings/#staticfiles-dirs
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'theme', 'static'),
]
# Email configuration
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#email-backend
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
# If either of these settings is empty, Django won't attempt authentication.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# Default email address to use for various automated correspondence from the site manager(s).
DEFAULT_FROM_EMAIL = 'Forums <%s>' % EMAIL_HOST_USER
# Application definition
AUTH_USER_MODEL = 'misago_users.User'
AUTHENTICATION_BACKENDS = [
'misago.users.authbackends.MisagoBackend',
]
CSRF_FAILURE_VIEW = 'misago.core.errorpages.csrf_failure'
INSTALLED_APPS = [
# Misago overrides for Django core feature
'misago',
'misago.users',
# Django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.postgres',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps used by Misago
'debug_toolbar',
'crispy_forms',
'mptt',
'rest_framework',
# Misago apps
'misago.admin',
'misago.acl',
'misago.core',
'misago.conf',
'misago.markup',
'misago.legal',
'misago.categories',
'misago.threads',
'misago.readtracker',
'misago.search',
'misago.faker',
]
INTERNAL_IPS = [
'127.0.0.1'
]
LOGIN_REDIRECT_URL = 'misago:index'
LOGIN_URL = 'misago:login'
LOGOUT_URL = 'misago:logout'
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'misago.users.middleware.RealIPMiddleware',
'misago.core.middleware.frontendcontext.FrontendContextMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'misago.users.middleware.UserMiddleware',
'misago.core.middleware.exceptionhandler.ExceptionHandlerMiddleware',
'misago.users.middleware.OnlineTrackerMiddleware',
'misago.admin.middleware.AdminAuthMiddleware',
'misago.threads.middleware.UnreadThreadsCountMiddleware',
'misago.core.middleware.threadstore.ThreadStoreMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'theme', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'misago.core.context_processors.site_address',
'misago.core.context_processors.momentjs_locale',
'misago.conf.context_processors.settings',
'misago.users.context_processors.user_links',
'misago.legal.context_processors.legal_links',
# Data preloaders
'misago.conf.context_processors.preload_settings_json',
'misago.core.context_processors.current_link',
'misago.markup.context_processors.preload_api_url',
'misago.threads.context_processors.preload_threads_urls',
'misago.users.context_processors.preload_user_json',
# Note: keep frontend_context processor last for previous processors
# to be able to expose data UI app via request.frontend_context
'misago.core.context_processors.frontend_context',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Django Crispy Forms
#http://django-crispy-forms.readthedocs.io/en/latest/install.html
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.io/en/stable/configuration.html
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'misago.acl.panels.MisagoACLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
]
# Django Rest Framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'misago.core.rest_permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
],
'EXCEPTION_HANDLER': 'misago.core.exceptionhandler.handle_api_exception',
'UNAUTHENTICATED_USER': 'misago.users.models.AnonymousUser',
'URL_FORMAT_OVERRIDE': None,
}
# Misago specific settings
# https://misago.readthedocs.io/en/latest/developers/settings.html
# PostgreSQL text search configuration to use in searches
# Defaults to "simple", for list of installed configurations run "\dF" in "psql"
# Standard configs as of PostgreSQL 9.5 are: dutch, english, finnish, french,
# german, hungarian, italian, norwegian, portuguese, romanian, russian, simple,
# spanish, swedish and turkish
# Example on adding custom language can be found here: https://github.com/lemonskyjwt/plpstgrssearch
MISAGO_SEARCH_CONFIG = 'simple'
# Path to directory containing avatar galleries
# Those galleries can be loaded by running loadavatargallery command
MISAGO_AVATAR_GALLERY = os.path.join(BASE_DIR, 'avatargallery')
# Profile fields
MISAGO_PROFILE_FIELDS = [
{
'name': _("Personal"),
'fields': [
'misago.users.profilefields.default.FullNameField',
'misago.users.profilefields.default.GenderField',
'misago.users.profilefields.default.BioField',
'misago.users.profilefields.default.LocationField',
],
},
{
'name': _("Contact"),
'fields': [
'misago.users.profilefields.default.TwitterHandleField',
'misago.users.profilefields.default.SkypeIdField',
'misago.users.profilefields.default.WebsiteField',
],
},
{
'name': _("IP address"),
'fields': [
'misago.users.profilefields.default.JoinIpField',
'misago.users.profilefields.default.LastIpField',
],
},
]
|
en
| 0.653894
|
Django settings for {{ project_name }} project. Generated by 'django-admin startproject' using Django {{ django_version }}. For more information on this file, see https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Define placeholder gettext function # This function will mark strings in settings visible to makemessages # without need for Django's i18n features be initialized first. # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # A list of strings representing the host/domain names that this Django site can serve. # If you are unsure, just enter here your domain name, eg. ['mysite.com', 'www.mysite.com'] # Database # https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases # Misago requires PostgreSQL to run # Caching # https://docs.djangoproject.com/en/{{ docs_version }}/topics/cache/#setting-up-the-cache # Misago doesn't run well with LocMemCache in production environments # Password validation # https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/ # User uploads (Avatars, Attachments, files uploaded in other Django apps, ect.) # https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/ # The absolute path to the directory where collectstatic will collect static files for deployment. 
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#static-root # Absolute filesystem path to the directory that will hold user-uploaded files. # https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#media-root # This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder # is enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view. # https://docs.djangoproject.com/en/1.10/ref/settings/#staticfiles-dirs # Email configuration # https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#email-backend # If either of these settings is empty, Django won't attempt authentication. # Default email address to use for various automated correspondence from the site manager(s). # Application definition # Misago overrides for Django core feature # Django apps # 3rd party apps used by Misago # Misago apps # Data preloaders # Note: keep frontend_context processor last for previous processors # to be able to expose data UI app via request.frontend_context # Django Crispy Forms #http://django-crispy-forms.readthedocs.io/en/latest/install.html # Django Debug Toolbar # http://django-debug-toolbar.readthedocs.io/en/stable/configuration.html # Django Rest Framework # http://www.django-rest-framework.org/api-guide/settings/ # Misago specific settings # https://misago.readthedocs.io/en/latest/developers/settings.html # PostgreSQL text search configuration to use in searches # Defaults to "simple", for list of installed configurations run "\dF" in "psql" # Standard configs as of PostgreSQL 9.5 are: dutch, english, finnish, french, # german, hungarian, italian, norwegian, portuguese, romanian, russian, simple, # spanish, swedish and turkish # Example on adding custom language can be found here: https://github.com/lemonskyjwt/plpstgrssearch # Path to directory containing avatar galleries # Those galleries can be loaded by running loadavatargallery 
command # Profile fields
| 2.067695
| 2
|
HR_Easy/hr_flipping_bits.py
|
rndmz451/tinuviel-project
| 0
|
6626393
|
<gh_stars>0
'''
You will be given a list of 32 bit unsigned integers. Flip all the bits (1 -> 0 and 0 -> 1) and return the result as an unsigned integer.
'''
n = 2
def flipping_bits(n):
    """Return *n* with all 32 bits inverted, as an unsigned integer."""
    # XOR against a 32-bit all-ones mask flips every bit.
    return n ^ 0xFFFFFFFF
if __name__ == '__main__':
print(flipping_bits(n))
|
'''
You will be given a list of 32 bit unsigned integers. Flip all the bits (1 -> 0 and 0 -> 1) and return the result as an unsigned integer.
'''
n = 2
def flipping_bits(n):
    """Return *n* with all 32 bits inverted, as an unsigned integer."""
    # XOR against a 32-bit all-ones mask flips every bit.
    return n ^ 0xFFFFFFFF
if __name__ == '__main__':
print(flipping_bits(n))
|
en
| 0.877958
|
You will be given a list of 32 bit unsigned integers. Flip all the bits (1 -> 0 and 0 -> 1) and return the result as an unsigned integer.
| 4.094314
| 4
|
keras_extension/preprocess/graph.py
|
k1414st/keras_extention
| 3
|
6626394
|
# -*- coding: utf-8 -*-
"""
Transformation function from "graph adjacency matrix" to
1. graph laplacian matrix
2. normalize graph matrix
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.sparse.csr import csr_matrix
# to avoid zero division
epsilon = 1e-7
def _batch_dot(x, y):
""" execute dot operation for each unit """
return np.einsum('ijk,ikl->ijl', x, y)
def _get_I(X):
""" get identity matrix or batched matrices of X """
if X.ndim == 2:
I = np.eye(X.shape[-1])
elif X.ndim == 3:
I = np.expand_dims(np.eye(X.shape[-1]), axis=0)
return I
def _get_diag(X):
""" get diagonal matrix or batched matrices of X """
if X.ndim == 2:
D = np.diag(X.sum(axis=-1))
elif X.ndim == 3:
D = np.array([np.diag(m.sum(axis=1)) for m in X])
return D
def to_laplacian(mtx,
                 binarize=False,
                 normalize=False,
                 matrix_type=np.array):
    """
    Compute the (optionally normalized) graph Laplacian of an adjacency matrix.

    Args:
        mtx: adjacency matrix — a 2-D square matrix or a 3-D batch of
            square matrices.
        binarize: map every non-zero weight to 1 before any other step.
        normalize: use the symmetric normalization
            A -> D^-1/2 `dot` A `dot` D^-1/2 and return I - A_norm
            instead of the unnormalized D - A.
        matrix_type: np.array or scipy.sparse.csr.csr_matrix. For a 3-D
            input with csr_matrix, an np.array of csr matrices is returned
            (csr cannot represent a batch directly).

    Raises:
        ValueError: on wrong ndim, non-square input, or unknown matrix_type.
    """
    # --- validation ---
    if mtx.ndim not in (2, 3):
        raise ValueError('ndim of input matrix must be 2 or 3.')
    if mtx.shape[-2] != mtx.shape[-1]:
        raise ValueError('input matrix shape must be squared')
    if matrix_type not in (np.array, csr_matrix):
        raise ValueError(
            'matrix type must be "numpy.array" or "scipy.sparse.csr.csr_matrix"')

    if binarize:
        mtx = np.where(mtx == 0, 0., 1.)

    degree = _get_diag(mtx)
    identity = _get_I(mtx)
    if normalize:
        # D^-1/2, clamping tiny/zero degrees so the power does not blow up.
        d_inv_sqrt = np.where(degree == 0, 0,
                              np.power(np.where(degree > epsilon, degree, epsilon), -0.5))
        if mtx.ndim == 2:
            mtx = d_inv_sqrt.dot(mtx.dot(d_inv_sqrt))
        else:
            mtx = _batch_dot(d_inv_sqrt, _batch_dot(mtx, d_inv_sqrt))
        laplacian = identity - mtx
    else:
        laplacian = degree - mtx

    # batch & sparse -> np.array of csr_matrix
    if mtx.ndim == 3 and matrix_type == csr_matrix:
        return np.array([matrix_type(m) for m in laplacian])
    # np.array or single csr_matrix
    return matrix_type(laplacian)
def normalize_graph_matrix(mtx,
                           binarize=False,
                           add_self=False,
                           add_diagonal=False,
                           normalize=False,
                           normalize_input=False,
                           normalize_output=False,
                           matrix_type=np.array):
    """
    Normalize graph matrix or list of matrix.
    normalize operation include binarize, add_self_loop, whole normalization,
    or input/output normalization. (all optional)
    Args:
        mtx: input adjacency matrix (no self loop, weighted or no-weighted).
            (2D square matrix or 3D batched square matrix)
        binarize: binarize weighted matrix.
            (if element != 0, binarize to 1)
        add_self: add identity matrix (self loop) after normalize.
            You can use this option w/o normalize option.
        add_diagonal: add diagonal matrix (weighted self loop)
            after normalize. You can use this option w/o normalize option.
        normalize: normalize self-adjacency matrix.
            (A -> D^1/2 `dot` A `dot` D^1/2)
        normalize_input: normalize graph input (row-normalize)
        normalize_output: normalize graph output (column-normalize)
        matrix_type:
            output matrix type (np.array or scipy.sparse.csr.csr_matrix)
            if input_dim == 3 and specified matrix_type == csr_matrix,
            returns np.array(csr_matrix)
    Raises:
        ValueError: on invalid input shape/type or conflicting options.
    """
    # validation
    if mtx.ndim not in (2, 3):
        raise ValueError('ndim of input matrix must be 2 or 3.')
    if mtx.shape[-2] != mtx.shape[-1]:
        raise ValueError('input matrix shape must be squared.')
    if matrix_type not in (np.array, csr_matrix):
        raise ValueError(
            'matrix type must be "numpy.array" or "scipy.sparse.csr.csr_matrix".')
    # booleans sum as ints: at most one normalization mode may be chosen.
    if normalize + normalize_input + normalize_output > 1:
        raise ValueError('multiple normalize options cannot be selected.')
    if normalize and (add_self or add_diagonal):
        raise ValueError('you can use add_self or add_diagonal option w/o normalize option.')
    if add_self and add_diagonal:
        raise ValueError('you cannot set add_self and add_diagonal simultaneously.')
    # fundamental preprocess
    if binarize:
        mtx = np.where(mtx == 0, 0., 1.)
    if normalize:
        # Add self loops before symmetric normalization.
        mtx = mtx + np.eye(mtx.shape[-1])
    # normalize adjacency matrix. (A -> D^1/2 `dot` A `dot` D^1/2)
    D = _get_diag(mtx)
    I = _get_I(mtx)
    if normalize:
        # D^-1/2, clamping tiny/zero degrees to avoid division blow-up.
        Dr = np.where(D == 0, 0,
                      np.power(np.where(D > epsilon, D, epsilon), -0.5))
        if mtx.ndim == 2:
            mtx = Dr.dot(mtx.dot(Dr))
        elif mtx.ndim == 3:
            mtx = _batch_dot(Dr, _batch_dot(mtx, Dr))
    # normalization of input or output.
    elif normalize_input:
        # Row-normalize: each node's outgoing weights sum to 1.
        H = mtx.sum(axis=-1)
        H = np.where(H > epsilon, H, epsilon)
        if mtx.ndim == 2:
            mtx = np.einsum('jk,j->jk', mtx, 1/H)
        elif mtx.ndim == 3:
            mtx = np.einsum('ijk,ij->ijk', mtx, 1/H)
    elif normalize_output:
        # Column-normalize: each node's incoming weights sum to 1.
        V = mtx.sum(axis=-2)
        V = np.where(V > epsilon, V, epsilon)
        if mtx.ndim == 2:
            mtx = np.einsum('jk,k->jk', mtx, 1/V)
        elif mtx.ndim == 3:
            mtx = np.einsum('ijk,ik->ijk', mtx, 1/V)
    # adding self loop elements.
    if add_self:
        mtx = mtx + I
    if add_diagonal:
        mtx = mtx + _get_diag(mtx)  # recalc after normalize.
    # batch & sparse -> np.array of csr_matrix
    if mtx.ndim == 3 and matrix_type == csr_matrix:
        return np.array([matrix_type(m) for m in mtx])
    # np.array or single csr_matrix
    else:
        return matrix_type(mtx)
|
# -*- coding: utf-8 -*-
"""
Transformation function from "graph adjacency matrix" to
1. graph laplacian matrix
2. normalize graph matrix
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.sparse.csr import csr_matrix
# to avoid zero division
epsilon = 1e-7
def _batch_dot(x, y):
""" execute dot operation for each unit """
return np.einsum('ijk,ikl->ijl', x, y)
def _get_I(X):
""" get identity matrix or batched matrices of X """
if X.ndim == 2:
I = np.eye(X.shape[-1])
elif X.ndim == 3:
I = np.expand_dims(np.eye(X.shape[-1]), axis=0)
return I
def _get_diag(X):
""" get diagonal matrix or batched matrices of X """
if X.ndim == 2:
D = np.diag(X.sum(axis=-1))
elif X.ndim == 3:
D = np.array([np.diag(m.sum(axis=1)) for m in X])
return D
def to_laplacian(mtx,
                 binarize=False,
                 normalize=False,
                 matrix_type=np.array):
    """
    Compute the (optionally normalized) graph Laplacian of an adjacency matrix.

    Args:
        mtx: adjacency matrix — a 2-D square matrix or a 3-D batch of
            square matrices.
        binarize: map every non-zero weight to 1 before any other step.
        normalize: use the symmetric normalization
            A -> D^-1/2 `dot` A `dot` D^-1/2 and return I - A_norm
            instead of the unnormalized D - A.
        matrix_type: np.array or scipy.sparse.csr.csr_matrix. For a 3-D
            input with csr_matrix, an np.array of csr matrices is returned
            (csr cannot represent a batch directly).

    Raises:
        ValueError: on wrong ndim, non-square input, or unknown matrix_type.
    """
    # --- validation ---
    if mtx.ndim not in (2, 3):
        raise ValueError('ndim of input matrix must be 2 or 3.')
    if mtx.shape[-2] != mtx.shape[-1]:
        raise ValueError('input matrix shape must be squared')
    if matrix_type not in (np.array, csr_matrix):
        raise ValueError(
            'matrix type must be "numpy.array" or "scipy.sparse.csr.csr_matrix"')

    if binarize:
        mtx = np.where(mtx == 0, 0., 1.)

    degree = _get_diag(mtx)
    identity = _get_I(mtx)
    if normalize:
        # D^-1/2, clamping tiny/zero degrees so the power does not blow up.
        d_inv_sqrt = np.where(degree == 0, 0,
                              np.power(np.where(degree > epsilon, degree, epsilon), -0.5))
        if mtx.ndim == 2:
            mtx = d_inv_sqrt.dot(mtx.dot(d_inv_sqrt))
        else:
            mtx = _batch_dot(d_inv_sqrt, _batch_dot(mtx, d_inv_sqrt))
        laplacian = identity - mtx
    else:
        laplacian = degree - mtx

    # batch & sparse -> np.array of csr_matrix
    if mtx.ndim == 3 and matrix_type == csr_matrix:
        return np.array([matrix_type(m) for m in laplacian])
    # np.array or single csr_matrix
    return matrix_type(laplacian)
def normalize_graph_matrix(mtx,
binarize=False,
add_self=False,
add_diagonal=False,
normalize=False,
normalize_input=False,
normalize_output=False,
matrix_type=np.array):
"""
Normalize graph matrix or list of matrix.
normalize operation include binarize, add_self_loop, whole normalization,
or input/output normalization. (all optional)
Args:
mtx: input adjacency matrix (no self loop, weighted or no-weighted).
(2D square matrix or 3D batched square matrix)
binarize: binarize weighted matrix.
(if element != 0, binarize to 1)
add_self: add identify matrix (self loop) after normalize.
You can use this option w/o normalize option.
add_diagonal: add diagonal matrix (weighted self loop)
after normalize. You can use this option w/o normalize option.
normalize: normalize self-adjacency matrix.
(A -> D^1/2 `dot` A `dot` D^1/2)
normalize_input: normalize graph input
normalize_output: normalize graph output
matrix_type:
output matrix type (np.array or scipy.sparse.csr.csr_matrix)
if input_dim == 3 and specified matrix_type == csr_matrix,
returns np.array(csr_matrix)
"""
# validation
if not mtx.ndim in (2, 3):
raise ValueError('ndim of input matrix must be 2 or 3.')
if not mtx.shape[-2] == mtx.shape[-1]:
raise ValueError('input matrix shape must be squared.')
if not matrix_type in (np.array, csr_matrix):
raise ValueError(
'matrix type must be "numpy.array" or "scipy.sparse.csr.csr_matrix".')
if normalize + normalize_input + normalize_output > 1:
raise ValueError('multiple normalize options cannt be selected.')
if normalize and (add_self or add_diagonal):
raise ValueError('you can use add_self or add_diagonal option w/o normalize option.')
if add_self and add_diagonal:
raise ValueError('you cannot set add_self and add_diagonal simultaneously.')
# fundamental preprocess
if binarize:
mtx = np.where(mtx == 0, 0., 1.)
if normalize:
mtx = mtx + np.eye(mtx.shape[-1])
# normalize adjacency matrix. (A -> D^1/2 `dot` A `dot` D^1/2)
D = _get_diag(mtx)
I = _get_I(mtx)
if normalize:
Dr = np.where(D == 0, 0,
np.power(np.where(D > epsilon, D, epsilon), -0.5))
if mtx.ndim == 2:
mtx = Dr.dot(mtx.dot(Dr))
elif mtx.ndim == 3:
mtx = _batch_dot(Dr, _batch_dot(mtx, Dr))
# normalization of input or output.
elif normalize_input:
H = mtx.sum(axis=-1)
H = np.where(H > epsilon, H, epsilon)
if mtx.ndim == 2:
mtx = np.einsum('jk,j->jk', mtx, 1/H)
elif mtx.ndim == 3:
mtx = np.einsum('ijk,ij->ijk', mtx, 1/H)
elif normalize_output:
V = mtx.sum(axis=-2)
V = np.where(V > epsilon, V, epsilon)
if mtx.ndim == 2:
mtx = np.einsum('jk,k->jk', mtx, 1/V)
elif mtx.ndim == 3:
mtx = np.einsum('ijk,ik->ijk', mtx, 1/V)
# adding self loop elements.
if add_self:
mtx = mtx + I
if add_diagonal:
mtx = mtx + _get_diag(mtx) # recalc after normalize.
# batch & sparse -> np.array of csr_matrix
if mtx.ndim == 3 and matrix_type == csr_matrix:
return np.array([matrix_type(m) for m in mtx])
# np.array or single csr_matrix
else:
return matrix_type(mtx)
|
en
| 0.488529
|
# -*- coding: utf-8 -*- Transformation function from "graph adjacency matrix" to 1. graph laplacian matrix 2. normalize graph matrix # to avoid zero division execute dot operation for each unit get identity matrix or batched matrices of X get diagonal matrix or batched matrices of X calculate laplacian matrix. Args: mtx: input matrix (2D square matrix or 3D batched square matrix) binarize: binarize weighted matrix. (if element != 0, binarize to 1) normalize: normalize adjacency matrix. (A -> D^1/2 `dot` A `dot` D^1/2) matrix_type: output matrix type (np.array or scipy.sparse.csr.csr_matrix) if input_dim == 3 and specified matrix_type == csr_matrix, returns np.array(csr_matrix) # validation # case of ndim(2 or 3) x normalize(Do or Not). # batch & sparse -> np.array of csr_matrix # np.array or single csr_matrix Normalize graph matrix or list of matrix. normalize operation include binarize, add_self_loop, whole normalization, or input/output normalization. (all optional) Args: mtx: input adjacency matrix (no self loop, weighted or no-weighted). (2D square matrix or 3D batched square matrix) binarize: binarize weighted matrix. (if element != 0, binarize to 1) add_self: add identify matrix (self loop) after normalize. You can use this option w/o normalize option. add_diagonal: add diagonal matrix (weighted self loop) after normalize. You can use this option w/o normalize option. normalize: normalize self-adjacency matrix. (A -> D^1/2 `dot` A `dot` D^1/2) normalize_input: normalize graph input normalize_output: normalize graph output matrix_type: output matrix type (np.array or scipy.sparse.csr.csr_matrix) if input_dim == 3 and specified matrix_type == csr_matrix, returns np.array(csr_matrix) # validation # fundamental preprocess # normalize adjacency matrix. (A -> D^1/2 `dot` A `dot` D^1/2) # normalization of input or output. # adding self loop elements. # recalc after normalize. # batch & sparse -> np.array of csr_matrix # np.array or single csr_matrix
| 3.186982
| 3
|
lib/streamlit/string_util.py
|
rajvijay68/streamlit
| 1
|
6626395
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python 2/3 compatibility
from __future__ import print_function, division, unicode_literals, absolute_import
from streamlit.compatibility import setup_2_3_shims
setup_2_3_shims(globals())
import textwrap
def decode_ascii(string):
"""Decodes a string as ascii."""
return string.decode("ascii")
def clean_text(text):
return textwrap.dedent(str(text)).strip()
def escape_markdown(raw_string):
"""Returns a new string which escapes all markdown metacharacters.
Args
----
raw_string : str
A string, possibly with markdown metacharacters, e.g. "1 * 2"
Returns
-------
A string with all metacharacters escaped.
Examples
--------
::
escape_markdown("1 * 2") -> "1 \\* 2"
"""
metacharacters = ["\\", "*", "-", "=", "`", "!", "#", "|"]
result = raw_string
for character in metacharacters:
result = result.replace(character, "\\" + character)
return result
TEXTCHARS = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
def is_binary_string(inp):
"""Guess if an input bytesarray can be encoded as a string."""
# From https://stackoverflow.com/a/7392391
return bool(inp.translate(None, TEXTCHARS))
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python 2/3 compatibility
from __future__ import print_function, division, unicode_literals, absolute_import
from streamlit.compatibility import setup_2_3_shims
setup_2_3_shims(globals())
import textwrap
def decode_ascii(string):
"""Decodes a string as ascii."""
return string.decode("ascii")
def clean_text(text):
return textwrap.dedent(str(text)).strip()
def escape_markdown(raw_string):
"""Returns a new string which escapes all markdown metacharacters.
Args
----
raw_string : str
A string, possibly with markdown metacharacters, e.g. "1 * 2"
Returns
-------
A string with all metacharacters escaped.
Examples
--------
::
escape_markdown("1 * 2") -> "1 \\* 2"
"""
metacharacters = ["\\", "*", "-", "=", "`", "!", "#", "|"]
result = raw_string
for character in metacharacters:
result = result.replace(character, "\\" + character)
return result
TEXTCHARS = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
def is_binary_string(inp):
"""Guess if an input bytesarray can be encoded as a string."""
# From https://stackoverflow.com/a/7392391
return bool(inp.translate(None, TEXTCHARS))
|
en
| 0.767677
|
# -*- coding: utf-8 -*- # Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Python 2/3 compatibility Decodes a string as ascii. Returns a new string which escapes all markdown metacharacters. Args ---- raw_string : str A string, possibly with markdown metacharacters, e.g. "1 * 2" Returns ------- A string with all metacharacters escaped. Examples -------- :: escape_markdown("1 * 2") -> "1 \\* 2" Guess if an input bytesarray can be encoded as a string. # From https://stackoverflow.com/a/7392391
| 2.650272
| 3
|
run_comparison_heapsearch_astar.py
|
agissaud/DeepSynth
| 0
|
6626396
|
import logging
import time
import random
import csv
import matplotlib.pyplot as plt
import numpy as np
from math import log10
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL
from program import Program, Function, Variable, BasicPrimitive, New
from cfg import CFG
from pcfg import PCFG
from dsl import DSL
from program_as_list import reconstruct_from_compressed
from Algorithms.heap_search import heap_search
# from Algorithms.heap_search_naive import heap_search_naive
from Algorithms.a_star import a_star
from DSL.deepcoder import semantics,primitive_types
logging_levels = {0: logging.INFO, 1: logging.DEBUG}
verbosity = 0
logging.basicConfig(format='%(message)s', level=logging_levels[verbosity])
seed = 100
random.seed(seed)
np.random.seed(seed)
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT),List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request, max_program_depth = 4)
list_algorithms = [
# (heap_search_naive, 'Heap Search naive', {}),
(heap_search, 'Heap Search', {}),
(a_star, 'A*', {}),
]
reconstruct = {a_star}
def run_algorithm(pcfg, algo_index):
'''
Run the algorithm until timeout, and for each program record probability and time of output
'''
algorithm, name_algo, param = list_algorithms[algo_index]
result = []
search_time = 0
gen = algorithm(pcfg, **param)
nb_programs = 0
cumulative_probability = 0
while (search_time < timeout):
search_time -= time.perf_counter()
try:
program = next(gen)
except StopIteration:
search_time += time.perf_counter()
logging.debug(
"Output the last program after {}".format(nb_programs))
break # no next program
search_time += time.perf_counter()
# logging.debug('program found: {}'.format(program))
if algorithm in reconstruct:
target_type = pcfg.start[0]
program = reconstruct_from_compressed(program, target_type)
probability = pcfg.probability_program(pcfg.start, program)
cumulative_probability += probability
nb_programs += 1
row = search_time, probability, cumulative_probability
result.append(row)
return result
def create_dataset():
logging.info('Create dataset')
number_algorithms = len(list_algorithms)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
timepoints = np.logspace(start = -1, stop = log10(timeout), num = number_timepoints)
r_program = np.zeros((number_samples, number_algorithms, number_timepoints))
for i in range(number_samples):
for algo_index in range(number_algorithms):
algorithm, name_algo, param = list_algorithms[algo_index]
logging.info('start run number {}: {}'.format(i+1, name_algo))
res = run_algorithm(pcfg = deepcoder_PCFG, algo_index = algo_index)
r_program[i][algo_index] = np.interp(timepoints,
[search_time for search_time, _, _ in res],
range(len(res)))
logging.info('finished run number {}'.format(i+1))
result_mean = np.mean(r_program, axis=0)
result_std = np.std(r_program, axis=0)
for algo_index in range(number_algorithms):
algorithm, name_algo, param = list_algorithms[algo_index]
with open('results_syntactic/run_{}_{}.csv'.format(name_algo, timeout), 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
header = ['time', 'mean number of programs', 'standard deviation']
writer.writerow(header)
for x,t in enumerate(timepoints):
writer.writerow((t, result_mean[algo_index][x], result_std[algo_index][x]))
# Plot comparison
def plot():
logging.info('Plot comparison')
timepoints = np.logspace(start = -1, stop = log10(timeout), num = number_timepoints)
for algo_index in range(len(list_algorithms)):
algorithm, name_algo, param = list_algorithms[algo_index]
logging.info('retrieve run: {}'.format(name_algo))
with open('results_syntactic/run_{}_{}.csv'.format(name_algo, timeout), 'r', encoding='UTF8', newline='') as f:
reader = csv.reader(f)
result_mean = np.zeros(number_timepoints)
result_std = np.zeros(number_timepoints)
for i, row in enumerate(reader):
if i == 0:
continue
result_mean[i-1] = row[1]
result_std[i-1] = row[2]
logging.info('retrieved')
result_top = result_mean + 2 * result_std
result_low = result_mean - 2 * result_std
sc = plt.scatter(timepoints, result_mean, label = name_algo, s = 5)
color = sc.get_facecolors()[0].tolist()
plt.fill_between(timepoints, result_top, result_low, facecolor = color, alpha=0.5)
plt.legend(loc = 'upper left')
plt.xlim((1e-1,timeout))
plt.xscale('log')
plt.xlabel('time (in seconds)')
plt.ticklabel_format(axis='y', style='sci')
plt.ylabel('number of programs')
plt.yscale('log')
plt.savefig("results_syntactic/comparison_heapsearch_astar_%s.png" % (seed),
dpi=500,
bbox_inches='tight')
plt.clf()
timeout = 10
number_samples = 10
number_timepoints = 1_000
max_number_programs = 1e6
create_dataset()
plot()
|
import logging
import time
import random
import csv
import matplotlib.pyplot as plt
import numpy as np
from math import log10
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL
from program import Program, Function, Variable, BasicPrimitive, New
from cfg import CFG
from pcfg import PCFG
from dsl import DSL
from program_as_list import reconstruct_from_compressed
from Algorithms.heap_search import heap_search
# from Algorithms.heap_search_naive import heap_search_naive
from Algorithms.a_star import a_star
from DSL.deepcoder import semantics,primitive_types
logging_levels = {0: logging.INFO, 1: logging.DEBUG}
verbosity = 0
logging.basicConfig(format='%(message)s', level=logging_levels[verbosity])
seed = 100
random.seed(seed)
np.random.seed(seed)
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT),List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request, max_program_depth = 4)
list_algorithms = [
# (heap_search_naive, 'Heap Search naive', {}),
(heap_search, 'Heap Search', {}),
(a_star, 'A*', {}),
]
reconstruct = {a_star}
def run_algorithm(pcfg, algo_index):
'''
Run the algorithm until timeout, and for each program record probability and time of output
'''
algorithm, name_algo, param = list_algorithms[algo_index]
result = []
search_time = 0
gen = algorithm(pcfg, **param)
nb_programs = 0
cumulative_probability = 0
while (search_time < timeout):
search_time -= time.perf_counter()
try:
program = next(gen)
except StopIteration:
search_time += time.perf_counter()
logging.debug(
"Output the last program after {}".format(nb_programs))
break # no next program
search_time += time.perf_counter()
# logging.debug('program found: {}'.format(program))
if algorithm in reconstruct:
target_type = pcfg.start[0]
program = reconstruct_from_compressed(program, target_type)
probability = pcfg.probability_program(pcfg.start, program)
cumulative_probability += probability
nb_programs += 1
row = search_time, probability, cumulative_probability
result.append(row)
return result
def create_dataset():
logging.info('Create dataset')
number_algorithms = len(list_algorithms)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
timepoints = np.logspace(start = -1, stop = log10(timeout), num = number_timepoints)
r_program = np.zeros((number_samples, number_algorithms, number_timepoints))
for i in range(number_samples):
for algo_index in range(number_algorithms):
algorithm, name_algo, param = list_algorithms[algo_index]
logging.info('start run number {}: {}'.format(i+1, name_algo))
res = run_algorithm(pcfg = deepcoder_PCFG, algo_index = algo_index)
r_program[i][algo_index] = np.interp(timepoints,
[search_time for search_time, _, _ in res],
range(len(res)))
logging.info('finished run number {}'.format(i+1))
result_mean = np.mean(r_program, axis=0)
result_std = np.std(r_program, axis=0)
for algo_index in range(number_algorithms):
algorithm, name_algo, param = list_algorithms[algo_index]
with open('results_syntactic/run_{}_{}.csv'.format(name_algo, timeout), 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
header = ['time', 'mean number of programs', 'standard deviation']
writer.writerow(header)
for x,t in enumerate(timepoints):
writer.writerow((t, result_mean[algo_index][x], result_std[algo_index][x]))
# Plot comparison
def plot():
logging.info('Plot comparison')
timepoints = np.logspace(start = -1, stop = log10(timeout), num = number_timepoints)
for algo_index in range(len(list_algorithms)):
algorithm, name_algo, param = list_algorithms[algo_index]
logging.info('retrieve run: {}'.format(name_algo))
with open('results_syntactic/run_{}_{}.csv'.format(name_algo, timeout), 'r', encoding='UTF8', newline='') as f:
reader = csv.reader(f)
result_mean = np.zeros(number_timepoints)
result_std = np.zeros(number_timepoints)
for i, row in enumerate(reader):
if i == 0:
continue
result_mean[i-1] = row[1]
result_std[i-1] = row[2]
logging.info('retrieved')
result_top = result_mean + 2 * result_std
result_low = result_mean - 2 * result_std
sc = plt.scatter(timepoints, result_mean, label = name_algo, s = 5)
color = sc.get_facecolors()[0].tolist()
plt.fill_between(timepoints, result_top, result_low, facecolor = color, alpha=0.5)
plt.legend(loc = 'upper left')
plt.xlim((1e-1,timeout))
plt.xscale('log')
plt.xlabel('time (in seconds)')
plt.ticklabel_format(axis='y', style='sci')
plt.ylabel('number of programs')
plt.yscale('log')
plt.savefig("results_syntactic/comparison_heapsearch_astar_%s.png" % (seed),
dpi=500,
bbox_inches='tight')
plt.clf()
timeout = 10
number_samples = 10
number_timepoints = 1_000
max_number_programs = 1e6
create_dataset()
plot()
|
en
| 0.725091
|
# from Algorithms.heap_search_naive import heap_search_naive # (heap_search_naive, 'Heap Search naive', {}), Run the algorithm until timeout, and for each program record probability and time of output # no next program # logging.debug('program found: {}'.format(program)) # Plot comparison
| 2.56705
| 3
|
Task_1/Task_1A/Code/nnet/__init__.py
|
NithishB/eyantra2k18
| 0
|
6626397
|
from nnet import activation, loss, model, optimizer
|
from nnet import activation, loss, model, optimizer
|
none
| 1
| 1.010425
| 1
|
|
multischema/multischema/routers.py
|
jelitox/django_async
| 0
|
6626398
|
"""Class definition for Database Routing."""
from django.urls import resolve
from django.apps import apps
from django.conf import settings
from django.db import router, connections
import sys
import threading
from django.http import Http404
request_cfg = threading.local()
DEFAULT_DB_ALIAS = 'default'
USER_APPS = settings.DATABASES.keys()
SYSTEM_APPS = [ 'admin', 'auth', 'contenttypes', 'dashboard', 'sessions', 'sites', 'silk', 'social_django', 'notifications', 'social_django' ]
SYSTEM_TABLES = [ 'auth_user', 'auth_group', 'auth_permission', 'auth_user_groups', 'auth_user_user_permissions', 'social_auth_usersocialauth' ]
class schemaRouter(object):
"""A router to control troc db operations."""
db = None
index = len(USER_APPS)
def __init__(self):
"""Get information about databases."""
self.db = settings.DATABASES
def _multi_db(self, model):
from django.conf import settings
#print(model._meta.db_table)
if hasattr(request_cfg, 'db'):
print(request_cfg.db)
if request_cfg.db in self.db.keys():
return request_cfg.db
else:
raise Http404
else:
return DEFAULT_DB_ALIAS
def db_for_read(self, model, **hints):
"""Point all operations on app1 models to 'db_app1'."""
if model._meta.app_label in SYSTEM_APPS:
return DEFAULT_DB_ALIAS
if model._meta.app_label in self.db.keys():
return model._meta.app_label
else:
return self._multi_db()
return None
def db_for_write(self, model, **hints):
"""Point all operations on app1 models to 'db_app1'."""
if model._meta.app_label in SYSTEM_APPS or model._meta.db_table in SYSTEM_TABLES:
return DEFAULT_DB_ALIAS
if model._meta.app_label in self.db.keys():
#db_table = 'schema\".\"tablename'
try:
readonly = self.db[model._meta.app_label]['PARAMS']['readonly']
if readonly:
# Read Only Database
return False
else:
table = model._meta.db_table
if table.find('.') == -1:
schema = model._meta.app_label
model._meta.db_table = '{}\".\"{}'.format(schema, table)
return self._multi_db(model)
except KeyError:
table = model._meta.db_table
#print(table.find('.'))
if table.find('.') == -1:
schema = model._meta.app_label
model._meta.db_table = '{}\".\"{}'.format(schema, table)
#model._meta.db_table = '{}.{}'.format(schema, table)
return self._multi_db(model)
return None
def allow_relation(self, obj1, obj2, **hints):
"""Allow any relation if a model in app1 is involved."""
if obj1._meta.app_label in [ 'auth' ] or obj2._meta.app_label in [ 'auth' ]:
""" Can made migrations with AUTH model """
return True
if obj1._meta.app_label in self.db.keys() or obj2._meta.app_label in self.db.keys():
try:
db1 = self.db[obj1._meta.app_label]['NAME']
db2 = self.db[obj2._meta.app_label]['NAME']
except KeyError:
return True
if db1 == db2:
""" Both DB are the same """
return True
else:
return False
return None
def allow_migrate(self, db, app_label, model=None, **hints):
"""Make sure the app1 only appears in the 'app1' database."""
if app_label in SYSTEM_APPS:
db == 'default'
return True
if db == 'default':
#print('APP LABEL: %s DB: %s' % (app_label, b))
if app_label in self.db.keys():
# cannot run migration of app models onto default database
db = app_label
return True
elif model and model._meta.app_label in self.db.keys():
db = app_label
return False
else:
return None
if model and app_label in self.db.keys():
try:
readonly = self.db[app_label]['PARAMS']['readonly']
if readonly:
return False
else:
return True
except KeyError:
return True
return None
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
if self.Migration._meta.db_table in self.connection.introspection.get_table_list(self.connection.cursor()):
return
if router.allow_migrate(self.connection, self.Migration):
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
|
"""Class definition for Database Routing."""
from django.urls import resolve
from django.apps import apps
from django.conf import settings
from django.db import router, connections
import sys
import threading
from django.http import Http404
request_cfg = threading.local()
DEFAULT_DB_ALIAS = 'default'
USER_APPS = settings.DATABASES.keys()
SYSTEM_APPS = [ 'admin', 'auth', 'contenttypes', 'dashboard', 'sessions', 'sites', 'silk', 'social_django', 'notifications', 'social_django' ]
SYSTEM_TABLES = [ 'auth_user', 'auth_group', 'auth_permission', 'auth_user_groups', 'auth_user_user_permissions', 'social_auth_usersocialauth' ]
class schemaRouter(object):
"""A router to control troc db operations."""
db = None
index = len(USER_APPS)
def __init__(self):
"""Get information about databases."""
self.db = settings.DATABASES
def _multi_db(self, model):
from django.conf import settings
#print(model._meta.db_table)
if hasattr(request_cfg, 'db'):
print(request_cfg.db)
if request_cfg.db in self.db.keys():
return request_cfg.db
else:
raise Http404
else:
return DEFAULT_DB_ALIAS
def db_for_read(self, model, **hints):
"""Point all operations on app1 models to 'db_app1'."""
if model._meta.app_label in SYSTEM_APPS:
return DEFAULT_DB_ALIAS
if model._meta.app_label in self.db.keys():
return model._meta.app_label
else:
return self._multi_db()
return None
def db_for_write(self, model, **hints):
"""Point all operations on app1 models to 'db_app1'."""
if model._meta.app_label in SYSTEM_APPS or model._meta.db_table in SYSTEM_TABLES:
return DEFAULT_DB_ALIAS
if model._meta.app_label in self.db.keys():
#db_table = 'schema\".\"tablename'
try:
readonly = self.db[model._meta.app_label]['PARAMS']['readonly']
if readonly:
# Read Only Database
return False
else:
table = model._meta.db_table
if table.find('.') == -1:
schema = model._meta.app_label
model._meta.db_table = '{}\".\"{}'.format(schema, table)
return self._multi_db(model)
except KeyError:
table = model._meta.db_table
#print(table.find('.'))
if table.find('.') == -1:
schema = model._meta.app_label
model._meta.db_table = '{}\".\"{}'.format(schema, table)
#model._meta.db_table = '{}.{}'.format(schema, table)
return self._multi_db(model)
return None
def allow_relation(self, obj1, obj2, **hints):
"""Allow any relation if a model in app1 is involved."""
if obj1._meta.app_label in [ 'auth' ] or obj2._meta.app_label in [ 'auth' ]:
""" Can made migrations with AUTH model """
return True
if obj1._meta.app_label in self.db.keys() or obj2._meta.app_label in self.db.keys():
try:
db1 = self.db[obj1._meta.app_label]['NAME']
db2 = self.db[obj2._meta.app_label]['NAME']
except KeyError:
return True
if db1 == db2:
""" Both DB are the same """
return True
else:
return False
return None
def allow_migrate(self, db, app_label, model=None, **hints):
"""Make sure the app1 only appears in the 'app1' database."""
if app_label in SYSTEM_APPS:
db == 'default'
return True
if db == 'default':
#print('APP LABEL: %s DB: %s' % (app_label, b))
if app_label in self.db.keys():
# cannot run migration of app models onto default database
db = app_label
return True
elif model and model._meta.app_label in self.db.keys():
db = app_label
return False
else:
return None
if model and app_label in self.db.keys():
try:
readonly = self.db[app_label]['PARAMS']['readonly']
if readonly:
return False
else:
return True
except KeyError:
return True
return None
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
if self.Migration._meta.db_table in self.connection.introspection.get_table_list(self.connection.cursor()):
return
if router.allow_migrate(self.connection, self.Migration):
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
|
en
| 0.753891
|
Class definition for Database Routing. A router to control troc db operations. Get information about databases. #print(model._meta.db_table) Point all operations on app1 models to 'db_app1'. Point all operations on app1 models to 'db_app1'. #db_table = 'schema\".\"tablename' # Read Only Database #print(table.find('.')) #model._meta.db_table = '{}.{}'.format(schema, table) Allow any relation if a model in app1 is involved. Can made migrations with AUTH model Both DB are the same Make sure the app1 only appears in the 'app1' database. #print('APP LABEL: %s DB: %s' % (app_label, b)) # cannot run migration of app models onto default database Ensures the table exists and has the correct schema.
| 2.616301
| 3
|
RecoTracker/TkSeedGenerator/python/GlobalCombinedSeeds_cff.py
|
ckamtsikis/cmssw
| 852
|
6626399
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.GlobalCombinedSeeds_cfi import *
|
import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.GlobalCombinedSeeds_cfi import *
|
none
| 1
| 1.071465
| 1
|
|
acme/acme/standalone_test.py
|
jcollie/certbot
| 4
|
6626400
|
<reponame>jcollie/certbot
"""Tests for acme.standalone."""
import multiprocessing
import os
import shutil
import socket
import threading
import tempfile
import unittest
import time
from contextlib import closing
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
import josepy as jose
import mock
import requests
from acme import challenges
from acme import crypto_util
from acme import errors
from acme import test_util
from acme.magic_typing import Set # pylint: disable=unused-import, no-name-in-module
class TLSServerTest(unittest.TestCase):
"""Tests for acme.standalone.TLSServer."""
def test_bind(self): # pylint: disable=no-self-use
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
server.server_close()
def test_ipv6(self):
if socket.has_ipv6:
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True, ipv6=True)
server.server_close()
class TLSSNI01ServerTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01Server."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01Server
self.server = TLSSNI01Server(('localhost', 0), certs=self.certs)
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_it(self):
host, port = self.server.socket.getsockname()[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01ServerTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01Server."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01Server
self.server = HTTP01Server(('', 0), resources=self.resources)
self.port = self.server.socket.getsockname()[1]
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class BaseDualNetworkedServersTest(unittest.TestCase):
    """Test for acme.standalone.BaseDualNetworkedServers."""
    class SingleProtocolServer(socketserver.TCPServer):
        """Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6."""
        def __init__(self, *args, **kwargs):
            # "ipv6" is our private flag; pop it before TCPServer sees kwargs.
            ipv6 = kwargs.pop("ipv6", False)
            if ipv6:
                self.address_family = socket.AF_INET6
                # Defer bind/activate so IPV6_V6ONLY can be set first.
                kwargs["bind_and_activate"] = False
            else:
                self.address_family = socket.AF_INET
            socketserver.TCPServer.__init__(self, *args, **kwargs)
            if ipv6:
                # NB: On Windows, socket.IPPROTO_IPV6 constant may be missing.
                # We use the corresponding value (41) instead.
                level = getattr(socket, "IPPROTO_IPV6", 41)
                self.socket.setsockopt(level, socket.IPV6_V6ONLY, 1)
                try:
                    self.server_bind()
                    self.server_activate()
                except:
                    # Mirrors socketserver.TCPServer.__init__: close the
                    # socket on any failure, then re-raise.
                    self.server_close()
                    raise
    @mock.patch("socket.socket.bind")
    def test_fail_to_bind(self, mock_bind):
        # When every bind attempt fails, the constructor must propagate
        # socket.error rather than returning a serverless wrapper.
        mock_bind.side_effect = socket.error
        from acme.standalone import BaseDualNetworkedServers
        self.assertRaises(socket.error, BaseDualNetworkedServers,
                          BaseDualNetworkedServersTest.SingleProtocolServer,
                          ('', 0),
                          socketserver.BaseRequestHandler)
    def test_ports_equal(self):
        # The IPv4 and IPv6 servers must end up bound to the same port.
        from acme.standalone import BaseDualNetworkedServers
        servers = BaseDualNetworkedServers(
            BaseDualNetworkedServersTest.SingleProtocolServer,
            ('', 0),
            socketserver.BaseRequestHandler)
        socknames = servers.getsocknames()
        prev_port = None
        # assert ports are equal
        for sockname in socknames:
            port = sockname[1]
            if prev_port:
                self.assertEqual(prev_port, port)
            prev_port = port
class TLSSNI01DualNetworkedServersTest(unittest.TestCase):
    """Test for acme.standalone.TLSSNI01DualNetworkedServers."""
    def setUp(self):
        # Key/certificate pair served for the b'localhost' SNI name.
        self.certs = {b'localhost': (
            test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
            test_util.load_cert('rsa2048_cert.pem'),
        )}
        from acme.standalone import TLSSNI01DualNetworkedServers
        self.servers = TLSSNI01DualNetworkedServers(('localhost', 0), certs=self.certs)
        # NOTE(review): serve_forever() returns here, so it evidently
        # serves from background threads -- confirm in acme.standalone.
        self.servers.serve_forever()
    def tearDown(self):
        self.servers.shutdown_and_server_close()
    def test_connect(self):
        socknames = self.servers.getsocknames()
        # connect to all addresses
        for sockname in socknames:
            host, port = sockname[:2]
            cert = crypto_util.probe_sni(
                b'localhost', host=host, port=port, timeout=1)
            # Every bound address must present the configured certificate.
            self.assertEqual(jose.ComparableX509(cert),
                             jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01DualNetworkedServersTest(unittest.TestCase):
    """Tests for acme.standalone.HTTP01DualNetworkedServers."""
    def setUp(self):
        self.account_key = jose.JWK.load(
            test_util.load_vector('rsa1024_key.pem'))
        # Challenge resources shared with the servers; tests add to it.
        self.resources = set() # type: Set
        from acme.standalone import HTTP01DualNetworkedServers
        self.servers = HTTP01DualNetworkedServers(('', 0), resources=self.resources)
        # All wrapped servers share one port; read it from the first socket.
        self.port = self.servers.getsocknames()[0][1]
        # NOTE(review): serve_forever() returns here, so it evidently
        # serves from background threads -- confirm in acme.standalone.
        self.servers.serve_forever()
    def tearDown(self):
        self.servers.shutdown_and_server_close()
    def test_index(self):
        # The root path serves the standalone solver banner page.
        response = requests.get(
            'http://localhost:{0}'.format(self.port), verify=False)
        self.assertEqual(
            response.text, 'ACME client standalone challenge solver')
        self.assertTrue(response.ok)
    def test_404(self):
        # Paths outside the challenge namespace yield HTTP 404.
        response = requests.get(
            'http://localhost:{0}/foo'.format(self.port), verify=False)
        self.assertEqual(response.status_code, http_client.NOT_FOUND)
    def _test_http01(self, add):
        # Build an HTTP-01 challenge, optionally register its resource,
        # then verify it the way a CA would.  Returns the verify result.
        chall = challenges.HTTP01(token=(b'x' * 16))
        response, validation = chall.response_and_validation(self.account_key)
        from acme.standalone import HTTP01RequestHandler
        resource = HTTP01RequestHandler.HTTP01Resource(
            chall=chall, response=response, validation=validation)
        if add:
            self.resources.add(resource)
        return resource.response.simple_verify(
            resource.chall, 'localhost', self.account_key.public_key(),
            port=self.port)
    def test_http01_found(self):
        # A registered challenge resource must verify successfully.
        self.assertTrue(self._test_http01(add=True))
    def test_http01_not_found(self):
        # An unregistered challenge must fail verification.
        self.assertFalse(self._test_http01(add=False))
class TestSimpleTLSSNI01Server(unittest.TestCase):
    """Tests for acme.standalone.simple_tls_sni_01_server."""
    def setUp(self):
        # mirror ../examples/standalone: a cwd containing
        # <name>/cert.pem and <name>/key.pem per SNI name.
        self.test_cwd = tempfile.mkdtemp()
        localhost_dir = os.path.join(self.test_cwd, 'localhost')
        os.makedirs(localhost_dir)
        shutil.copy(test_util.vector_path('rsa2048_cert.pem'),
                    os.path.join(localhost_dir, 'cert.pem'))
        shutil.copy(test_util.vector_path('rsa2048_key.pem'),
                    os.path.join(localhost_dir, 'key.pem'))
        # Pick a free port by binding an ephemeral socket and closing it.
        # NOTE(review): small race -- another process could grab the port
        # between close and server start.
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.bind(('', 0))
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.port = sock.getsockname()[1]
        from acme.standalone import simple_tls_sni_01_server
        # Child process running the server CLI; only test_live starts it.
        self.process = multiprocessing.Process(target=simple_tls_sni_01_server,
                                               args=(['path', '-p', str(self.port)],))
        self.old_cwd = os.getcwd()
        os.chdir(self.test_cwd)
    def tearDown(self):
        os.chdir(self.old_cwd)
        if self.process.is_alive():
            self.process.terminate()
        self.process.join(timeout=5)
        # Check that we didn't timeout waiting for the process to
        # terminate.
        self.assertNotEqual(self.process.exitcode, None)
        shutil.rmtree(self.test_cwd)
    @mock.patch('acme.standalone.TLSSNI01Server.handle_request')
    def test_mock(self, handle):
        # With the handler mocked out, forever=False must serve exactly
        # one request and return.
        from acme.standalone import simple_tls_sni_01_server
        simple_tls_sni_01_server(cli_args=['path', '-p', str(self.port)], forever=False)
        self.assertEqual(handle.call_count, 1)
    def test_live(self):
        # Start the real server process and poll (up to ~5s) until it
        # answers an SNI probe with the expected certificate.
        self.process.start()
        cert = None
        for _ in range(50):
            time.sleep(0.1)
            try:
                cert = crypto_util.probe_sni(b'localhost', b'127.0.0.1', self.port)
                break
            except errors.Error: # pragma: no cover
                pass
        self.assertEqual(jose.ComparableX509(cert),
                         test_util.load_comparable_cert('rsa2048_cert.pem'))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
"""Tests for acme.standalone."""
import multiprocessing
import os
import shutil
import socket
import threading
import tempfile
import unittest
import time
from contextlib import closing
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
import josepy as jose
import mock
import requests
from acme import challenges
from acme import crypto_util
from acme import errors
from acme import test_util
from acme.magic_typing import Set # pylint: disable=unused-import, no-name-in-module
class TLSServerTest(unittest.TestCase):
    """Tests for acme.standalone.TLSServer."""
    def test_bind(self): # pylint: disable=no-self-use
        # TLSServer must bind and activate an IPv4 socket on construction.
        from acme.standalone import TLSServer
        server = TLSServer(
            ('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
        server.server_close()
    def test_ipv6(self):
        # TLSServer must bind an IPv6 socket when the platform supports it.
        # Previously this test silently passed on IPv4-only hosts; report
        # it as skipped instead so the coverage gap is visible.
        if not socket.has_ipv6:
            self.skipTest('IPv6 is not available on this platform')
        from acme.standalone import TLSServer
        server = TLSServer(
            ('', 0), socketserver.BaseRequestHandler, bind_and_activate=True, ipv6=True)
        server.server_close()
class TLSSNI01ServerTest(unittest.TestCase):
    """Test for acme.standalone.TLSSNI01Server."""
    def setUp(self):
        # Key/certificate pair served for the b'localhost' SNI name.
        self.certs = {b'localhost': (
            test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
            test_util.load_cert('rsa2048_cert.pem'),
        )}
        from acme.standalone import TLSSNI01Server
        self.server = TLSSNI01Server(('localhost', 0), certs=self.certs)
        # Serve on a background thread so the test can act as a client.
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.start()
    def tearDown(self):
        # shutdown() must precede join() or the worker never exits.
        self.server.shutdown()
        self.thread.join()
    def test_it(self):
        # Probe with SNI b'localhost' and check the configured
        # certificate is the one presented.
        host, port = self.server.socket.getsockname()[:2]
        cert = crypto_util.probe_sni(
            b'localhost', host=host, port=port, timeout=1)
        self.assertEqual(jose.ComparableX509(cert),
                         jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01ServerTest(unittest.TestCase):
    """Tests for acme.standalone.HTTP01Server."""
    def setUp(self):
        self.account_key = jose.JWK.load(
            test_util.load_vector('rsa1024_key.pem'))
        # Challenge resources shared with the server; tests add to it.
        self.resources = set() # type: Set
        from acme.standalone import HTTP01Server
        self.server = HTTP01Server(('', 0), resources=self.resources)
        self.port = self.server.socket.getsockname()[1]
        # Serve on a background thread so the test can act as a client.
        self.thread = threading.Thread(target=self.server.serve_forever)
        self.thread.start()
    def tearDown(self):
        # shutdown() must precede join() or the worker never exits.
        self.server.shutdown()
        self.thread.join()
    def test_index(self):
        # The root path serves the standalone solver banner page.
        response = requests.get(
            'http://localhost:{0}'.format(self.port), verify=False)
        self.assertEqual(
            response.text, 'ACME client standalone challenge solver')
        self.assertTrue(response.ok)
    def test_404(self):
        # Paths outside the challenge namespace yield HTTP 404.
        response = requests.get(
            'http://localhost:{0}/foo'.format(self.port), verify=False)
        self.assertEqual(response.status_code, http_client.NOT_FOUND)
    def _test_http01(self, add):
        # Build an HTTP-01 challenge, optionally register its resource,
        # then verify it the way a CA would.  Returns the verify result.
        chall = challenges.HTTP01(token=(b'x' * 16))
        response, validation = chall.response_and_validation(self.account_key)
        from acme.standalone import HTTP01RequestHandler
        resource = HTTP01RequestHandler.HTTP01Resource(
            chall=chall, response=response, validation=validation)
        if add:
            self.resources.add(resource)
        return resource.response.simple_verify(
            resource.chall, 'localhost', self.account_key.public_key(),
            port=self.port)
    def test_http01_found(self):
        # A registered challenge resource must verify successfully.
        self.assertTrue(self._test_http01(add=True))
    def test_http01_not_found(self):
        # An unregistered challenge must fail verification.
        self.assertFalse(self._test_http01(add=False))
class BaseDualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.BaseDualNetworkedServers."""
class SingleProtocolServer(socketserver.TCPServer):
"""Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6."""
def __init__(self, *args, **kwargs):
ipv6 = kwargs.pop("ipv6", False)
if ipv6:
self.address_family = socket.AF_INET6
kwargs["bind_and_activate"] = False
else:
self.address_family = socket.AF_INET
socketserver.TCPServer.__init__(self, *args, **kwargs)
if ipv6:
# NB: On Windows, socket.IPPROTO_IPV6 constant may be missing.
# We use the corresponding value (41) instead.
level = getattr(socket, "IPPROTO_IPV6", 41)
self.socket.setsockopt(level, socket.IPV6_V6ONLY, 1)
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
@mock.patch("socket.socket.bind")
def test_fail_to_bind(self, mock_bind):
mock_bind.side_effect = socket.error
from acme.standalone import BaseDualNetworkedServers
self.assertRaises(socket.error, BaseDualNetworkedServers,
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
def test_ports_equal(self):
from acme.standalone import BaseDualNetworkedServers
servers = BaseDualNetworkedServers(
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
socknames = servers.getsocknames()
prev_port = None
# assert ports are equal
for sockname in socknames:
port = sockname[1]
if prev_port:
self.assertEqual(prev_port, port)
prev_port = port
class TLSSNI01DualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01DualNetworkedServers."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01DualNetworkedServers
self.servers = TLSSNI01DualNetworkedServers(('localhost', 0), certs=self.certs)
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_connect(self):
socknames = self.servers.getsocknames()
# connect to all addresses
for sockname in socknames:
host, port = sockname[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01DualNetworkedServersTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01DualNetworkedServers."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01DualNetworkedServers
self.servers = HTTP01DualNetworkedServers(('', 0), resources=self.resources)
self.port = self.servers.getsocknames()[0][1]
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class TestSimpleTLSSNI01Server(unittest.TestCase):
"""Tests for acme.standalone.simple_tls_sni_01_server."""
def setUp(self):
# mirror ../examples/standalone
self.test_cwd = tempfile.mkdtemp()
localhost_dir = os.path.join(self.test_cwd, 'localhost')
os.makedirs(localhost_dir)
shutil.copy(test_util.vector_path('rsa2048_cert.pem'),
os.path.join(localhost_dir, 'cert.pem'))
shutil.copy(test_util.vector_path('rsa2048_key.pem'),
os.path.join(localhost_dir, 'key.pem'))
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port = sock.getsockname()[1]
from acme.standalone import simple_tls_sni_01_server
self.process = multiprocessing.Process(target=simple_tls_sni_01_server,
args=(['path', '-p', str(self.port)],))
self.old_cwd = os.getcwd()
os.chdir(self.test_cwd)
def tearDown(self):
os.chdir(self.old_cwd)
if self.process.is_alive():
self.process.terminate()
self.process.join(timeout=5)
# Check that we didn't timeout waiting for the process to
# terminate.
self.assertNotEqual(self.process.exitcode, None)
shutil.rmtree(self.test_cwd)
@mock.patch('acme.standalone.TLSSNI01Server.handle_request')
def test_mock(self, handle):
from acme.standalone import simple_tls_sni_01_server
simple_tls_sni_01_server(cli_args=['path', '-p', str(self.port)], forever=False)
self.assertEqual(handle.call_count, 1)
def test_live(self):
self.process.start()
cert = None
for _ in range(50):
time.sleep(0.1)
try:
cert = crypto_util.probe_sni(b'localhost', b'127.0.0.1', self.port)
break
except errors.Error: # pragma: no cover
pass
self.assertEqual(jose.ComparableX509(cert),
test_util.load_comparable_cert('rsa2048_cert.pem'))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
en
| 0.670555
|
Tests for acme.standalone. # pylint: disable=import-error # type: ignore # pylint: disable=import-error # pylint: disable=unused-import, no-name-in-module Tests for acme.standalone.TLSServer. # pylint: disable=no-self-use Test for acme.standalone.TLSSNI01Server. Tests for acme.standalone.HTTP01Server. # type: Set Test for acme.standalone.BaseDualNetworkedServers. Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6. # NB: On Windows, socket.IPPROTO_IPV6 constant may be missing. # We use the corresponding value (41) instead. # assert ports are equal Test for acme.standalone.TLSSNI01DualNetworkedServers. # connect to all addresses Tests for acme.standalone.HTTP01DualNetworkedServers. # type: Set Tests for acme.standalone.simple_tls_sni_01_server. # mirror ../examples/standalone # Check that we didn't timeout waiting for the process to # terminate. # pragma: no cover # pragma: no cover
| 2.098183
| 2
|
k-nearest-neighbors/k_nearest_neighbor_knn_tutorial.py
|
fimoziq/tutorials
| 670
|
6626401
|
# -*- coding: utf-8 -*-
"""
#K-Nearest Neighbors (KNN) Algorithm Tutorial - Machine Learning Basics
* Tutorial: https://news.towardsai.net/knn
* Github: https://github.com/towardsai/tutorials/tree/master/k-nearest-neighbors
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# Import the iris dataset as provided by the sklearn Python module
from sklearn.datasets import load_iris
iris = load_iris()
# Inspect the loaded object's type; result is discarded -- only useful
# in an interactive/notebook session.
type(iris)
# Converting sklearn data into Pandas dataframe
# target variables imply
# 0.0 - Setosa
# 1.0 - Versicolor
# 2.0 - Virginica
iris = pd.DataFrame(data= np.c_[iris['data'], iris['target']],
                     columns= iris['feature_names'] + ['target'])
# Show the first five rows (visible only in a notebook session).
iris.head()
"""## Checking for outliers and imbalanced data"""
# data is perfectly balanced
sns.countplot(x='target', data=iris)
# not many outliers to be handled
for feature in ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']:
    sns.boxplot(x='target', y=feature, data=iris)
    plt.show()
"""## Plotting a 2-D graph"""
sns.scatterplot(x='sepal length (cm)', y='sepal width (cm)', data=iris, hue='target', palette="deep")
"""## Separating features and target"""
# X variable contains flower features
# Y variable contains target values
X = iris.drop(['target'], axis=1)
y = iris['target']
"""## Split the dataset into train and test sets"""
# NOTE(review): test_size=0.2 gives an 80% train / 20% test split --
# the original comment's "60% / 40%" claim did not match this call.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# NOTE(review): this k-scan duplicates the "Finding the right k-value"
# section further down, but runs on the 80/20 split; likely a leftover.
# checking accuracy score for k-values ranging from 1 to 25
k_range = list(range(1,26))
scores = []
# model fitting and calculating accuracy score
# for each k-value in the range 1-25
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for different values of k')
plt.show()
# 60% of the data will be randomly selected as training data,
# the remaining 40% as testing data (test_size=0.4)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
"""## Initial model"""
# Initial model with nearest neighbor count of 1 (k-value);
# below, k is replaced with the optimal value
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))
"""## Finding the right k-value"""
# checking accuracy score for k-values ranging from 1 to 25
k_range = list(range(1,26))
scores = []
# model fitting and calculating accuracy score
# for each k-value in the range 1-25
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for different values of k')
plt.show()
"""## Accuracy for optimal k-value"""
# 11 is the optimal k-value for this dataset
knn = KNeighborsClassifier(n_neighbors=11)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))
"""## Predicting class of new data"""
knn = KNeighborsClassifier(n_neighbors=11)
# fitting the entire data without splitting
# into train and test
knn.fit(iris.drop(['target'], axis=1), iris['target'])
# new data to be classified:
# [sepal length, sepal width, petal length, petal width] in cm
X_new = np.array([[1, 2.9, 10, 0.2]])
prediction = knn.predict(X_new)
print(prediction)
if prediction[0] == 0.0:
    print('Setosa')
elif prediction[0] == 1.0:
    print('Versicolor')
else:
    print('Virginica')
|
# -*- coding: utf-8 -*-
"""
#K-Nearest Neighbors (KNN) Algorithm Tutorial - Machine Learning Basics
* Tutorial: https://news.towardsai.net/knn
* Github: https://github.com/towardsai/tutorials/tree/master/k-nearest-neighbors
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# Import the iris dataset as provided by the sklearn Python module
from sklearn.datasets import load_iris
iris = load_iris()
type(iris)
# Converting sklearn data into Pandas dataframe
# target variables imply
# 0.0 - Setosa
# 1.0 - Versicolor
# 2.0 - Virginica
iris = pd.DataFrame(data= np.c_[iris['data'], iris['target']],
columns= iris['feature_names'] + ['target'])
iris.head()
"""## Checking for outliers and imbalanced data"""
# data is perfectly balanced
sns.countplot(x='target', data=iris)
# not much of outliers to br handled
for feature in ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']:
sns.boxplot(x='target', y=feature, data=iris)
plt.show()
"""## Plotting a 2-D graph"""
sns.scatterplot(x='sepal length (cm)', y='sepal width (cm)', data=iris, hue='target', palette="deep")
"""## Separating features and target"""
# X variable contains flower features
# Y variable contains target values
X = iris.drop(['target'], axis=1)
y = iris['target']
"""## Split the dataset into train and test sets"""
# 60% of the data will be randomly selected at training data
# remaining 40% as testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# checking accuracy score for k-value rangin from 1 to 26
k_range = list(range(1,26))
scores = []
# model fitting and calculating accuracy score
# for each k-value in the range 1-26
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for different values of k')
plt.show()
# 60% of the data will be randomly selected at training data
# remaining 40% as testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
"""## Initial model"""
# Initial model with nearest neighbor as 1(k-value)
# further, k will be replaced with optimal value
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))
"""## Finding the right k-value"""
# checking accuracy score for k-value rangin from 1 to 26
k_range = list(range(1,26))
scores = []
# model fitting and calculating accuracy score
# for each k-value in the range 1-26
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
scores.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, scores)
plt.xlabel('Value of k')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for different values of k')
plt.show()
"""## Accuracy for optimal k-value"""
# 11 is the optimal k-value for this dataset
knn = KNeighborsClassifier(n_neighbors=11)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))
"""## Predicting class of new data"""
knn = KNeighborsClassifier(n_neighbors=11)
# fitting the entire data without splitting
# into train and test
knn.fit(iris.drop(['target'], axis=1), iris['target'])
# new data to be classified
X_new = np.array([[1, 2.9, 10, 0.2]])
prediction = knn.predict(X_new)
print(prediction)
if prediction[0] == 0.0:
print('Setosa')
elif prediction[0] == 1.0:
print('Versicolor')
else:
print('Virginica')
|
en
| 0.813926
|
# -*- coding: utf-8 -*- #K-Nearest Neighbors (KNN) Algorithm Tutorial - Machine Learning Basics * Tutorial: https://news.towardsai.net/knn * Github: https://github.com/towardsai/tutorials/tree/master/k-nearest-neighbors # Import the iris dataset as provided by the sklearn Python module # Converting sklearn data into Pandas dataframe # target variables imply # 0.0 - Setosa # 1.0 - Versicolor # 2.0 - Virginica ## Checking for outliers and imbalanced data # data is perfectly balanced # not much of outliers to br handled ## Plotting a 2-D graph ## Separating features and target # X variable contains flower features # Y variable contains target values ## Split the dataset into train and test sets # 60% of the data will be randomly selected at training data # remaining 40% as testing data # checking accuracy score for k-value rangin from 1 to 26 # model fitting and calculating accuracy score # for each k-value in the range 1-26 # 60% of the data will be randomly selected at training data # remaining 40% as testing data ## Initial model # Initial model with nearest neighbor as 1(k-value) # further, k will be replaced with optimal value ## Finding the right k-value # checking accuracy score for k-value rangin from 1 to 26 # model fitting and calculating accuracy score # for each k-value in the range 1-26 ## Accuracy for optimal k-value # 11 is the optimal k-value for this dataset ## Predicting class of new data # fitting the entire data without splitting # into train and test # new data to be classified
| 3.885892
| 4
|
python/GafferUI/MultiLineStringPlugValueWidget.py
|
ddesmond/gaffer
| 561
|
6626402
|
##########################################################################
#
# Copyright (c) 2012, <NAME>. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
## Supported Metadata :
#
# - "multiLineStringPlugValueWidget:continuousUpdate"
# - "multiLineStringPlugValueWidget:role"
class MultiLineStringPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget for editing string plugs in a MultiLineTextWidget."""
	def __init__( self, plug, **kw ) :
		self.__textWidget = GafferUI.MultiLineTextWidget()
		GafferUI.PlugValueWidget.__init__( self, self.__textWidget, plug, **kw )
		self._addPopupMenu( self.__textWidget )
		self.__textWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
		# Commit the text on activation and when editing finishes.  The
		# textChanged connection is blocked in _updateFromPlug() unless the
		# "multiLineStringPlugValueWidget:continuousUpdate" metadata is set.
		self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
		self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
		self.__textChangedConnection = self.__textWidget.textChangedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
		self._updateFromPlug()
	def textWidget( self ) :
		# The underlying MultiLineTextWidget, for direct configuration.
		return self.__textWidget
	def _updateFromPlug( self ) :
		# Refresh text, error state, metadata-driven options and
		# editability from the current plug value.
		if self.getPlug() is not None :
			with self.getContext() :
				try :
					value = self.getPlug().getValue()
				except :
					# Broad on purpose : a failed compute is shown via the
					# errored state below rather than propagating.
					value = None
			if value is not None :
				self.__textWidget.setText( value )
			self.__textWidget.setErrored( value is None )
			fixedLineHeight = Gaffer.Metadata.value( self.getPlug(), "fixedLineHeight" )
			self.__textWidget.setFixedLineHeight( fixedLineHeight )
			role = Gaffer.Metadata.value( self.getPlug(), "multiLineStringPlugValueWidget:role" )
			role = getattr( self.__textWidget.Role, role.capitalize() ) if role else self.__textWidget.Role.Text
			self.__textWidget.setRole( role )
			# Only push every keystroke to the plug when continuous
			# updates are requested via metadata.
			self.__textChangedConnection.block(
				not Gaffer.Metadata.value( self.getPlug(), "multiLineStringPlugValueWidget:continuousUpdate" )
			)
		self.__textWidget.setEditable( self._editable() )
	def __keyPress( self, widget, event ) :
		assert( widget is self.__textWidget )
		if not self.__textWidget.getEditable() :
			return False
		# escape abandons everything
		if event.key=="Escape" :
			self._updateFromPlug()
			return True
		return False
	def __setPlugValue( self, *unused ) :
		# Transfer the widget text to the plug, inside an undo entry.
		if not self._editable() :
			return
		text = self.__textWidget.getText()
		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			self.getPlug().setValue( text )
|
##########################################################################
#
# Copyright (c) 2012, <NAME>. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
## Supported Metadata :
#
# - "multiLineStringPlugValueWidget:continuousUpdate"
# - "multiLineStringPlugValueWidget:role"
## PlugValueWidget presenting a multi-line text field for string plugs.
#
# Supported Metadata :
#
# - "multiLineStringPlugValueWidget:continuousUpdate" : when truthy, the plug
#   is updated on every keystroke, not just on activation/editing-finished.
# - "multiLineStringPlugValueWidget:role" : name of a MultiLineTextWidget.Role
#   value (capitalised via getattr below); defaults to Role.Text.
class MultiLineStringPlugValueWidget( GafferUI.PlugValueWidget ) :

	def __init__( self, plug, **kw ) :

		self.__textWidget = GafferUI.MultiLineTextWidget()

		GafferUI.PlugValueWidget.__init__( self, self.__textWidget, plug, **kw )

		self._addPopupMenu( self.__textWidget )

		self.__textWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
		self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
		self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
		# Connection is kept so _updateFromPlug() can block it when the
		# continuousUpdate metadata is off.
		self.__textChangedConnection = self.__textWidget.textChangedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )

		self._updateFromPlug()

	## Returns the underlying MultiLineTextWidget, for additional configuration.
	def textWidget( self ) :

		return self.__textWidget

	def _updateFromPlug( self ) :

		if self.getPlug() is not None :

			with self.getContext() :
				try :
					value = self.getPlug().getValue()
				except :
					# NOTE(review): bare except — any error computing the value is
					# surfaced only through setErrored() below.
					value = None

			if value is not None :
				self.__textWidget.setText( value )

			self.__textWidget.setErrored( value is None )

			fixedLineHeight = Gaffer.Metadata.value( self.getPlug(), "fixedLineHeight" )
			self.__textWidget.setFixedLineHeight( fixedLineHeight )

			role = Gaffer.Metadata.value( self.getPlug(), "multiLineStringPlugValueWidget:role" )
			role = getattr( self.__textWidget.Role, role.capitalize() ) if role else self.__textWidget.Role.Text
			self.__textWidget.setRole( role )

			self.__textChangedConnection.block(
				not Gaffer.Metadata.value( self.getPlug(), "multiLineStringPlugValueWidget:continuousUpdate" )
			)

		self.__textWidget.setEditable( self._editable() )

	def __keyPress( self, widget, event ) :

		assert( widget is self.__textWidget )

		if not self.__textWidget.getEditable() :
			return False

		# escape abandons everything, reverting the widget to the plug value
		if event.key=="Escape" :
			self._updateFromPlug()
			return True

		return False

	def __setPlugValue( self, *unused ) :

		if not self._editable() :
			return

		text = self.__textWidget.getText()
		# Wrap in an undo scope so the edit is a single undoable action.
		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			self.getPlug().setValue( text )
|
en
| 0.622536
|
########################################################################## # # Copyright (c) 2012, <NAME>. All rights reserved. # Copyright (c) 2012, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## ## Supported Metadata : # # - "multiLineStringPlugValueWidget:continuousUpdate" # - "multiLineStringPlugValueWidget:role" # escape abandons everything
| 0.798653
| 1
|
oop/relationships.py
|
ramkumarkrishnan/LearnPython
| 0
|
6626403
|
# Object Relationships demo: aggregation, composition, inheritance reuse,
# and collection ownership, illustrated with the small classes below.
print ("\nAggregation - Has-A relationship")
print ("Example - Country has many persons")
class Country:
    """A country with a name and a population count."""

    def __init__(self, name=None, population=0):
        self.name = name
        self.population = population

    def printDetails(self):
        """Print the country's name and population, one per line."""
        for label, value in (("Country Name:", self.name),
                             ("Country Population", self.population)):
            print(label, value)
class Person:
    """A person that aggregates a Country: the country object is merely
    referenced, so it survives when the person is deleted."""

    def __init__(self, name, country):
        self.name = name
        self.country = country

    def printDetails(self):
        """Print the person's name, then delegate to the country's details."""
        print(f"Person Name: {self.name}")
        self.country.printDetails()
# Demonstrate aggregation: deleting the Person leaves the Country intact,
# because the Person only referenced it.
c = Country("India", 1500)
p = Person("Raj", c)
p.printDetails()
del p
c.printDetails()

print ("\nComposition - Part-Of relationship")
print ("Example - A Car is composed of many parts")
class Engine:
    """An engine component with a displacement capacity."""

    def __init__(self, capacity=0):
        self.capacity = capacity

    def _announce(self, state):
        # Single formatting point for engine state messages.
        print("Engine " + state)

    def Start(self):
        self._announce("started")

    def Stop(self):
        self._announce("stopped")

    def printDetails(self):
        print(f"Engine Details: {self.capacity}")
class Tires:
    """Tire-set component; records how many tires the vehicle has."""

    def __init__(self, tires=0):
        self.tires = tires

    def printDetails(self):
        print(f"Number of tires: {self.tires}")
class Doors:
    """Door component; records how many doors the vehicle has."""

    def __init__(self, doors=0):
        self.doors = doors

    def printDetails(self):
        print(f"Number of doors: {self.doors}")
class Car:
    """A car composed of an Engine, Tires and Doors.

    Composition: the parts are constructed here, so their lifetime is
    bound to the Car instance that owns them.
    """

    def __init__(self, eng, tr, dr, color):
        self.eObj = Engine(eng)
        self.tObj = Tires(tr)
        self.dObj = Doors(dr)
        self.color = color

    def printDetails(self):
        """Print each owned part's details, then the car's colour."""
        for part in (self.eObj, self.tObj, self.dObj):
            part.printDetails()
        print("Car color:", self.color)
# Demonstrate composition: the Car builds (and owns) its Engine, Tires and Doors.
c = Car(1600, 4, 2, "Red")
c.printDetails()

print ("\nCars and Sedans")
print ("The Car parent only has a reference to Engine capacity attribute")
print ("and not the Engine object. So you need to add a property that")
print ("refers to the Engine object, and then reuse its methods")
class Sedan(Car):
    # Sedan specialisation of Car that keeps its own Engine reference so the
    # Engine's Start/Stop behaviour can be reused directly.
    #
    # NOTE(review): the surrounding prints claim Car only stores the capacity,
    # but Car.__init__ actually builds an Engine (self.eObj); self.engine here
    # is a *second*, default-capacity Engine kept purely for Start/Stop.
    # Confirm this duplication is intended.

    def __init__(self, eng, tr, dr, color):
        super().__init__(eng, tr, dr, color)
        self.engine = Engine()

    def SedanStart(self):
        self.engine.Start()

    def SedanStop(self):
        self.engine.Stop()
# Exercise the Sedan subclass: start/stop its Engine, then print inherited details.
s1 = Sedan(2000, 5, 4, "Metallic Blue")
s1.SedanStart()
s1.SedanStop()
s1.printDetails()

print ("\nSports Teams")
class Player:
    """A player with an id, a name, and the name of the team they play for."""

    def __init__(self, Id=None, name=None, teamName=None):
        self.Id = Id
        self.name = name
        self.teamName = teamName

    def printPlayer(self):
        """Print the player's id, name and team on one line."""
        fields = ("Player: " + str(self.Id),
                  "Name: " + str(self.name),
                  "TeamName: " + str(self.teamName))
        print(" ".join(fields))
class Team:
    """A named team holding a list of Player objects.

    Fix: the original used a mutable default argument (``players=[]``),
    which Python evaluates once — every Team created without an explicit
    list shared the *same* list, so players added to one such team leaked
    into all of them. A ``None`` sentinel creates a fresh list per instance.
    """

    def __init__(self, name=None, players=None):
        self.name = name
        # Fresh list per instance; a caller-supplied list is still used directly.
        self.players = players if players is not None else []

    def addPlayer(self, player):
        self.players.append(player)

    def getPlayerCount(self):
        return len(self.players)

    def printTeam(self):
        """Print the team name, the raw player list, then each player's line."""
        print("TeamName: " + str(self.name))
        print("Players: ", self.players)
        for p in self.players:
            p.printPlayer()
class School:
    """A named school holding a list of Team objects.

    Fix: the original used a mutable default argument (``teams=[]``), so
    every School created without an explicit list shared one list and
    teams added to one such school appeared in all of them. A ``None``
    sentinel creates a fresh list per instance.
    """

    def __init__(self, name=None, teams=None):
        self.name = name
        # Fresh list per instance; a caller-supplied list is still used directly.
        self.teams = teams if teams is not None else []

    def addTeam(self, team):
        self.teams.append(team)

    def printSchool(self):
        """Print the school name, the raw team list, then each team's roster."""
        print("School: ", self.name)
        print("Teams: ", self.teams)
        for t in self.teams:
            t.printTeam()
# Build two teams of three players each and register them with a school.
p1 = Player("01", "Salah", "Liverpool")
p2 = Player("02", "Mane", "Liverpool")
p3 = Player("03", "Henderson", "Liverpool")
p = [p1, p2, p3]
t1 = Team("Liverpool", p)
q1 = Player("10", "Aguero", "Man City")
q2 = Player("11", "<NAME>", "Man City")
q3 = Player("12", "Mahrez", "Man City")
q = [q1, q2, q3]
t2 = Team ("Man City", q)
t = [t1, t2]
s = School("St Josephs", t)
s.printSchool()
print (t1.getPlayerCount())
print (t2.getPlayerCount())
# Adding a player through the Team also shows up via the School, because the
# School holds references to the same Team objects.
p4 = Player("04", "<NAME>", "Liverpool")
t1.addPlayer(p4)
print (t1.getPlayerCount())
s.printSchool()
|
# Object Relationships
print ("\nAggregation - Has-A relationship")
print ("Example - Country has many persons")
class Country:
def __init__(self, name=None, population=0):
self.name = name
self.population = population
def printDetails(self):
print("Country Name:", self.name)
print("Country Population", self.population)
class Person:
def __init__(self, name, country):
self.name = name
self.country = country
def printDetails(self):
print("Person Name:", self.name)
self.country.printDetails()
c = Country("India", 1500)
p = Person("Raj", c)
p.printDetails()
del p
c.printDetails()
print ("\nComposition - Part-Of relationship")
print ("Example - A Car is composed of many parts")
class Engine:
def __init__(self, capacity=0):
self.capacity = capacity
def Start(self):
print("Engine started")
def Stop(self):
print("Engine stopped")
def printDetails(self):
print("Engine Details:", self.capacity)
class Tires:
def __init__(self, tires=0):
self.tires = tires
def printDetails(self):
print("Number of tires:", self.tires)
class Doors:
def __init__(self, doors=0):
self.doors = doors
def printDetails(self):
print("Number of doors:", self.doors)
class Car:
def __init__(self, eng, tr, dr, color):
self.eObj = Engine(eng)
self.tObj = Tires(tr)
self.dObj = Doors(dr)
self.color = color
def printDetails(self):
self.eObj.printDetails()
self.tObj.printDetails()
self.dObj.printDetails()
print("Car color:", self.color)
c = Car(1600, 4, 2, "Red")
c.printDetails()
print ("\nCars and Sedans")
print ("The Car parent only has a reference to Engine capacity attribute")
print ("and not the Engine object. So you need to add a property that")
print ("refers to the Engine object, and then reuse its methods")
class Sedan(Car):
def __init__(self, eng, tr, dr, color):
super().__init__(eng, tr, dr, color)
self.engine = Engine()
def SedanStart(self):
self.engine.Start()
def SedanStop(self):
self.engine.Stop()
s1 = Sedan(2000, 5, 4, "Metallic Blue")
s1.SedanStart()
s1.SedanStop()
s1.printDetails()
print ("\nSports Teams")
class Player:
def __init__(self, Id=None, name=None, teamName=None):
self.Id = Id
self.name = name
self.teamName = teamName
def printPlayer(self):
print("Player: " + str(self.Id) + " Name: " + str(self.name) +
" TeamName: " + str(self.teamName))
class Team:
    """A named team holding a list of Player objects.

    Fix: the original used a mutable default argument (``players=[]``),
    which Python evaluates once — every Team created without an explicit
    list shared the *same* list, so players added to one such team leaked
    into all of them. A ``None`` sentinel creates a fresh list per instance.
    """

    def __init__(self, name=None, players=None):
        self.name = name
        # Fresh list per instance; a caller-supplied list is still used directly.
        self.players = players if players is not None else []

    def addPlayer(self, player):
        self.players.append(player)

    def getPlayerCount(self):
        return len(self.players)

    def printTeam(self):
        """Print the team name, the raw player list, then each player's line."""
        print("TeamName: " + str(self.name))
        print("Players: ", self.players)
        for p in self.players:
            p.printPlayer()
class School:
    """A named school holding a list of Team objects.

    Fix: the original used a mutable default argument (``teams=[]``), so
    every School created without an explicit list shared one list and
    teams added to one such school appeared in all of them. A ``None``
    sentinel creates a fresh list per instance.
    """

    def __init__(self, name=None, teams=None):
        self.name = name
        # Fresh list per instance; a caller-supplied list is still used directly.
        self.teams = teams if teams is not None else []

    def addTeam(self, team):
        self.teams.append(team)

    def printSchool(self):
        """Print the school name, the raw team list, then each team's roster."""
        print("School: ", self.name)
        print("Teams: ", self.teams)
        for t in self.teams:
            t.printTeam()
p1 = Player("01", "Salah", "Liverpool")
p2 = Player("02", "Mane", "Liverpool")
p3 = Player("03", "Henderson", "Liverpool")
p = [p1, p2, p3]
t1 = Team("Liverpool", p)
q1 = Player("10", "Aguero", "Man City")
q2 = Player("11", "<NAME>", "Man City")
q3 = Player("12", "Mahrez", "Man City")
q = [q1, q2, q3]
t2 = Team ("Man City", q)
t = [t1, t2]
s = School("St Josephs", t)
s.printSchool()
print (t1.getPlayerCount())
print (t2.getPlayerCount())
p4 = Player("04", "<NAME>", "Liverpool")
t1.addPlayer(p4)
print (t1.getPlayerCount())
s.printSchool()
|
en
| 0.551848
|
# Object Relationships
| 4.204139
| 4
|
test/test_scp.py
|
IMRCLab/kinodynamic-motion-planning-benchmark
| 0
|
6626404
|
import sys
import os
import pytest
sys.path.append(os.getcwd() + "/../scripts")
from main_scp import run_scp
import checker
# skip all tests in this module
pytestmark = pytest.mark.skip(reason="SCP currently not supported")
def _run_check(filename_env: str, filename_guess: str, filename_result: str):
    """Run SCP on an environment + initial guess and verify the solution.

    Args:
        filename_env: path to the environment YAML.
        filename_guess: path to the initial-guess trajectory YAML.
        filename_result: path the computed solution YAML is written to.
    """
    result = run_scp(filename_env,
                     filename_guess,
                     filename_result)
    # Plain truthiness asserts instead of `== True` (PEP 8 / flake8 E712),
    # with messages so a failure says which stage broke.
    assert result, "run_scp failed to produce a solution"
    result = checker.check(filename_env, filename_result)
    assert result, "solution failed the feasibility check"
def test_unicycle_first_order_0_parallelpark_0():
    """First-order unicycle, parallel-park scenario."""
    _run_check("../benchmark/unicycle_first_order_0/parallelpark_0.yaml",
               "../test/unicycle_first_order_0/guess_parallelpark_0_sol0.yaml",
               "tmp.yaml")
def test_unicycle_first_order_0_kink_0():
    """First-order unicycle, kink scenario."""
    _run_check("../benchmark/unicycle_first_order_0/kink_0.yaml",
               "../test/unicycle_first_order_0/guess_kink_0_sol0.yaml",
               "tmp.yaml")
def test_unicycle_first_order_0_bugtrap_0():
    """First-order unicycle, bugtrap scenario."""
    _run_check("../benchmark/unicycle_first_order_0/bugtrap_0.yaml",
               "../test/unicycle_first_order_0/guess_bugtrap_0_sol0.yaml",
               "tmp.yaml")
def test_unicycle_second_order_0_parallelpark_0():
    """Second-order unicycle, parallel-park scenario."""
    _run_check("../benchmark/unicycle_second_order_0/parallelpark_0.yaml",
               "../test/unicycle_second_order_0/guess_parallelpark_0_sol0.yaml",
               "tmp.yaml")
def test_unicycle_second_order_0_kink_0():
    """Second-order unicycle, kink scenario."""
    _run_check("../benchmark/unicycle_second_order_0/kink_0.yaml",
               "../test/unicycle_second_order_0/guess_kink_0_sol0.yaml",
               "tmp.yaml")
def test_unicycle_second_order_0_bugtrap_0():
    """Second-order unicycle, bugtrap scenario."""
    _run_check("../benchmark/unicycle_second_order_0/bugtrap_0.yaml",
               "../test/unicycle_second_order_0/guess_bugtrap_0_sol0.yaml",
               "tmp.yaml")
|
import sys
import os
import pytest
sys.path.append(os.getcwd() + "/../scripts")
from main_scp import run_scp
import checker
# skip all tests in this module
pytestmark = pytest.mark.skip(reason="SCP currently not supported")
def _run_check(filename_env: str, filename_guess: str, filename_result: str):
    """Run SCP on an environment + initial guess and verify the solution.

    Args:
        filename_env: path to the environment YAML.
        filename_guess: path to the initial-guess trajectory YAML.
        filename_result: path the computed solution YAML is written to.
    """
    result = run_scp(filename_env,
                     filename_guess,
                     filename_result)
    # Plain truthiness asserts instead of `== True` (PEP 8 / flake8 E712),
    # with messages so a failure says which stage broke.
    assert result, "run_scp failed to produce a solution"
    result = checker.check(filename_env, filename_result)
    assert result, "solution failed the feasibility check"
def test_unicycle_first_order_0_parallelpark_0():
_run_check("../benchmark/unicycle_first_order_0/parallelpark_0.yaml",
"../test/unicycle_first_order_0/guess_parallelpark_0_sol0.yaml",
"tmp.yaml")
def test_unicycle_first_order_0_kink_0():
_run_check("../benchmark/unicycle_first_order_0/kink_0.yaml",
"../test/unicycle_first_order_0/guess_kink_0_sol0.yaml",
"tmp.yaml")
def test_unicycle_first_order_0_bugtrap_0():
_run_check("../benchmark/unicycle_first_order_0/bugtrap_0.yaml",
"../test/unicycle_first_order_0/guess_bugtrap_0_sol0.yaml",
"tmp.yaml")
def test_unicycle_second_order_0_parallelpark_0():
_run_check("../benchmark/unicycle_second_order_0/parallelpark_0.yaml",
"../test/unicycle_second_order_0/guess_parallelpark_0_sol0.yaml",
"tmp.yaml")
def test_unicycle_second_order_0_kink_0():
_run_check("../benchmark/unicycle_second_order_0/kink_0.yaml",
"../test/unicycle_second_order_0/guess_kink_0_sol0.yaml",
"tmp.yaml")
def test_unicycle_second_order_0_bugtrap_0():
_run_check("../benchmark/unicycle_second_order_0/bugtrap_0.yaml",
"../test/unicycle_second_order_0/guess_bugtrap_0_sol0.yaml",
"tmp.yaml")
|
en
| 0.553496
|
# skip all tests in this module
| 2.367017
| 2
|
test/functional/wallet_signer.py
|
qogecoin/qogecoin
| 9
|
6626405
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin and Qogecoin Core Authors
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test external signer.
Verify that a qogecoind node can use an external signer command
See also rpc_signer.py for tests without wallet context.
"""
import os
import platform
from test_framework.test_framework import QogecoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletSignerTest(QogecoinTestFramework):
    """Functional test of wallet integration with an external signer.

    Mock signer scripts (mocks/signer.py, mocks/invalid_signer.py) stand in
    for real signing hardware; node 1 is started with -signer pointing at one
    of them.
    """

    def mock_signer_path(self):
        # Command used to invoke the well-behaved mock signer.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py')
        if platform.system() == "Windows":
            # Windows cannot execute a .py file directly; go through the launcher.
            return "py " + path
        else:
            return path

    def mock_invalid_signer_path(self):
        # Command used to invoke the mock signer that returns invalid descriptors.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'invalid_signer.py')
        if platform.system() == "Windows":
            return "py " + path
        else:
            return path

    def set_test_params(self):
        self.num_nodes = 2
        # The experimental syscall sandbox feature (-sandbox) is not compatible with -signer (which
        # invokes execve).
        self.disable_syscall_sandbox = True
        self.extra_args = [
            [],
            [f"-signer={self.mock_signer_path()}", '-keypool=10'],
        ]

    def skip_test_if_missing_module(self):
        self.skip_if_no_external_signer()
        self.skip_if_no_wallet()

    def set_mock_result(self, node, res):
        # The mock signer script reads this file (in the node's working dir)
        # to decide how to behave on its next invocation.
        with open(os.path.join(node.cwd, "mock_result"), "w", encoding="utf8") as f:
            f.write(res)

    def clear_mock_result(self, node):
        os.remove(os.path.join(node.cwd, "mock_result"))

    def run_test(self):
        self.test_valid_signer()
        # Restart node 1 with the misbehaving signer before the negative test.
        self.restart_node(1, [f"-signer={self.mock_invalid_signer_path()}", "-keypool=10"])
        self.test_invalid_signer()

    def test_valid_signer(self):
        """Exercise wallet creation, address derivation, address display and
        spending through the well-behaved mock signer."""
        self.log.debug(f"-signer={self.mock_signer_path()}")

        # Create new wallets for an external signer.
        # disable_private_keys and descriptors must be true:
        assert_raises_rpc_error(-4, "Private keys must be disabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=False, descriptors=True, external_signer=True)
        if self.is_bdb_compiled():
            assert_raises_rpc_error(-4, "Descriptor support must be enabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)
        else:
            assert_raises_rpc_error(-4, "Compiled without bdb support (required for legacy wallets)", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)

        self.nodes[1].createwallet(wallet_name='hww', disable_private_keys=True, descriptors=True, external_signer=True)
        hww = self.nodes[1].get_wallet_rpc('hww')
        assert_equal(hww.getwalletinfo()["external_signer"], True)

        # Flag can't be set afterwards (could be added later for non-blank descriptor based watch-only wallets)
        self.nodes[1].createwallet(wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=False)
        not_hww = self.nodes[1].get_wallet_rpc('not_hww')
        assert_equal(not_hww.getwalletinfo()["external_signer"], False)
        assert_raises_rpc_error(-8, "Wallet flag is immutable: external_signer", not_hww.setwalletflag, "external_signer", True)

        # assert_raises_rpc_error(-4, "Multiple signers found, please specify which to use", wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=True)

        # TODO: Handle error thrown by script
        # self.set_mock_result(self.nodes[1], "2")
        # assert_raises_rpc_error(-1, 'Unable to parse JSON',
        #     self.nodes[1].createwallet, wallet_name='not_hww2', disable_private_keys=True, descriptors=True, external_signer=False
        # )
        # self.clear_mock_result(self.nodes[1])

        # NOTE(review): -keypool=10 yields 30 entries here — presumably 10 per
        # active descriptor across the three address types; confirm.
        assert_equal(hww.getwalletinfo()["keypoolsize"], 30)

        address1 = hww.getnewaddress(address_type="bech32")
        assert_equal(address1, "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g")
        address_info = hww.getaddressinfo(address1)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/84'/1'/0'/0/0")

        address2 = hww.getnewaddress(address_type="p2sh-segwit")
        assert_equal(address2, "2N2gQKzjUe47gM8p1JZxaAkTcoHPXV6YyVp")
        address_info = hww.getaddressinfo(address2)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/49'/1'/0'/0/0")

        address3 = hww.getnewaddress(address_type="legacy")
        assert_equal(address3, "n1LKejAadN6hg2FrBXoU1KrwX4uK16mco9")
        address_info = hww.getaddressinfo(address3)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/44'/1'/0'/0/0")

        self.log.info('Test walletdisplayaddress')
        result = hww.walletdisplayaddress(address1)
        assert_equal(result, {"address": address1})

        # Handle error thrown by script
        self.set_mock_result(self.nodes[1], "2")
        assert_raises_rpc_error(-1, 'RunCommandParseJSON error',
            hww.walletdisplayaddress, address1
        )
        self.clear_mock_result(self.nodes[1])

        self.log.info('Prepare mock PSBT')
        self.nodes[0].sendtoaddress(address1, 1)
        self.generate(self.nodes[0], 1)

        # Load private key into wallet to generate a signed PSBT for the mock
        self.nodes[1].createwallet(wallet_name="mock", disable_private_keys=False, blank=True, descriptors=True)
        mock_wallet = self.nodes[1].get_wallet_rpc("mock")
        assert mock_wallet.getwalletinfo()['private_keys_enabled']

        result = mock_wallet.importdescriptors([{
            "desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0",
            "timestamp": 0,
            "range": [0,1],
            "internal": False,
            "active": True
        },
        {
            "desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh",
            "timestamp": 0,
            "range": [0, 0],
            "internal": True,
            "active": True
        }])
        assert_equal(result[0], {'success': True})
        assert_equal(result[1], {'success': True})
        assert_equal(mock_wallet.getwalletinfo()["txcount"], 1)
        dest = self.nodes[0].getnewaddress(address_type='bech32')
        mock_psbt = mock_wallet.walletcreatefundedpsbt([], {dest:0.5}, 0, {}, True)['psbt']
        mock_psbt_signed = mock_wallet.walletprocesspsbt(psbt=mock_psbt, sign=True, sighashtype="ALL", bip32derivs=True)
        mock_psbt_final = mock_wallet.finalizepsbt(mock_psbt_signed["psbt"])
        mock_tx = mock_psbt_final["hex"]
        assert(mock_wallet.testmempoolaccept([mock_tx])[0]["allowed"])

        # # Create a new wallet and populate with specific public keys, in order
        # # to work with the mock signed PSBT.
        # self.nodes[1].createwallet(wallet_name="hww4", disable_private_keys=True, descriptors=True, external_signer=True)
        # hww4 = self.nodes[1].get_wallet_rpc("hww4")
        #
        # descriptors = [{
        #     "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/*)#x30uthjs",
        #     "timestamp": "now",
        #     "range": [0, 1],
        #     "internal": False,
        #     "watchonly": True,
        #     "active": True
        # },
        # {
        #     "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)#h92akzzg",
        #     "timestamp": "now",
        #     "range": [0, 0],
        #     "internal": True,
        #     "watchonly": True,
        #     "active": True
        # }]
        # result = hww4.importdescriptors(descriptors)
        # assert_equal(result[0], {'success': True})
        # assert_equal(result[1], {'success': True})
        assert_equal(hww.getwalletinfo()["txcount"], 1)

        assert(hww.testmempoolaccept([mock_tx])[0]["allowed"])

        # The mock signer returns the contents of this file when asked to sign.
        with open(os.path.join(self.nodes[1].cwd, "mock_psbt"), "w", encoding="utf8") as f:
            f.write(mock_psbt_signed["psbt"])

        self.log.info('Test send using hww1')
        res = hww.send(outputs={dest:0.5},options={"add_to_wallet": False})
        assert(res["complete"])
        assert_equal(res["hex"], mock_tx)

        self.log.info('Test sendall using hww1')
        res = hww.sendall(recipients=[{dest:0.5}, hww.getrawchangeaddress()],options={"add_to_wallet": False})
        assert(res["complete"])
        assert_equal(res["hex"], mock_tx)

        # # Handle error thrown by script
        # self.set_mock_result(self.nodes[4], "2")
        # assert_raises_rpc_error(-1, 'Unable to parse JSON',
        #     hww4.signerprocesspsbt, psbt_orig, "00000001"
        # )
        # self.clear_mock_result(self.nodes[4])

    def test_invalid_signer(self):
        """Wallet creation must fail when the signer returns invalid descriptors."""
        self.log.debug(f"-signer={self.mock_invalid_signer_path()}")
        self.log.info('Test invalid external signer')
        assert_raises_rpc_error(-1, "Invalid descriptor", self.nodes[1].createwallet, wallet_name='hww_invalid', disable_private_keys=True, descriptors=True, external_signer=True)
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    WalletSignerTest().main()
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin and Qogecoin Core Authors
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test external signer.
Verify that a qogecoind node can use an external signer command
See also rpc_signer.py for tests without wallet context.
"""
import os
import platform
from test_framework.test_framework import QogecoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletSignerTest(QogecoinTestFramework):
def mock_signer_path(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py')
if platform.system() == "Windows":
return "py " + path
else:
return path
def mock_invalid_signer_path(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'invalid_signer.py')
if platform.system() == "Windows":
return "py " + path
else:
return path
def set_test_params(self):
self.num_nodes = 2
# The experimental syscall sandbox feature (-sandbox) is not compatible with -signer (which
# invokes execve).
self.disable_syscall_sandbox = True
self.extra_args = [
[],
[f"-signer={self.mock_signer_path()}", '-keypool=10'],
]
def skip_test_if_missing_module(self):
self.skip_if_no_external_signer()
self.skip_if_no_wallet()
def set_mock_result(self, node, res):
with open(os.path.join(node.cwd, "mock_result"), "w", encoding="utf8") as f:
f.write(res)
def clear_mock_result(self, node):
os.remove(os.path.join(node.cwd, "mock_result"))
def run_test(self):
self.test_valid_signer()
self.restart_node(1, [f"-signer={self.mock_invalid_signer_path()}", "-keypool=10"])
self.test_invalid_signer()
def test_valid_signer(self):
self.log.debug(f"-signer={self.mock_signer_path()}")
# Create new wallets for an external signer.
# disable_private_keys and descriptors must be true:
assert_raises_rpc_error(-4, "Private keys must be disabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=False, descriptors=True, external_signer=True)
if self.is_bdb_compiled():
assert_raises_rpc_error(-4, "Descriptor support must be enabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)
else:
assert_raises_rpc_error(-4, "Compiled without bdb support (required for legacy wallets)", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)
self.nodes[1].createwallet(wallet_name='hww', disable_private_keys=True, descriptors=True, external_signer=True)
hww = self.nodes[1].get_wallet_rpc('hww')
assert_equal(hww.getwalletinfo()["external_signer"], True)
# Flag can't be set afterwards (could be added later for non-blank descriptor based watch-only wallets)
self.nodes[1].createwallet(wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=False)
not_hww = self.nodes[1].get_wallet_rpc('not_hww')
assert_equal(not_hww.getwalletinfo()["external_signer"], False)
assert_raises_rpc_error(-8, "Wallet flag is immutable: external_signer", not_hww.setwalletflag, "external_signer", True)
# assert_raises_rpc_error(-4, "Multiple signers found, please specify which to use", wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=True)
# TODO: Handle error thrown by script
# self.set_mock_result(self.nodes[1], "2")
# assert_raises_rpc_error(-1, 'Unable to parse JSON',
# self.nodes[1].createwallet, wallet_name='not_hww2', disable_private_keys=True, descriptors=True, external_signer=False
# )
# self.clear_mock_result(self.nodes[1])
assert_equal(hww.getwalletinfo()["keypoolsize"], 30)
address1 = hww.getnewaddress(address_type="bech32")
assert_equal(address1, "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g")
address_info = hww.getaddressinfo(address1)
assert_equal(address_info['solvable'], True)
assert_equal(address_info['ismine'], True)
assert_equal(address_info['hdkeypath'], "m/84'/1'/0'/0/0")
address2 = hww.getnewaddress(address_type="p2sh-segwit")
assert_equal(address2, "2N2gQKzjUe47gM8p1JZxaAkTcoHPXV6YyVp")
address_info = hww.getaddressinfo(address2)
assert_equal(address_info['solvable'], True)
assert_equal(address_info['ismine'], True)
assert_equal(address_info['hdkeypath'], "m/49'/1'/0'/0/0")
address3 = hww.getnewaddress(address_type="legacy")
assert_equal(address3, "n1LKejAadN6hg2FrBXoU1KrwX4uK16mco9")
address_info = hww.getaddressinfo(address3)
assert_equal(address_info['solvable'], True)
assert_equal(address_info['ismine'], True)
assert_equal(address_info['hdkeypath'], "m/44'/1'/0'/0/0")
self.log.info('Test walletdisplayaddress')
result = hww.walletdisplayaddress(address1)
assert_equal(result, {"address": address1})
# Handle error thrown by script
self.set_mock_result(self.nodes[1], "2")
assert_raises_rpc_error(-1, 'RunCommandParseJSON error',
hww.walletdisplayaddress, address1
)
self.clear_mock_result(self.nodes[1])
self.log.info('Prepare mock PSBT')
self.nodes[0].sendtoaddress(address1, 1)
self.generate(self.nodes[0], 1)
# Load private key into wallet to generate a signed PSBT for the mock
self.nodes[1].createwallet(wallet_name="mock", disable_private_keys=False, blank=True, descriptors=True)
mock_wallet = self.nodes[1].get_wallet_rpc("mock")
assert mock_wallet.getwalletinfo()['private_keys_enabled']
result = mock_wallet.importdescriptors([{
"desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0",
"timestamp": 0,
"range": [0,1],
"internal": False,
"active": True
},
{
"desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh",
"timestamp": 0,
"range": [0, 0],
"internal": True,
"active": True
}])
assert_equal(result[0], {'success': True})
assert_equal(result[1], {'success': True})
assert_equal(mock_wallet.getwalletinfo()["txcount"], 1)
dest = self.nodes[0].getnewaddress(address_type='bech32')
mock_psbt = mock_wallet.walletcreatefundedpsbt([], {dest:0.5}, 0, {}, True)['psbt']
mock_psbt_signed = mock_wallet.walletprocesspsbt(psbt=mock_psbt, sign=True, sighashtype="ALL", bip32derivs=True)
mock_psbt_final = mock_wallet.finalizepsbt(mock_psbt_signed["psbt"])
mock_tx = mock_psbt_final["hex"]
assert(mock_wallet.testmempoolaccept([mock_tx])[0]["allowed"])
# # Create a new wallet and populate with specific public keys, in order
# # to work with the mock signed PSBT.
# self.nodes[1].createwallet(wallet_name="hww4", disable_private_keys=True, descriptors=True, external_signer=True)
# hww4 = self.nodes[1].get_wallet_rpc("hww4")
#
# descriptors = [{
# "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/*)#x30uthjs",
# "timestamp": "now",
# "range": [0, 1],
# "internal": False,
# "watchonly": True,
# "active": True
# },
# {
# "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)#h92akzzg",
# "timestamp": "now",
# "range": [0, 0],
# "internal": True,
# "watchonly": True,
# "active": True
# }]
# result = hww4.importdescriptors(descriptors)
# assert_equal(result[0], {'success': True})
# assert_equal(result[1], {'success': True})
assert_equal(hww.getwalletinfo()["txcount"], 1)
assert(hww.testmempoolaccept([mock_tx])[0]["allowed"])
with open(os.path.join(self.nodes[1].cwd, "mock_psbt"), "w", encoding="utf8") as f:
f.write(mock_psbt_signed["psbt"])
self.log.info('Test send using hww1')
res = hww.send(outputs={dest:0.5},options={"add_to_wallet": False})
assert(res["complete"])
assert_equal(res["hex"], mock_tx)
self.log.info('Test sendall using hww1')
res = hww.sendall(recipients=[{dest:0.5}, hww.getrawchangeaddress()],options={"add_to_wallet": False})
assert(res["complete"])
assert_equal(res["hex"], mock_tx)
# # Handle error thrown by script
# self.set_mock_result(self.nodes[4], "2")
# assert_raises_rpc_error(-1, 'Unable to parse JSON',
# hww4.signerprocesspsbt, psbt_orig, "00000001"
# )
# self.clear_mock_result(self.nodes[4])
def test_invalid_signer(self):
self.log.debug(f"-signer={self.mock_invalid_signer_path()}")
self.log.info('Test invalid external signer')
assert_raises_rpc_error(-1, "Invalid descriptor", self.nodes[1].createwallet, wallet_name='hww_invalid', disable_private_keys=True, descriptors=True, external_signer=True)
if __name__ == '__main__':
WalletSignerTest().main()
|
en
| 0.460329
|
#!/usr/bin/env python3 # Copyright (c) 2017-2021 The Bitcoin and Qogecoin Core Authors # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Test external signer. Verify that a qogecoind node can use an external signer command See also rpc_signer.py for tests without wallet context. # The experimental syscall sandbox feature (-sandbox) is not compatible with -signer (which # invokes execve). # Create new wallets for an external signer. # disable_private_keys and descriptors must be true: # Flag can't be set afterwards (could be added later for non-blank descriptor based watch-only wallets) # assert_raises_rpc_error(-4, "Multiple signers found, please specify which to use", wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=True) # TODO: Handle error thrown by script # self.set_mock_result(self.nodes[1], "2") # assert_raises_rpc_error(-1, 'Unable to parse JSON', # self.nodes[1].createwallet, wallet_name='not_hww2', disable_private_keys=True, descriptors=True, external_signer=False # ) # self.clear_mock_result(self.nodes[1]) # Handle error thrown by script # Load private key into wallet to generate a signed PSBT for the mock #rweraev0", #j6uzqvuh", # # Create a new wallet and populate with specific public keys, in order # # to work with the mock signed PSBT. 
# self.nodes[1].createwallet(wallet_name="hww4", disable_private_keys=True, descriptors=True, external_signer=True) # hww4 = self.nodes[1].get_wallet_rpc("hww4") # # descriptors = [{ # "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/*)#x30uthjs", # "timestamp": "now", # "range": [0, 1], # "internal": False, # "watchonly": True, # "active": True # }, # { # "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)#h92akzzg", # "timestamp": "now", # "range": [0, 0], # "internal": True, # "watchonly": True, # "active": True # }] # result = hww4.importdescriptors(descriptors) # assert_equal(result[0], {'success': True}) # assert_equal(result[1], {'success': True}) # # Handle error thrown by script # self.set_mock_result(self.nodes[4], "2") # assert_raises_rpc_error(-1, 'Unable to parse JSON', # hww4.signerprocesspsbt, psbt_orig, "00000001" # ) # self.clear_mock_result(self.nodes[4])
| 2.153908
| 2
|
settings.py
|
cajohnst/Optimized_FX_Portfolio
| 11
|
6626406
|
<reponame>cajohnst/Optimized_FX_Portfolio
#Set variables
import datetime
from datetime import date, timedelta
# authorization key for quandl data
auth_tok = "<KEY>"
# Input last day to get returns data for (default is today)
end_date = datetime.date.today() - timedelta(3)
# Input original portfolio value, used for VaR calculations
portfolio_value = 1000
# Input number of days to calculate back returns
num_days_optimal_portfolio = 200
#Compute returns with shift percentage change delay (daily = 1)
shift = 1
# Input Leverage
leverage = 10
# Input Rolling Period for moving averages
rolling_period = 50
# Input minimum desired return for portfolio optimization
rminimum = 100/float(252)
# Input risk free interest rate
interest_rate = 2/ float(365)
# Input interval for displaying changes in the weight distribution over time for distribution chart (daily=1, weekly=5)
distribution_interval = 5
# Number of days to pull data for
# ** this num_days is a different value than that used in other files **
num_days_regression = 720
# ** Before this date, daily high/low data is unreliable and therefore the Stochastic calculation is unreliable **
stoch_date = datetime.date(2016, 7, 15)
# Number of random portfolios in Optimize_FX_Portfolio
n_portfolios = 5000
#Number of days worth of data useable for charts or regression analysis
num_days_charts = 100
#q = avg. periods for gain/loss
q = 14
# On the scale from 0-100, this level is considered to be "overbought" by RSI, typical value is 70
Overbought = 70
#On the scale from 0-100, this level is considered to be "oversold" by RSI, typical value is 30
Oversold = 30
#Determine the moving average windows for MACD, moving average convergence divergence, as measured by
#the difference between slow and fast exponentially weighted moving averages compared to the fastest of
#the three. Levels are typically 26 for slow, 12 for fast, and 9 for fastest
nslow = 26
nfast = 12
nema = 9
#Determine windows for simple moving averages to be overlayed on the exchange rate chart. Levels vary, but widely-used
#rolling averages include 10, 20, 50, 100, and 200 day averages
ma_slow = 100
ma_fast = 20
#Determine windows for stochastics. A typical window is 14 periods. N is the number of windows. D is the "slow" stochastic window
#typically a 3- period moving average of the fast stochastic
n = 14
d = 3
# RSI overbought or oversold
Overbought_S = 80
Oversold_S = 20
# Number of bins for VaR histogram in Daily_Reports
num_bins = 25
|
#Set variables
import datetime
from datetime import date, timedelta
# authorization key for quandl data
auth_tok = "<KEY>"
# Input last day to get returns data for (default is today)
end_date = datetime.date.today() - timedelta(3)
# Input original portfolio value, used for VaR calculations
portfolio_value = 1000
# Input number of days to calculate back returns
num_days_optimal_portfolio = 200
#Compute returns with shift percentage change delay (daily = 1)
shift = 1
# Input Leverage
leverage = 10
# Input Rolling Period for moving averages
rolling_period = 50
# Input minimum desired return for portfolio optimization
rminimum = 100/float(252)
# Input risk free interest rate
interest_rate = 2/ float(365)
# Input interval for displaying changes in the weight distribution over time for distribution chart (daily=1, weekly=5)
distribution_interval = 5
# Number of days to pull data for
# ** this num_days is a different value than that used in other files **
num_days_regression = 720
# ** Before this date, daily high/low data is unreliable and therefore the Stochastic calculation is unreliable **
stoch_date = datetime.date(2016, 7, 15)
# Number of random portfolios in Optimize_FX_Portfolio
n_portfolios = 5000
#Number of days worth of data useable for charts or regression analysis
num_days_charts = 100
#q = avg. periods for gain/loss
q = 14
# On the scale from 0-100, this level is considered to be "overbought" by RSI, typical value is 70
Overbought = 70
#On the scale from 0-100, this level is considered to be "oversold" by RSI, typical value is 30
Oversold = 30
#Determine the moving average windows for MACD, moving average convergence divergence, as measured by
#the difference between slow and fast exponentially weighted moving averages compared to the fastest of
#the three. Levels are typically 26 for slow, 12 for fast, and 9 for fastest
nslow = 26
nfast = 12
nema = 9
#Determine windows for simple moving averages to be overlayed on the exchange rate chart. Levels vary, but widely-used
#rolling averages include 10, 20, 50, 100, and 200 day averages
ma_slow = 100
ma_fast = 20
#Determine windows for stochastics. A typical window is 14 periods. N is the number of windows. D is the "slow" stochastic window
#typically a 3- period moving average of the fast stochastic
n = 14
d = 3
# RSI overbought or oversold
Overbought_S = 80
Oversold_S = 20
# Number of bins for VaR histogram in Daily_Reports
num_bins = 25
|
en
| 0.87396
|
#Set variables # authorization key for quandl data # Input last day to get returns data for (default is today) # Input original portfolio value, used for VaR calculations # Input number of days to calculate back returns #Compute returns with shift percentage change delay (daily = 1) # Input Leverage # Input Rolling Period for moving averages # Input minimum desired return for portfolio optimization # Input risk free interest rate # Input interval for displaying changes in the weight distribution over time for distribution chart (daily=1, weekly=5) # Number of days to pull data for # ** this num_days is a different value than that used in other files ** # ** Before this date, daily high/low data is unreliable and therefore the Stochastic calculation is unreliable ** # Number of random portfolios in Optimize_FX_Portfolio #Number of days worth of data useable for charts or regression analysis #q = avg. periods for gain/loss # On the scale from 0-100, this level is considered to be "overbought" by RSI, typical value is 70 #On the scale from 0-100, this level is considered to be "oversold" by RSI, typical value is 30 #Determine the moving average windows for MACD, moving average convergence divergence, as measured by #the difference between slow and fast exponentially weighted moving averages compared to the fastest of #the three. Levels are typically 26 for slow, 12 for fast, and 9 for fastest #Determine windows for simple moving averages to be overlayed on the exchange rate chart. Levels vary, but widely-used #rolling averages include 10, 20, 50, 100, and 200 day averages #Determine windows for stochastics. A typical window is 14 periods. N is the number of windows. D is the "slow" stochastic window #typically a 3- period moving average of the fast stochastic # RSI overbought or oversold # Number of bins for VaR histogram in Daily_Reports
| 2.494443
| 2
|
final_pipeline/main.py
|
pjrule/covid-path-planning
| 5
|
6626407
|
# Main script to solve the UV Light optimization problem
import pandas as pd
from room import Room
from polygon_extraction import extract_polygon, construct_isValidLocation_function
from lp_solver import solve_full_lp, visualize_times, solve_naive, visualize_energy, visualize_distance
from shapely.geometry import box
import matplotlib.pyplot as plt
from shapely.ops import transform
import sys
######################
### Parameters ###
######################
# I/O Files
#INPUT_FILE = '../floor_plans/hrilab_2510_sled.pgm'
#INPUT_YAML = '../floor_plans/hrilab_2510_sled.yaml'
INPUT_FILE = '../floor_plans/2560.pgm'
INPUT_YAML = '../floor_plans/2560.yaml'
OUTPUT_CSV = '../output/waiting_times.csv'
# Environment parameters
ROBOT_HEIGHT = 1.2192 # Height of UV light, in meters
ROBOT_RADIUS = 0.4 # Distance from robot center to farthest point,
# in meters
ROBOT_WATTAGE = 55 # Power of the UV light in Watts (ie. J/sec)
DISINFECTION_THRESHOLD = 1206 # Joules/meter^2
# Preprocessing parameters
ORTHOGONAL_TOL = 40 # Tolerance for orthogonal simplification, in pixels
AVOID_UNKNOWN_REGIONS = True # Treat "unknown" pixels as walls when determining
# the spaces that the robot can move to
# Algorithm parameters. See documentation for the different variations
naive_solution = False
use_weak_everything = False # Compute a lower bound on the time for a solution
# Overrides use_strong_visibility and use_strong_distances
use_strong_visibility = True
use_strong_distances = False
use_shadow = False
scaling_method = 'branch_and_bound' # must be in {'epsilon', 'branch_and_bound', 'none'}
ROBOT_EPSILON = 0.2 # Size of grid for discretization of possible robot
# locations, in meters
ROOM_EPSILON = 0.2 # Size of grid for discretization of locations to
# disinfect, in meters
# Smaller epsilon values guarantee that we find a
# solution closer to optimal, assuming infinite speed
# The right value should be determined experimentally
show_visualizations = False
############################
### Compute Solution ###
############################
# Step 1: read input file (pixel-like image) and transform it to a simple polygon
# (with clearly marked in/out)
print('Extracting polygon')
polygon_data = extract_polygon(INPUT_FILE,
INPUT_YAML,
ortho_tolerance = ORTHOGONAL_TOL,
show_visualization = show_visualizations)
polygon, gray_img, xy_to_pixel, meters_per_pixel = polygon_data
is_valid_location = construct_isValidLocation_function(gray_img,
xy_to_pixel,
ROBOT_RADIUS,
meters_per_pixel,
AVOID_UNKNOWN_REGIONS)
# Step 2: a Room object contains not only the boundary, but creates a
# discretized list of places for the robot to guard (and list
# of places where the robot can actually move to)
print('Creating room')
room = Room(polygon,
gray_img,
xy_to_pixel,
robot_buffer_meters = ROBOT_RADIUS,
is_valid_guard = is_valid_location,
room_eps = ROOM_EPSILON,
guard_eps = ROBOT_EPSILON,
show_visualization = show_visualizations)
if naive_solution:
solve_naive(room, ROBOT_HEIGHT, DISINFECTION_THRESHOLD)
else:
# Step 3: we generate the LP problem and solve it.
print('Solving lp')
lp_solution_data = solve_full_lp(room,
ROBOT_HEIGHT,
ROBOT_RADIUS,
ROBOT_WATTAGE,
use_weak_everything,
use_strong_visibility,
use_strong_distances,
scaling_method,
DISINFECTION_THRESHOLD,
show_visualizations,
use_shadow)
time, waiting_times, intensities, unguarded_room_idx, _, percent_disinfected = lp_solution_data
# Step 4: Output a solution
print("-"*80)
print('Total solution time:', time)
print('Percent Disinfected:', percent_disinfected)
print("-"*80)
# Create a csv of all positions and waiting time
rows = []
for (x, y), t in zip(room.guard_grid, waiting_times):
# We drop points that you stop less than a milisecond. HARDCODED
if t > 1e-3:
rows.append({'x': x, 'y': y, 'time': t})
pd.DataFrame(rows).to_csv(OUTPUT_CSV, index=False)
# Graphical visualizations of the solution
if show_visualizations:
print('Visualizing solution')
visualize_times(room, waiting_times, unguarded_room_idx)
visualize_energy(room, waiting_times, intensities, DISINFECTION_THRESHOLD)
visualize_distance(room, waiting_times, intensities)
|
# Main script to solve the UV Light optimization problem
import pandas as pd
from room import Room
from polygon_extraction import extract_polygon, construct_isValidLocation_function
from lp_solver import solve_full_lp, visualize_times, solve_naive, visualize_energy, visualize_distance
from shapely.geometry import box
import matplotlib.pyplot as plt
from shapely.ops import transform
import sys
######################
### Parameters ###
######################
# I/O Files
#INPUT_FILE = '../floor_plans/hrilab_2510_sled.pgm'
#INPUT_YAML = '../floor_plans/hrilab_2510_sled.yaml'
INPUT_FILE = '../floor_plans/2560.pgm'
INPUT_YAML = '../floor_plans/2560.yaml'
OUTPUT_CSV = '../output/waiting_times.csv'
# Environment parameters
ROBOT_HEIGHT = 1.2192 # Height of UV light, in meters
ROBOT_RADIUS = 0.4 # Distance from robot center to farthest point,
# in meters
ROBOT_WATTAGE = 55 # Power of the UV light in Watts (ie. J/sec)
DISINFECTION_THRESHOLD = 1206 # Joules/meter^2
# Preprocessing parameters
ORTHOGONAL_TOL = 40 # Tolerance for orthogonal simplification, in pixels
AVOID_UNKNOWN_REGIONS = True # Treat "unknown" pixels as walls when determining
# the spaces that the robot can move to
# Algorithm parameters. See documentation for the different variations
naive_solution = False
use_weak_everything = False # Compute a lower bound on the time for a solution
# Overrides use_strong_visibility and use_strong_distances
use_strong_visibility = True
use_strong_distances = False
use_shadow = False
scaling_method = 'branch_and_bound' # must be in {'epsilon', 'branch_and_bound', 'none'}
ROBOT_EPSILON = 0.2 # Size of grid for discretization of possible robot
# locations, in meters
ROOM_EPSILON = 0.2 # Size of grid for discretization of locations to
# disinfect, in meters
# Smaller epsilon values guarantee that we find a
# solution closer to optimal, assuming infinite speed
# The right value should be determined experimentally
show_visualizations = False
############################
### Compute Solution ###
############################
# Step 1: read input file (pixel-like image) and transform it to a simple polygon
# (with clearly marked in/out)
print('Extracting polygon')
polygon_data = extract_polygon(INPUT_FILE,
INPUT_YAML,
ortho_tolerance = ORTHOGONAL_TOL,
show_visualization = show_visualizations)
polygon, gray_img, xy_to_pixel, meters_per_pixel = polygon_data
is_valid_location = construct_isValidLocation_function(gray_img,
xy_to_pixel,
ROBOT_RADIUS,
meters_per_pixel,
AVOID_UNKNOWN_REGIONS)
# Step 2: a Room object contains not only the boundary, but creates a
# discretized list of places for the robot to guard (and list
# of places where the robot can actually move to)
print('Creating room')
room = Room(polygon,
gray_img,
xy_to_pixel,
robot_buffer_meters = ROBOT_RADIUS,
is_valid_guard = is_valid_location,
room_eps = ROOM_EPSILON,
guard_eps = ROBOT_EPSILON,
show_visualization = show_visualizations)
if naive_solution:
solve_naive(room, ROBOT_HEIGHT, DISINFECTION_THRESHOLD)
else:
# Step 3: we generate the LP problem and solve it.
print('Solving lp')
lp_solution_data = solve_full_lp(room,
ROBOT_HEIGHT,
ROBOT_RADIUS,
ROBOT_WATTAGE,
use_weak_everything,
use_strong_visibility,
use_strong_distances,
scaling_method,
DISINFECTION_THRESHOLD,
show_visualizations,
use_shadow)
time, waiting_times, intensities, unguarded_room_idx, _, percent_disinfected = lp_solution_data
# Step 4: Output a solution
print("-"*80)
print('Total solution time:', time)
print('Percent Disinfected:', percent_disinfected)
print("-"*80)
# Create a csv of all positions and waiting time
rows = []
for (x, y), t in zip(room.guard_grid, waiting_times):
# We drop points that you stop less than a milisecond. HARDCODED
if t > 1e-3:
rows.append({'x': x, 'y': y, 'time': t})
pd.DataFrame(rows).to_csv(OUTPUT_CSV, index=False)
# Graphical visualizations of the solution
if show_visualizations:
print('Visualizing solution')
visualize_times(room, waiting_times, unguarded_room_idx)
visualize_energy(room, waiting_times, intensities, DISINFECTION_THRESHOLD)
visualize_distance(room, waiting_times, intensities)
|
en
| 0.723006
|
# Main script to solve the UV Light optimization problem ###################### ### Parameters ### ###################### # I/O Files #INPUT_FILE = '../floor_plans/hrilab_2510_sled.pgm' #INPUT_YAML = '../floor_plans/hrilab_2510_sled.yaml' # Environment parameters # Height of UV light, in meters # Distance from robot center to farthest point, # in meters # Power of the UV light in Watts (ie. J/sec) # Joules/meter^2 # Preprocessing parameters # Tolerance for orthogonal simplification, in pixels # Treat "unknown" pixels as walls when determining # the spaces that the robot can move to # Algorithm parameters. See documentation for the different variations # Compute a lower bound on the time for a solution # Overrides use_strong_visibility and use_strong_distances # must be in {'epsilon', 'branch_and_bound', 'none'} # Size of grid for discretization of possible robot # locations, in meters # Size of grid for discretization of locations to # disinfect, in meters # Smaller epsilon values guarantee that we find a # solution closer to optimal, assuming infinite speed # The right value should be determined experimentally ############################ ### Compute Solution ### ############################ # Step 1: read input file (pixel-like image) and transform it to a simple polygon # (with clearly marked in/out) # Step 2: a Room object contains not only the boundary, but creates a # discretized list of places for the robot to guard (and list # of places where the robot can actually move to) # Step 3: we generate the LP problem and solve it. # Step 4: Output a solution # Create a csv of all positions and waiting time # We drop points that you stop less than a milisecond. HARDCODED # Graphical visualizations of the solution
| 2.572122
| 3
|
neighborhood_data/add_non_ecc.py
|
trinity-gao/echo-locator
| 2
|
6626408
|
<reponame>trinity-gao/echo-locator
#!/usr/bin/env python
# encoding=utf8
"""
Produces a GeoJSON file of the ECC neighborhood points with associated data.
Expects `add_zcta_centroids.py` was already run to identify the points.
"""
import csv
import errno
import os
ECC_NEIGHBORHOOD_CSV = 'ecc_neighborhoods.csv'
NON_ECC_NEIGHBORHOOD_CSV = 'non_ecc_max_subsidies.csv'
OUTPUT_FILE = 'neighborhoods.csv'
if not os.path.isfile(ECC_NEIGHBORHOOD_CSV):
print('\nMissing input file {f}.\n\n'.format(f=ECC_NEIGHBORHOOD_CSV))
raise IOError(errno.ENOENT,
os.strerror(errno.ENOENT),
ECC_NEIGHBORHOOD_CSV)
if not os.path.isfile(NON_ECC_NEIGHBORHOOD_CSV):
print('\nMissing input file {f}.\n\n'.format(f=NON_ECC_NEIGHBORHOOD_CSV))
raise IOError(errno.ENOENT,
os.strerror(errno.ENOENT),
NON_ECC_NEIGHBORHOOD_CSV)
# Read CSVs of neighborhoods, keyed by zip code.
ecc_zips = {}
with open(ECC_NEIGHBORHOOD_CSV) as df:
rdr = csv.DictReader(df)
ecc_fieldnames = list(rdr.fieldnames)
ecc_fieldnames.remove('zipcode')
for row in rdr:
zipcode = row['zipcode'].zfill(5)
row.pop('zipcode')
ecc_zips[zipcode] = row
non_ecc_zips = {}
with open(NON_ECC_NEIGHBORHOOD_CSV) as df:
rdr = csv.DictReader(df)
non_ecc_fieldnames = list(rdr.fieldnames)
non_ecc_fieldnames.remove('zipcode')
for row in rdr:
zipcode = row['zipcode'].zfill(5)
row.pop('zipcode')
non_ecc_zips[zipcode] = row
IGNORE_NON_ECC_COLUMNS = ['education_percentile_previous', 'zviolentcrimeflip']
for col in IGNORE_NON_ECC_COLUMNS:
non_ecc_fieldnames.remove(col)
joined_fieldnames = list(set(non_ecc_fieldnames).union(set(ecc_fieldnames)))
joined_zips = non_ecc_zips.copy()
# Add the fields from the ECC CSV to the combination ECC and non-ECC
for zipcode in joined_zips:
joined_zips[zipcode]['zipcode'] = zipcode
for col in IGNORE_NON_ECC_COLUMNS:
joined_zips[zipcode].pop(col)
if zipcode in ecc_zips:
for fld in ecc_fieldnames:
joined_zips[zipcode]['ecc'] = 1
joined_zips[zipcode][fld] = ecc_zips[zipcode][fld]
else:
is_ecc = joined_zips[zipcode]['ecc'] and (joined_zips[zipcode]['ecc'] == '1' or
joined_zips[zipcode]['ecc'] == 1)
joined_zips[zipcode]['ecc'] = 1 if is_ecc else 0
for fld in ecc_fieldnames:
if fld not in joined_zips[zipcode]:
joined_zips[zipcode][fld] = ''
joined_fieldnames.append('zipcode')
with open(OUTPUT_FILE, 'w') as outf:
wtr = csv.DictWriter(outf, fieldnames=joined_fieldnames)
wtr.writeheader()
wtr.writerows(joined_zips.values())
print('\nAll done writing {n} neighborhoods to {f}.'.format(n=len(joined_zips.keys()),
f=OUTPUT_FILE))
|
#!/usr/bin/env python
# encoding=utf8
"""
Produces a GeoJSON file of the ECC neighborhood points with associated data.
Expects `add_zcta_centroids.py` was already run to identify the points.
"""
import csv
import errno
import os
ECC_NEIGHBORHOOD_CSV = 'ecc_neighborhoods.csv'
NON_ECC_NEIGHBORHOOD_CSV = 'non_ecc_max_subsidies.csv'
OUTPUT_FILE = 'neighborhoods.csv'
if not os.path.isfile(ECC_NEIGHBORHOOD_CSV):
print('\nMissing input file {f}.\n\n'.format(f=ECC_NEIGHBORHOOD_CSV))
raise IOError(errno.ENOENT,
os.strerror(errno.ENOENT),
ECC_NEIGHBORHOOD_CSV)
if not os.path.isfile(NON_ECC_NEIGHBORHOOD_CSV):
print('\nMissing input file {f}.\n\n'.format(f=NON_ECC_NEIGHBORHOOD_CSV))
raise IOError(errno.ENOENT,
os.strerror(errno.ENOENT),
NON_ECC_NEIGHBORHOOD_CSV)
# Read CSVs of neighborhoods, keyed by zip code.
ecc_zips = {}
with open(ECC_NEIGHBORHOOD_CSV) as df:
rdr = csv.DictReader(df)
ecc_fieldnames = list(rdr.fieldnames)
ecc_fieldnames.remove('zipcode')
for row in rdr:
zipcode = row['zipcode'].zfill(5)
row.pop('zipcode')
ecc_zips[zipcode] = row
non_ecc_zips = {}
with open(NON_ECC_NEIGHBORHOOD_CSV) as df:
rdr = csv.DictReader(df)
non_ecc_fieldnames = list(rdr.fieldnames)
non_ecc_fieldnames.remove('zipcode')
for row in rdr:
zipcode = row['zipcode'].zfill(5)
row.pop('zipcode')
non_ecc_zips[zipcode] = row
IGNORE_NON_ECC_COLUMNS = ['education_percentile_previous', 'zviolentcrimeflip']
for col in IGNORE_NON_ECC_COLUMNS:
non_ecc_fieldnames.remove(col)
joined_fieldnames = list(set(non_ecc_fieldnames).union(set(ecc_fieldnames)))
joined_zips = non_ecc_zips.copy()
# Add the fields from the ECC CSV to the combination ECC and non-ECC
for zipcode in joined_zips:
joined_zips[zipcode]['zipcode'] = zipcode
for col in IGNORE_NON_ECC_COLUMNS:
joined_zips[zipcode].pop(col)
if zipcode in ecc_zips:
for fld in ecc_fieldnames:
joined_zips[zipcode]['ecc'] = 1
joined_zips[zipcode][fld] = ecc_zips[zipcode][fld]
else:
is_ecc = joined_zips[zipcode]['ecc'] and (joined_zips[zipcode]['ecc'] == '1' or
joined_zips[zipcode]['ecc'] == 1)
joined_zips[zipcode]['ecc'] = 1 if is_ecc else 0
for fld in ecc_fieldnames:
if fld not in joined_zips[zipcode]:
joined_zips[zipcode][fld] = ''
joined_fieldnames.append('zipcode')
with open(OUTPUT_FILE, 'w') as outf:
wtr = csv.DictWriter(outf, fieldnames=joined_fieldnames)
wtr.writeheader()
wtr.writerows(joined_zips.values())
print('\nAll done writing {n} neighborhoods to {f}.'.format(n=len(joined_zips.keys()),
f=OUTPUT_FILE))
|
en
| 0.866958
|
#!/usr/bin/env python # encoding=utf8 Produces a GeoJSON file of the ECC neighborhood points with associated data. Expects `add_zcta_centroids.py` was already run to identify the points. # Read CSVs of neighborhoods, keyed by zip code. # Add the fields from the ECC CSV to the combination ECC and non-ECC
| 2.771548
| 3
|
137-single-number-ii/137-single-number-ii.py
|
hyeseonko/LeetCode
| 2
|
6626409
|
<gh_stars>1-10
class Solution:
def singleNumber(self, nums: List[int]) -> int:
for num in set(nums):
if nums.count(num)==1:
return num
|
class Solution:
def singleNumber(self, nums: List[int]) -> int:
for num in set(nums):
if nums.count(num)==1:
return num
|
none
| 1
| 3.191027
| 3
|
|
games/hash/migrations/0007_auto_20200217_2044.py
|
Sharmaxz/battle-dual-api
| 0
|
6626410
|
# Generated by Django 3.0.3 on 2020-02-17 20:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hash', '0006_hash_matrix'),
]
operations = [
migrations.RemoveField(
model_name='hash',
name='cols',
),
migrations.RemoveField(
model_name='hash',
name='rows',
),
migrations.DeleteModel(
name='Item',
),
]
|
# Generated by Django 3.0.3 on 2020-02-17 20:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hash', '0006_hash_matrix'),
]
operations = [
migrations.RemoveField(
model_name='hash',
name='cols',
),
migrations.RemoveField(
model_name='hash',
name='rows',
),
migrations.DeleteModel(
name='Item',
),
]
|
en
| 0.786301
|
# Generated by Django 3.0.3 on 2020-02-17 20:44
| 1.68821
| 2
|
fontbuilder/ttf2eot/__init__.py
|
plrthink/myicons
| 83
|
6626411
|
#!/usr/bin/env python
# encoding: utf-8
"""
Translate ttf files to IE8 compatible EOT files.
Based on the implementation of ttf2eot in nodejs by fontello.
(https://github.com/fontello/ttf2eot/)
"""
import consts
from .bytebuffer import ByteBuffer
__all__ = ('ttf2eot', )
def strbuf(raw):
b = ByteBuffer(bytearray(len(raw) + 4))
b.setuint(16, 0, len(raw), True)
for i in xrange(0, len(raw), 2):
b1 = ord(raw[i])
b2 = ord(raw[i + 1])
val = (b1 << 8) + b2
b.setuint(16, i + 2, val, True)
b.setuint(16, len(raw) + 2, 0, True)
return b.getvalue()
def ttf2eot(array):
buf = ByteBuffer(array)
out = ByteBuffer(bytearray(consts.SIZEOF.EOT_PREFIX))
out.setuint(32, consts.EOT_OFFSET.FONT_LENGTH, len(array), True)
out.setuint(32, consts.EOT_OFFSET.VERSION, consts.MAGIC.EOT_VERSION, True)
out.setuint(8, consts.EOT_OFFSET.CHARSET, consts.MAGIC.EOT_CHARSET)
out.setuint(16, consts.EOT_OFFSET.MAGIC, consts.MAGIC.EOT_MAGIC, True)
familyName = []
subfamilyName = []
fullName = []
versionString = []
haveOS2 = False
haveName = False
haveHead = False
numTables = buf.getuint(16, consts.SFNT_OFFSET.NUMTABLES)
for i in xrange(numTables):
start = consts.SIZEOF.SFNT_HEADER + i * consts.SIZEOF.SFNT_TABLE_ENTRY
data = ByteBuffer(array[start:])
tableEntryTag = data.readat(consts.SFNT_OFFSET.TABLE_TAG, 4)
tableEntryOffset = data.getuint(32, consts.SFNT_OFFSET.TABLE_OFFSET)
tableEntryLength = data.getuint(32, consts.SFNT_OFFSET.TABLE_LENGTH)
table = ByteBuffer(array[tableEntryOffset: tableEntryOffset + tableEntryLength])
if tableEntryTag == 'OS/2':
haveOS2 = True
for j in xrange(10):
val = table.getuint(8, consts.SFNT_OFFSET.OS2_FONT_PANOSE + j)
out.setuint(8, consts.EOT_OFFSET.FONT_PANOSE + j, val)
fselection = table.getuint(16, consts.SFNT_OFFSET.OS2_FS_SELECTION)
out.setuint(8, consts.EOT_OFFSET.ITALIC, fselection & 0x01)
os2_weight = table.getuint(16, consts.SFNT_OFFSET.OS2_WEIGHT)
out.setuint(32, consts.EOT_OFFSET.WEIGHT, os2_weight, True)
for j in range(4):
os2_unicode_range = table.getuint(32, consts.SFNT_OFFSET.OS2_UNICODE_RANGE + j * 4)
out.setuint(32, consts.EOT_OFFSET.UNICODE_RANGE + j * 4, os2_unicode_range, True)
for j in (0, 1):
os2_codepage_range = table.getuint(32, consts.SFNT_OFFSET.OS2_CODEPAGE_RANGE + j * 4)
out.setuint(32, consts.EOT_OFFSET.CODEPAGE_RANGE + j * 4, os2_codepage_range, True)
elif tableEntryTag == 'head':
haveHead = True
head_checkssum_adjust = table.getuint(32, consts.SFNT_OFFSET.HEAD_CHECKSUM_ADJUSTMENT)
out.setuint(32, consts.EOT_OFFSET.CHECKSUM_ADJUSTMENT, head_checkssum_adjust, True)
elif tableEntryTag == 'name':
haveName = True
nameTableCount = table.getuint(16, consts.SFNT_OFFSET.NAMETABLE_COUNT)
nameTableStringOffset = table.getuint(16, consts.SFNT_OFFSET.NAMETABLE_STRING_OFFSET)
for j in xrange(nameTableCount):
tableOffset = tableEntryOffset + consts.SIZEOF.SFNT_NAMETABLE + j * consts.SIZEOF.SFNT_NAMETABLE_ENTRY
nameRecord = ByteBuffer(array[tableOffset:])
namePID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_PLATFORM_ID)
nameEID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_ENCODING_ID)
nameLID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_LANGUAGE_ID)
nameID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_NAME_ID)
nameLength = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_LENGTH)
nameOffset = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_OFFSET)
if namePID == 3 and nameEID == 1 and nameLID == consts.MAGIC.LANGUAGE_ENGLISH:
tablevalue = table.getvalue()
strbufOffset = nameTableStringOffset + nameOffset
s = strbuf(tablevalue[strbufOffset: strbufOffset + nameLength])
if nameID == 1:
familyName = s
elif nameID == 2:
subfamilyName = s
elif nameID == 4:
fullName = s
elif nameID == 5:
versionString = s
if haveOS2 and haveName and haveHead: break
if not (haveOS2 and haveName and haveHead):
raise Exception('Required section not found')
outvalue = out.getvalue()
bufvalue = buf.getvalue()
finallen = sum((len(outvalue),
len(familyName),
len(subfamilyName),
len(versionString),
len(fullName),
len(bufvalue),
2))
eot = ByteBuffer(bytearray(finallen))
eot.write(outvalue)
eot.write(familyName)
eot.write(subfamilyName)
eot.write(versionString)
eot.write(fullName)
eot.write(bytearray(2))
eot.write(bufvalue)
eot.setuint(32, consts.EOT_OFFSET.LENGTH, finallen, True)
return eot.getvalue()
|
#!/usr/bin/env python
# encoding: utf-8
"""
Translate ttf files to IE8 compatible EOT files.
Based on the implementation of ttf2eot in nodejs by fontello.
(https://github.com/fontello/ttf2eot/)
"""
import consts
from .bytebuffer import ByteBuffer
__all__ = ('ttf2eot', )
def strbuf(raw):
b = ByteBuffer(bytearray(len(raw) + 4))
b.setuint(16, 0, len(raw), True)
for i in xrange(0, len(raw), 2):
b1 = ord(raw[i])
b2 = ord(raw[i + 1])
val = (b1 << 8) + b2
b.setuint(16, i + 2, val, True)
b.setuint(16, len(raw) + 2, 0, True)
return b.getvalue()
def ttf2eot(array):
buf = ByteBuffer(array)
out = ByteBuffer(bytearray(consts.SIZEOF.EOT_PREFIX))
out.setuint(32, consts.EOT_OFFSET.FONT_LENGTH, len(array), True)
out.setuint(32, consts.EOT_OFFSET.VERSION, consts.MAGIC.EOT_VERSION, True)
out.setuint(8, consts.EOT_OFFSET.CHARSET, consts.MAGIC.EOT_CHARSET)
out.setuint(16, consts.EOT_OFFSET.MAGIC, consts.MAGIC.EOT_MAGIC, True)
familyName = []
subfamilyName = []
fullName = []
versionString = []
haveOS2 = False
haveName = False
haveHead = False
numTables = buf.getuint(16, consts.SFNT_OFFSET.NUMTABLES)
for i in xrange(numTables):
start = consts.SIZEOF.SFNT_HEADER + i * consts.SIZEOF.SFNT_TABLE_ENTRY
data = ByteBuffer(array[start:])
tableEntryTag = data.readat(consts.SFNT_OFFSET.TABLE_TAG, 4)
tableEntryOffset = data.getuint(32, consts.SFNT_OFFSET.TABLE_OFFSET)
tableEntryLength = data.getuint(32, consts.SFNT_OFFSET.TABLE_LENGTH)
table = ByteBuffer(array[tableEntryOffset: tableEntryOffset + tableEntryLength])
if tableEntryTag == 'OS/2':
haveOS2 = True
for j in xrange(10):
val = table.getuint(8, consts.SFNT_OFFSET.OS2_FONT_PANOSE + j)
out.setuint(8, consts.EOT_OFFSET.FONT_PANOSE + j, val)
fselection = table.getuint(16, consts.SFNT_OFFSET.OS2_FS_SELECTION)
out.setuint(8, consts.EOT_OFFSET.ITALIC, fselection & 0x01)
os2_weight = table.getuint(16, consts.SFNT_OFFSET.OS2_WEIGHT)
out.setuint(32, consts.EOT_OFFSET.WEIGHT, os2_weight, True)
for j in range(4):
os2_unicode_range = table.getuint(32, consts.SFNT_OFFSET.OS2_UNICODE_RANGE + j * 4)
out.setuint(32, consts.EOT_OFFSET.UNICODE_RANGE + j * 4, os2_unicode_range, True)
for j in (0, 1):
os2_codepage_range = table.getuint(32, consts.SFNT_OFFSET.OS2_CODEPAGE_RANGE + j * 4)
out.setuint(32, consts.EOT_OFFSET.CODEPAGE_RANGE + j * 4, os2_codepage_range, True)
elif tableEntryTag == 'head':
haveHead = True
head_checkssum_adjust = table.getuint(32, consts.SFNT_OFFSET.HEAD_CHECKSUM_ADJUSTMENT)
out.setuint(32, consts.EOT_OFFSET.CHECKSUM_ADJUSTMENT, head_checkssum_adjust, True)
elif tableEntryTag == 'name':
haveName = True
nameTableCount = table.getuint(16, consts.SFNT_OFFSET.NAMETABLE_COUNT)
nameTableStringOffset = table.getuint(16, consts.SFNT_OFFSET.NAMETABLE_STRING_OFFSET)
for j in xrange(nameTableCount):
tableOffset = tableEntryOffset + consts.SIZEOF.SFNT_NAMETABLE + j * consts.SIZEOF.SFNT_NAMETABLE_ENTRY
nameRecord = ByteBuffer(array[tableOffset:])
namePID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_PLATFORM_ID)
nameEID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_ENCODING_ID)
nameLID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_LANGUAGE_ID)
nameID = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_NAME_ID)
nameLength = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_LENGTH)
nameOffset = nameRecord.getuint(16, consts.SFNT_OFFSET.NAME_OFFSET)
if namePID == 3 and nameEID == 1 and nameLID == consts.MAGIC.LANGUAGE_ENGLISH:
tablevalue = table.getvalue()
strbufOffset = nameTableStringOffset + nameOffset
s = strbuf(tablevalue[strbufOffset: strbufOffset + nameLength])
if nameID == 1:
familyName = s
elif nameID == 2:
subfamilyName = s
elif nameID == 4:
fullName = s
elif nameID == 5:
versionString = s
if haveOS2 and haveName and haveHead: break
if not (haveOS2 and haveName and haveHead):
raise Exception('Required section not found')
outvalue = out.getvalue()
bufvalue = buf.getvalue()
finallen = sum((len(outvalue),
len(familyName),
len(subfamilyName),
len(versionString),
len(fullName),
len(bufvalue),
2))
eot = ByteBuffer(bytearray(finallen))
eot.write(outvalue)
eot.write(familyName)
eot.write(subfamilyName)
eot.write(versionString)
eot.write(fullName)
eot.write(bytearray(2))
eot.write(bufvalue)
eot.setuint(32, consts.EOT_OFFSET.LENGTH, finallen, True)
return eot.getvalue()
|
en
| 0.545956
|
#!/usr/bin/env python # encoding: utf-8 Translate ttf files to IE8 compatible EOT files. Based on the implementation of ttf2eot in nodejs by fontello. (https://github.com/fontello/ttf2eot/)
| 2.229227
| 2
|
epregressions/tests/resources/dummy.parametric.py
|
lefticus/EnergyPlusRegressionTool
| 0
|
6626412
|
<filename>epregressions/tests/resources/dummy.parametric.py
#!/usr/bin/env python
import shutil
file_contents = open('in.idf').read().upper()
if 'PARAMETRIC:' in file_contents:
shutil.copy('in.idf', 'in-01.idf')
shutil.copy('in.idf', 'in-02.idf')
shutil.copy('in.idf', 'in-03.idf')
|
<filename>epregressions/tests/resources/dummy.parametric.py
#!/usr/bin/env python
import shutil
file_contents = open('in.idf').read().upper()
if 'PARAMETRIC:' in file_contents:
shutil.copy('in.idf', 'in-01.idf')
shutil.copy('in.idf', 'in-02.idf')
shutil.copy('in.idf', 'in-03.idf')
|
ru
| 0.26433
|
#!/usr/bin/env python
| 1.803102
| 2
|
data/dataProcess.py
|
ISSCentaurus/MGE
| 2
|
6626413
|
<filename>data/dataProcess.py<gh_stars>1-10
import argparse
class FindGrowth():
def __init__(self):
self.currentVialSet = 0 # Increment when unplugged (Sample Swapped)
self.currentDataSet = 0 # Increment when begin logging (Every Minute)
self.currentTemp = 0 # Increment when begin logging
self.currentVialNum = 0 # Increment when new vial (Every ~80ms)
self.currentInterval = 0 # Increment when new vial
self.currentDutycycle = 0 # Increment when new vial
#Vial specifc
self.minAvg = 0
self.firstPeak = 0
self.minLight = 0
self.secondPeak = 0
self.isControlVial = False
self.logEverything = False # CHANGE TO True TO LOG minLight FOR ALL VIALS
if(self.logEverything == False):
print("Average minLight", "Current Temp", "Current Dutycycle", "Current Interval", "Current VialSet", "Current DataSet", sep=", ")
else:
print("First Peak", "minLight", "Second Peak", "isControlVial", "Current Dutycycle", "Current Interval", "Current Temp", "Current VialSet", "Current DataSet", sep=", ")
def processLine(self, line):
if(line.startswith("*************************************")): # Sample Swapped
self.sampleSwapped(line)
if(line.startswith("NewSet")):
self.startSet(line)
if(line.startswith("Maint")):
self.newVial(line)
if(line.startswith("Light")):
self.parseLight(line)
if(line.startswith("Interval")):
self.logInterval(line)
if(line.startswith("EndSet")):
self.endSet(line)
def sampleSwapped(self, line):
self.currentVialSet += 1
self.currentDataSet = 0
def startSet(self, line):
self.currentDataSet += 1
self.currentVialNum = 0
self.minAvg = 0
self.currentTemp = int(line[-4:],16)
def newVial(self, line):
self.currentVialNum += 1
self.currentDutycycle = int(line[-4:],16)
if(self.logEverything):
print(self.firstPeak, self.minLight, self.secondPeak, self.isControlVial, self.currentDutycycle, self.currentInterval, self.currentTemp, self.currentVialSet, self.currentDataSet, sep=", ")
if(not self.isControlVial):
if(self.minAvg == 0):
self.minAvg = self.minLight
else:
self.minAvg = (self.minAvg + self.minLight) / 2
self.firstPeak = 0
self.minLight = 0
self.secondPeak = 0
self.isControlVial = False
def parseLight(self, line):
lightVal = int(line[-4:],16)
if(lightVal > self.firstPeak and self.minLight == 0):
self.firstPeak = lightVal
elif(self.secondPeak == 0 and lightVal < self.minLight or (lightVal < self.firstPeak and self.minLight == 0)):
self.minLight = lightVal
elif(lightVal > self.secondPeak and self.minLight != 0):
self.secondPeak = lightVal
if(lightVal > 512):
self.isControlVial = True
def logInterval(self, line):
self.currentInterval = int(line[-4:],16)
def endSet(self, line):
if(self.logEverything == False):
print(self.minAvg, self.currentTemp, self.currentDutycycle, self.currentInterval, self.currentVialSet, self.currentDataSet, sep=", ")
if __name__ == "__main__":
process = FindGrowth()
lines = [line.rstrip('\n') for line in open('lightLog.space.txt')]
for i in lines:
process.processLine(i)
|
<filename>data/dataProcess.py<gh_stars>1-10
import argparse
class FindGrowth():
def __init__(self):
self.currentVialSet = 0 # Increment when unplugged (Sample Swapped)
self.currentDataSet = 0 # Increment when begin logging (Every Minute)
self.currentTemp = 0 # Increment when begin logging
self.currentVialNum = 0 # Increment when new vial (Every ~80ms)
self.currentInterval = 0 # Increment when new vial
self.currentDutycycle = 0 # Increment when new vial
#Vial specifc
self.minAvg = 0
self.firstPeak = 0
self.minLight = 0
self.secondPeak = 0
self.isControlVial = False
self.logEverything = False # CHANGE TO True TO LOG minLight FOR ALL VIALS
if(self.logEverything == False):
print("Average minLight", "Current Temp", "Current Dutycycle", "Current Interval", "Current VialSet", "Current DataSet", sep=", ")
else:
print("First Peak", "minLight", "Second Peak", "isControlVial", "Current Dutycycle", "Current Interval", "Current Temp", "Current VialSet", "Current DataSet", sep=", ")
def processLine(self, line):
if(line.startswith("*************************************")): # Sample Swapped
self.sampleSwapped(line)
if(line.startswith("NewSet")):
self.startSet(line)
if(line.startswith("Maint")):
self.newVial(line)
if(line.startswith("Light")):
self.parseLight(line)
if(line.startswith("Interval")):
self.logInterval(line)
if(line.startswith("EndSet")):
self.endSet(line)
def sampleSwapped(self, line):
self.currentVialSet += 1
self.currentDataSet = 0
def startSet(self, line):
self.currentDataSet += 1
self.currentVialNum = 0
self.minAvg = 0
self.currentTemp = int(line[-4:],16)
def newVial(self, line):
self.currentVialNum += 1
self.currentDutycycle = int(line[-4:],16)
if(self.logEverything):
print(self.firstPeak, self.minLight, self.secondPeak, self.isControlVial, self.currentDutycycle, self.currentInterval, self.currentTemp, self.currentVialSet, self.currentDataSet, sep=", ")
if(not self.isControlVial):
if(self.minAvg == 0):
self.minAvg = self.minLight
else:
self.minAvg = (self.minAvg + self.minLight) / 2
self.firstPeak = 0
self.minLight = 0
self.secondPeak = 0
self.isControlVial = False
def parseLight(self, line):
lightVal = int(line[-4:],16)
if(lightVal > self.firstPeak and self.minLight == 0):
self.firstPeak = lightVal
elif(self.secondPeak == 0 and lightVal < self.minLight or (lightVal < self.firstPeak and self.minLight == 0)):
self.minLight = lightVal
elif(lightVal > self.secondPeak and self.minLight != 0):
self.secondPeak = lightVal
if(lightVal > 512):
self.isControlVial = True
def logInterval(self, line):
self.currentInterval = int(line[-4:],16)
def endSet(self, line):
if(self.logEverything == False):
print(self.minAvg, self.currentTemp, self.currentDutycycle, self.currentInterval, self.currentVialSet, self.currentDataSet, sep=", ")
if __name__ == "__main__":
process = FindGrowth()
lines = [line.rstrip('\n') for line in open('lightLog.space.txt')]
for i in lines:
process.processLine(i)
|
en
| 0.453568
|
# Increment when unplugged (Sample Swapped) # Increment when begin logging (Every Minute) # Increment when begin logging # Increment when new vial (Every ~80ms) # Increment when new vial # Increment when new vial #Vial specifc # CHANGE TO True TO LOG minLight FOR ALL VIALS # Sample Swapped
| 2.545363
| 3
|
zappa/wsgi.py
|
SongYunSeop/Zappa
| 1
|
6626414
|
import logging
import base64
from urllib import urlencode
from requestlogger import ApacheFormatter
from StringIO import StringIO
def create_wsgi_request(event_info, server_name='zappa', script_name=None,
trailing_slash=True):
"""
Given some event_info,
create and return a valid WSGI request environ.
"""
method = event_info['httpMethod']
params = event_info['pathParameters']
query = event_info['queryStringParameters']
headers = event_info['headers']
# Extract remote user from context if Authorizer is enabled
remote_user = None
if event_info['requestContext'].get('authorizer'):
remote_user = event_info['requestContext']['authorizer'].get('principalId')
# Non-GET data is B64'd through the APIGW.
# if method in ["POST", "PUT", "PATCH"]:
# encoded_body = event_info['body']
# body = base64.b64decode(encoded_body)
# else:
body = event_info['body']
# Will this generate unicode errors?
# Early experiments indicate no, but this still looks unsafe to me.
body = str(body)
# Make header names canonical, e.g. content-type => Content-Type
for header in headers.keys():
canonical = header.title()
if canonical != header:
headers[canonical] = headers.pop(header)
path = event_info['path']
# if 'url' in params:
# # new style
# path = '/' + params.get('url') + "/"
# else:
# # old style
# path = "/"
# for key in sorted(params.keys()):
# path = path + params[key] + "/"
# # This determines if we should return
# # site.com/resource/ : site.com/resource
# # site.com/resource : site.com/resource
# # vs.
# # site.com/resource/ : site.com/resource/
# # site.com/resource : site.com/resource/
# # If no params are present, keep the slash.
# if not trailing_slash and params.keys():
# path = path[:-1]
if query:
query_string = urlencode(query)
else:
query_string = ""
x_forwarded_for = headers.get('X-Forwarded-For', '')
if ',' in x_forwarded_for:
remote_addr = x_forwarded_for.split(', ')[0]
else:
remote_addr = '127.0.0.1'
environ = {
'PATH_INFO': path,
'QUERY_STRING': query_string,
'REMOTE_ADDR': remote_addr,
'REQUEST_METHOD': method,
'SCRIPT_NAME': str(script_name) if script_name else '',
'SERVER_NAME': str(server_name),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': body,
'wsgi.errors': str(''),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
# Input processing
if method in ["POST", "PUT", "PATCH"]:
if 'Content-Type' in headers:
environ['CONTENT_TYPE'] = headers['Content-Type']
environ['wsgi.input'] = StringIO(body)
if body:
environ['CONTENT_LENGTH'] = str(len(body))
else:
environ['CONTENT_LENGTH'] = '0'
for header in headers:
wsgi_name = "HTTP_" + header.upper().replace('-', '_')
environ[wsgi_name] = str(headers[header])
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if script_name in path_info:
environ['PATH_INFO'].replace(script_name, '')
if remote_user:
environ['REMOTE_USER'] = remote_user
return environ
def common_log(environ, response, response_time=None):
"""
Given the WSGI environ and the response,
log this event in Common Log Format.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if response_time:
formatter = ApacheFormatter(with_response_time=True)
try:
log_entry = formatter(response.status_code, environ,
len(response.content), rt_us=response_time)
except TypeError:
# Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
log_entry = formatter(response.status_code, environ,
len(response.content), rt_ms=response_time)
else:
formatter = ApacheFormatter(with_response_time=False)
log_entry = formatter(response.status_code, environ,
len(response.content))
logger.info(log_entry)
return log_entry
|
import logging
import base64
from urllib import urlencode
from requestlogger import ApacheFormatter
from StringIO import StringIO
def create_wsgi_request(event_info, server_name='zappa', script_name=None,
trailing_slash=True):
"""
Given some event_info,
create and return a valid WSGI request environ.
"""
method = event_info['httpMethod']
params = event_info['pathParameters']
query = event_info['queryStringParameters']
headers = event_info['headers']
# Extract remote user from context if Authorizer is enabled
remote_user = None
if event_info['requestContext'].get('authorizer'):
remote_user = event_info['requestContext']['authorizer'].get('principalId')
# Non-GET data is B64'd through the APIGW.
# if method in ["POST", "PUT", "PATCH"]:
# encoded_body = event_info['body']
# body = base64.b64decode(encoded_body)
# else:
body = event_info['body']
# Will this generate unicode errors?
# Early experiments indicate no, but this still looks unsafe to me.
body = str(body)
# Make header names canonical, e.g. content-type => Content-Type
for header in headers.keys():
canonical = header.title()
if canonical != header:
headers[canonical] = headers.pop(header)
path = event_info['path']
# if 'url' in params:
# # new style
# path = '/' + params.get('url') + "/"
# else:
# # old style
# path = "/"
# for key in sorted(params.keys()):
# path = path + params[key] + "/"
# # This determines if we should return
# # site.com/resource/ : site.com/resource
# # site.com/resource : site.com/resource
# # vs.
# # site.com/resource/ : site.com/resource/
# # site.com/resource : site.com/resource/
# # If no params are present, keep the slash.
# if not trailing_slash and params.keys():
# path = path[:-1]
if query:
query_string = urlencode(query)
else:
query_string = ""
x_forwarded_for = headers.get('X-Forwarded-For', '')
if ',' in x_forwarded_for:
remote_addr = x_forwarded_for.split(', ')[0]
else:
remote_addr = '127.0.0.1'
environ = {
'PATH_INFO': path,
'QUERY_STRING': query_string,
'REMOTE_ADDR': remote_addr,
'REQUEST_METHOD': method,
'SCRIPT_NAME': str(script_name) if script_name else '',
'SERVER_NAME': str(server_name),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': body,
'wsgi.errors': str(''),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
# Input processing
if method in ["POST", "PUT", "PATCH"]:
if 'Content-Type' in headers:
environ['CONTENT_TYPE'] = headers['Content-Type']
environ['wsgi.input'] = StringIO(body)
if body:
environ['CONTENT_LENGTH'] = str(len(body))
else:
environ['CONTENT_LENGTH'] = '0'
for header in headers:
wsgi_name = "HTTP_" + header.upper().replace('-', '_')
environ[wsgi_name] = str(headers[header])
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if script_name in path_info:
environ['PATH_INFO'].replace(script_name, '')
if remote_user:
environ['REMOTE_USER'] = remote_user
return environ
def common_log(environ, response, response_time=None):
"""
Given the WSGI environ and the response,
log this event in Common Log Format.
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if response_time:
formatter = ApacheFormatter(with_response_time=True)
try:
log_entry = formatter(response.status_code, environ,
len(response.content), rt_us=response_time)
except TypeError:
# Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
log_entry = formatter(response.status_code, environ,
len(response.content), rt_ms=response_time)
else:
formatter = ApacheFormatter(with_response_time=False)
log_entry = formatter(response.status_code, environ,
len(response.content))
logger.info(log_entry)
return log_entry
|
en
| 0.699895
|
Given some event_info, create and return a valid WSGI request environ. # Extract remote user from context if Authorizer is enabled # Non-GET data is B64'd through the APIGW. # if method in ["POST", "PUT", "PATCH"]: # encoded_body = event_info['body'] # body = base64.b64decode(encoded_body) # else: # Will this generate unicode errors? # Early experiments indicate no, but this still looks unsafe to me. # Make header names canonical, e.g. content-type => Content-Type # if 'url' in params: # # new style # path = '/' + params.get('url') + "/" # else: # # old style # path = "/" # for key in sorted(params.keys()): # path = path + params[key] + "/" # # This determines if we should return # # site.com/resource/ : site.com/resource # # site.com/resource : site.com/resource # # vs. # # site.com/resource/ : site.com/resource/ # # site.com/resource : site.com/resource/ # # If no params are present, keep the slash. # if not trailing_slash and params.keys(): # path = path[:-1] # Input processing Given the WSGI environ and the response, log this event in Common Log Format. # Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
| 2.529758
| 3
|
vsts/vsts/release/v4_0/models/manual_intervention.py
|
kenkuo/azure-devops-python-api
| 0
|
6626415
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ManualIntervention(Model):
"""ManualIntervention.
:param approver:
:type approver: :class:`IdentityRef <release.v4_0.models.IdentityRef>`
:param comments:
:type comments: str
:param created_on:
:type created_on: datetime
:param id:
:type id: int
:param instructions:
:type instructions: str
:param modified_on:
:type modified_on: datetime
:param name:
:type name: str
:param release:
:type release: :class:`ReleaseShallowReference <release.v4_0.models.ReleaseShallowReference>`
:param release_definition:
:type release_definition: :class:`ReleaseDefinitionShallowReference <release.v4_0.models.ReleaseDefinitionShallowReference>`
:param release_environment:
:type release_environment: :class:`ReleaseEnvironmentShallowReference <release.v4_0.models.ReleaseEnvironmentShallowReference>`
:param status:
:type status: object
:param task_instance_id:
:type task_instance_id: str
:param url:
:type url: str
"""
_attribute_map = {
'approver': {'key': 'approver', 'type': 'IdentityRef'},
'comments': {'key': 'comments', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'instructions': {'key': 'instructions', 'type': 'str'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'release': {'key': 'release', 'type': 'ReleaseShallowReference'},
'release_definition': {'key': 'releaseDefinition', 'type': 'ReleaseDefinitionShallowReference'},
'release_environment': {'key': 'releaseEnvironment', 'type': 'ReleaseEnvironmentShallowReference'},
'status': {'key': 'status', 'type': 'object'},
'task_instance_id': {'key': 'taskInstanceId', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, approver=None, comments=None, created_on=None, id=None, instructions=None, modified_on=None, name=None, release=None, release_definition=None, release_environment=None, status=None, task_instance_id=None, url=None):
super(ManualIntervention, self).__init__()
self.approver = approver
self.comments = comments
self.created_on = created_on
self.id = id
self.instructions = instructions
self.modified_on = modified_on
self.name = name
self.release = release
self.release_definition = release_definition
self.release_environment = release_environment
self.status = status
self.task_instance_id = task_instance_id
self.url = url
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ManualIntervention(Model):
"""ManualIntervention.
:param approver:
:type approver: :class:`IdentityRef <release.v4_0.models.IdentityRef>`
:param comments:
:type comments: str
:param created_on:
:type created_on: datetime
:param id:
:type id: int
:param instructions:
:type instructions: str
:param modified_on:
:type modified_on: datetime
:param name:
:type name: str
:param release:
:type release: :class:`ReleaseShallowReference <release.v4_0.models.ReleaseShallowReference>`
:param release_definition:
:type release_definition: :class:`ReleaseDefinitionShallowReference <release.v4_0.models.ReleaseDefinitionShallowReference>`
:param release_environment:
:type release_environment: :class:`ReleaseEnvironmentShallowReference <release.v4_0.models.ReleaseEnvironmentShallowReference>`
:param status:
:type status: object
:param task_instance_id:
:type task_instance_id: str
:param url:
:type url: str
"""
_attribute_map = {
'approver': {'key': 'approver', 'type': 'IdentityRef'},
'comments': {'key': 'comments', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'instructions': {'key': 'instructions', 'type': 'str'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'release': {'key': 'release', 'type': 'ReleaseShallowReference'},
'release_definition': {'key': 'releaseDefinition', 'type': 'ReleaseDefinitionShallowReference'},
'release_environment': {'key': 'releaseEnvironment', 'type': 'ReleaseEnvironmentShallowReference'},
'status': {'key': 'status', 'type': 'object'},
'task_instance_id': {'key': 'taskInstanceId', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, approver=None, comments=None, created_on=None, id=None, instructions=None, modified_on=None, name=None, release=None, release_definition=None, release_environment=None, status=None, task_instance_id=None, url=None):
super(ManualIntervention, self).__init__()
self.approver = approver
self.comments = comments
self.created_on = created_on
self.id = id
self.instructions = instructions
self.modified_on = modified_on
self.name = name
self.release = release
self.release_definition = release_definition
self.release_environment = release_environment
self.status = status
self.task_instance_id = task_instance_id
self.url = url
|
en
| 0.49135
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- ManualIntervention. :param approver: :type approver: :class:`IdentityRef <release.v4_0.models.IdentityRef>` :param comments: :type comments: str :param created_on: :type created_on: datetime :param id: :type id: int :param instructions: :type instructions: str :param modified_on: :type modified_on: datetime :param name: :type name: str :param release: :type release: :class:`ReleaseShallowReference <release.v4_0.models.ReleaseShallowReference>` :param release_definition: :type release_definition: :class:`ReleaseDefinitionShallowReference <release.v4_0.models.ReleaseDefinitionShallowReference>` :param release_environment: :type release_environment: :class:`ReleaseEnvironmentShallowReference <release.v4_0.models.ReleaseEnvironmentShallowReference>` :param status: :type status: object :param task_instance_id: :type task_instance_id: str :param url: :type url: str
| 1.584211
| 2
|
bitmovin_api_sdk/encoding/inputs/s3_role_based/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
| 11
|
6626416
|
from bitmovin_api_sdk.encoding.inputs.s3_role_based.customdata.customdata_api import CustomdataApi
|
from bitmovin_api_sdk.encoding.inputs.s3_role_based.customdata.customdata_api import CustomdataApi
|
none
| 1
| 1.134507
| 1
|
|
rl_sandbox/algorithms/sac/sac_drq.py
|
chanb/rl_sandbox_public
| 14
|
6626417
|
<filename>rl_sandbox/algorithms/sac/sac_drq.py
import timeit
import torch
import torch.nn as nn
import rl_sandbox.constants as c
from rl_sandbox.algorithms.sac.sac import SAC
from rl_sandbox.algorithms.utils import aug_data
from rl_sandbox.auxiliary_tasks.auxiliary_tasks import AuxiliaryTask
class SACDrQ(SAC):
def __init__(self, model, policy_opt, qs_opt, alpha_opt, learn_alpha, buffer, algo_params, aux_tasks=AuxiliaryTask()):
super().__init__(model=model,
policy_opt=policy_opt,
qs_opt=qs_opt,
alpha_opt=alpha_opt,
learn_alpha=learn_alpha,
buffer=buffer,
algo_params=algo_params,
aux_tasks=aux_tasks)
self.evaluation_preprocessing = algo_params[c.EVALUATION_PREPROCESSING]
# Number of target Q augmentations
self.K = algo_params[c.K]
# Number of Q augmentations
self.M = algo_params[c.M]
def _compute_qs_loss(self, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, lengths):
batch_size = obss.shape[0]
m_aug_batch_size = batch_size * self.M
k_aug_batch_size = batch_size * self.K
rews, dones, discounting = rews.to(self.device), dones.to(self.device), discounting.to(self.device)
_, q1_val, q2_val, next_h_states = self.model.q_vals(
self.train_preprocessing(aug_data(data=obss, num_aug=self.M, aug_batch_size=m_aug_batch_size)),
aug_data(data=h_states, num_aug=self.M, aug_batch_size=m_aug_batch_size),
aug_data(data=acts, num_aug=self.M, aug_batch_size=m_aug_batch_size),
lengths=lengths.repeat(1, self.M).reshape(m_aug_batch_size))
with torch.no_grad():
next_acts, next_lprobs = self.model.act_lprob(
self.train_preprocessing(aug_data(data=next_obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
aug_data(data=next_h_states[::self.M], num_aug=self.K, aug_batch_size=k_aug_batch_size))
_, _, _, targ_next_h_states = self._target_model.q_vals(
self.train_preprocessing(aug_data(data=obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
aug_data(data=h_states, num_aug=self.K, aug_batch_size=k_aug_batch_size),
aug_data(data=acts, num_aug=self.K, aug_batch_size=k_aug_batch_size),
lengths=lengths.repeat(1, self.K).reshape(k_aug_batch_size))
min_q_targ, _, _, _ = self._target_model.q_vals(
self.train_preprocessing(aug_data(data=next_obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
targ_next_h_states,
next_acts)
min_q_targ = min_q_targ.reshape(batch_size, self.M).detach()
next_lprobs = next_lprobs.reshape(batch_size, self.M)
if hasattr(self.model, c.VALUE_RMS):
min_q_targ = self.model.value_rms.unnormalize(min_q_targ.cpu()).to(self.device)
v_next = torch.mean(min_q_targ - self.model.alpha.detach() * next_lprobs, dim=1, keepdim=True)
target = rews + (self._gamma ** discounting) * (1 - dones) * v_next
if hasattr(self.model, c.VALUE_RMS):
target = target.cpu()
self.model.value_rms.update(target)
target = self.model.value_rms.normalize(target).to(self.device)
q1_loss = ((q1_val - target.repeat(1, self.M).reshape(m_aug_batch_size, 1)) ** 2).sum() / self.M
q2_loss = ((q2_val - target.repeat(1, self.M).reshape(m_aug_batch_size, 1)) ** 2).sum() / self.M
return q1_loss, q2_loss
def update_qs(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, infos, lengths, update_info):
tic = timeit.default_timer()
self.qs_opt.zero_grad()
total_q1_loss = 0.
total_q2_loss = 0.
for grad_i in range(self._accum_num_grad):
opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
q1_loss, q2_loss = self._compute_qs_loss(obss[opt_idxes],
h_states[opt_idxes],
acts[opt_idxes],
rews[opt_idxes],
dones[opt_idxes],
next_obss[opt_idxes],
next_h_states[opt_idxes],
discounting[opt_idxes],
lengths[opt_idxes])
q1_loss /= self._batch_size
q2_loss /= self._batch_size
qs_loss = q1_loss + q2_loss
total_q1_loss += q1_loss.detach().cpu()
total_q2_loss += q2_loss.detach().cpu()
qs_loss.backward()
nn.utils.clip_grad_norm_(self.model.qs_parameters,
self._max_grad_norm)
self.qs_opt.step()
update_info[c.Q_UPDATE_TIME].append(timeit.default_timer() - tic)
update_info[c.Q1_LOSS].append(total_q1_loss.numpy())
update_info[c.Q2_LOSS].append(total_q2_loss.numpy())
def update(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
self._store_to_buffer(curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state)
self.step += 1
update_info = {}
if hasattr(self.model, c.OBS_RMS):
self.model.obs_rms.update(self.eval_preprocessing(torch.tensor(curr_obs)))
# Perform SAC update
if self.step >= self._buffer_warmup and self.step % self._steps_between_update == 0:
update_info[c.PI_LOSS] = []
update_info[c.Q1_LOSS] = []
update_info[c.Q2_LOSS] = []
update_info[c.ALPHA] = []
update_info[c.SAMPLE_TIME] = []
update_info[c.Q_UPDATE_TIME] = []
update_info[c.POLICY_UPDATE_TIME] = []
update_info[c.ALPHA_LOSS] = []
update_info[c.ALPHA_UPDATE_TIME] = []
for _ in range(self._num_gradient_updates // self._num_prefetch):
tic = timeit.default_timer()
obss, h_states, acts, rews, dones, next_obss, next_h_states, infos, lengths = self.buffer.sample_with_next_obs(
self._batch_size * self._num_prefetch, next_obs, next_h_state)
eval_obss = self.evaluation_preprocessing(obss)
rews = rews * self._reward_scaling
discounting = infos[c.DISCOUNTING]
update_info[c.SAMPLE_TIME].append(timeit.default_timer() - tic)
for batch_i in range(self._num_prefetch):
self._update_num += 1
batch_start_idx = batch_i * self._batch_size
# Auxiliary tasks are usually for shared layers, which is updated along with Q
aux_loss, aux_update_info = self._aux_tasks.compute_loss(next_obs, next_h_state)
if hasattr(aux_loss, c.BACKWARD):
aux_loss.backward()
# Update Q functions
self.update_qs(batch_start_idx,
obss,
h_states,
acts,
rews,
dones,
next_obss,
next_h_states,
discounting,
infos,
lengths,
update_info)
self._aux_tasks.step()
update_info.update(aux_update_info)
if self._update_num % self._actor_update_interval == 0:
# Update policy
self.update_policy(batch_start_idx,
eval_obss,
h_states,
acts,
rews,
dones,
next_obss,
next_h_states,
discounting,
infos,
lengths,
update_info)
# Update Alpha
if self.learn_alpha:
self.update_alpha(batch_start_idx,
eval_obss,
h_states,
acts,
rews,
dones,
next_obss,
next_h_states,
discounting,
infos,
lengths,
update_info)
if self._update_num % self._target_update_interval == 0:
update_info[c.TARGET_UPDATE_TIME] = []
tic = timeit.default_timer()
self._update_target_network()
update_info[c.TARGET_UPDATE_TIME].append(timeit.default_timer() - tic)
update_info[c.ALPHA].append(self.model.alpha.detach().cpu().numpy())
if hasattr(self.model, c.VALUE_RMS):
update_info[f"{c.VALUE_RMS}/{c.MEAN}"] = self.model.value_rms.mean.numpy()
update_info[f"{c.VALUE_RMS}/{c.VARIANCE}"] = self.model.value_rms.var.numpy()
return True, update_info
return False, update_info
|
<filename>rl_sandbox/algorithms/sac/sac_drq.py
import timeit
import torch
import torch.nn as nn
import rl_sandbox.constants as c
from rl_sandbox.algorithms.sac.sac import SAC
from rl_sandbox.algorithms.utils import aug_data
from rl_sandbox.auxiliary_tasks.auxiliary_tasks import AuxiliaryTask
class SACDrQ(SAC):
    """SAC with Data-regularized Q (DrQ).

    Extends the base SAC algorithm by averaging Q-value targets over K
    random augmentations of the observations and Q-value estimates over
    M random augmentations, following the DrQ regularization scheme.
    """

    # NOTE(review): `aux_tasks=AuxiliaryTask()` is a mutable default shared
    # across all instances constructed without an explicit argument — the same
    # pattern exists in the base SAC constructor, so it is kept here; confirm
    # that AuxiliaryTask is stateless by default before relying on it.
    def __init__(self, model, policy_opt, qs_opt, alpha_opt, learn_alpha, buffer, algo_params, aux_tasks=AuxiliaryTask()):
        """Construct the DrQ variant of SAC.

        All constructor arguments are forwarded unchanged to ``SAC.__init__``;
        the DrQ-specific settings are read out of ``algo_params``.
        """
        super().__init__(model=model,
                         policy_opt=policy_opt,
                         qs_opt=qs_opt,
                         alpha_opt=alpha_opt,
                         learn_alpha=learn_alpha,
                         buffer=buffer,
                         algo_params=algo_params,
                         aux_tasks=aux_tasks)
        # Preprocessing applied to sampled observations before the policy /
        # alpha updates (distinct from the augmenting train preprocessing
        # used inside the Q-loss).
        self.evaluation_preprocessing = algo_params[c.EVALUATION_PREPROCESSING]
        # Number of target Q augmentations
        self.K = algo_params[c.K]
        # Number of Q augmentations
        self.M = algo_params[c.M]

    def _compute_qs_loss(self, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, lengths):
        """Compute the twin Q-function losses on an augmented minibatch.

        Current Q estimates are computed on M augmented copies of
        (obs, act); the bootstrap target is averaged over K augmented
        copies of the next observation. Returns ``(q1_loss, q2_loss)``,
        each a summed squared error divided by the number of augmentations.
        """
        batch_size = obss.shape[0]
        m_aug_batch_size = batch_size * self.M  # rows after M-fold augmentation
        k_aug_batch_size = batch_size * self.K  # rows after K-fold augmentation
        rews, dones, discounting = rews.to(self.device), dones.to(self.device), discounting.to(self.device)
        # Q estimates on M augmented copies; q1_val/q2_val have
        # m_aug_batch_size rows.
        _, q1_val, q2_val, next_h_states = self.model.q_vals(
            self.train_preprocessing(aug_data(data=obss, num_aug=self.M, aug_batch_size=m_aug_batch_size)),
            aug_data(data=h_states, num_aug=self.M, aug_batch_size=m_aug_batch_size),
            aug_data(data=acts, num_aug=self.M, aug_batch_size=m_aug_batch_size),
            lengths=lengths.repeat(1, self.M).reshape(m_aug_batch_size))
        with torch.no_grad():
            # next_h_states was expanded M-fold above; [::self.M] strips it
            # back to one hidden state per sample before K-fold augmentation.
            next_acts, next_lprobs = self.model.act_lprob(
                self.train_preprocessing(aug_data(data=next_obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
                aug_data(data=next_h_states[::self.M], num_aug=self.K, aug_batch_size=k_aug_batch_size))
            # Roll the target network's hidden state forward on the current
            # transition before evaluating the bootstrap value.
            _, _, _, targ_next_h_states = self._target_model.q_vals(
                self.train_preprocessing(aug_data(data=obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
                aug_data(data=h_states, num_aug=self.K, aug_batch_size=k_aug_batch_size),
                aug_data(data=acts, num_aug=self.K, aug_batch_size=k_aug_batch_size),
                lengths=lengths.repeat(1, self.K).reshape(k_aug_batch_size))
            min_q_targ, _, _, _ = self._target_model.q_vals(
                self.train_preprocessing(aug_data(data=next_obss, num_aug=self.K, aug_batch_size=k_aug_batch_size)),
                targ_next_h_states,
                next_acts)
            # NOTE(review): min_q_targ and next_lprobs each hold K augmented
            # copies per sample (k_aug_batch_size rows), so these reshapes
            # presumably should use self.K; as written they are only correct
            # when K == M — confirm against the intended DrQ configuration.
            min_q_targ = min_q_targ.reshape(batch_size, self.M).detach()
            next_lprobs = next_lprobs.reshape(batch_size, self.M)
            if hasattr(self.model, c.VALUE_RMS):
                # Targets were learned in normalized value space; undo the
                # normalization before forming the Bellman target.
                min_q_targ = self.model.value_rms.unnormalize(min_q_targ.cpu()).to(self.device)
            # Soft value estimate averaged over the augmentations (dim=1).
            v_next = torch.mean(min_q_targ - self.model.alpha.detach() * next_lprobs, dim=1, keepdim=True)
            target = rews + (self._gamma ** discounting) * (1 - dones) * v_next
            if hasattr(self.model, c.VALUE_RMS):
                target = target.cpu()
                self.model.value_rms.update(target)
                target = self.model.value_rms.normalize(target).to(self.device)
        # Each sample's target is broadcast against its M augmented Q
        # estimates; dividing by M averages over augmentations.
        q1_loss = ((q1_val - target.repeat(1, self.M).reshape(m_aug_batch_size, 1)) ** 2).sum() / self.M
        q2_loss = ((q2_val - target.repeat(1, self.M).reshape(m_aug_batch_size, 1)) ** 2).sum() / self.M
        return q1_loss, q2_loss

    def update_qs(self, batch_start_idx, obss, h_states, acts, rews, dones, next_obss, next_h_states, discounting, infos, lengths, update_info):
        """Run one gradient step on both Q functions.

        Accumulates gradients over ``self._accum_num_grad`` sub-batches,
        clips the gradient norm, steps the optimizer, and records timing
        and loss statistics into ``update_info``.
        """
        tic = timeit.default_timer()
        self.qs_opt.zero_grad()
        total_q1_loss = 0.
        total_q2_loss = 0.
        for grad_i in range(self._accum_num_grad):
            # Slice out this accumulation step's sub-batch.
            opt_idxes = range(batch_start_idx + grad_i * self._num_samples_per_accum,
                              batch_start_idx + (grad_i + 1) * self._num_samples_per_accum)
            q1_loss, q2_loss = self._compute_qs_loss(obss[opt_idxes],
                                                     h_states[opt_idxes],
                                                     acts[opt_idxes],
                                                     rews[opt_idxes],
                                                     dones[opt_idxes],
                                                     next_obss[opt_idxes],
                                                     next_h_states[opt_idxes],
                                                     discounting[opt_idxes],
                                                     lengths[opt_idxes])
            q1_loss /= self._batch_size
            q2_loss /= self._batch_size
            qs_loss = q1_loss + q2_loss
            total_q1_loss += q1_loss.detach().cpu()
            total_q2_loss += q2_loss.detach().cpu()
            qs_loss.backward()
        nn.utils.clip_grad_norm_(self.model.qs_parameters,
                                 self._max_grad_norm)
        self.qs_opt.step()
        update_info[c.Q_UPDATE_TIME].append(timeit.default_timer() - tic)
        update_info[c.Q1_LOSS].append(total_q1_loss.numpy())
        update_info[c.Q2_LOSS].append(total_q2_loss.numpy())

    def update(self, curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state):
        """Store the transition and, when due, run SAC-DrQ gradient updates.

        Returns ``(updated, update_info)`` where ``updated`` is True iff at
        least one gradient update was performed this call.
        """
        self._store_to_buffer(curr_obs, curr_h_state, act, rew, done, info, next_obs, next_h_state)
        self.step += 1
        update_info = {}
        if hasattr(self.model, c.OBS_RMS):
            # Keep observation normalization statistics current.
            # NOTE(review): uses self.eval_preprocessing, presumably defined
            # on the SAC base class — confirm it exists there.
            self.model.obs_rms.update(self.eval_preprocessing(torch.tensor(curr_obs)))
        # Perform SAC update
        if self.step >= self._buffer_warmup and self.step % self._steps_between_update == 0:
            update_info[c.PI_LOSS] = []
            update_info[c.Q1_LOSS] = []
            update_info[c.Q2_LOSS] = []
            update_info[c.ALPHA] = []
            update_info[c.SAMPLE_TIME] = []
            update_info[c.Q_UPDATE_TIME] = []
            update_info[c.POLICY_UPDATE_TIME] = []
            update_info[c.ALPHA_LOSS] = []
            update_info[c.ALPHA_UPDATE_TIME] = []
            for _ in range(self._num_gradient_updates // self._num_prefetch):
                tic = timeit.default_timer()
                # Sample a large batch once, then consume it in
                # _num_prefetch smaller slices below.
                obss, h_states, acts, rews, dones, next_obss, next_h_states, infos, lengths = self.buffer.sample_with_next_obs(
                    self._batch_size * self._num_prefetch, next_obs, next_h_state)
                eval_obss = self.evaluation_preprocessing(obss)
                rews = rews * self._reward_scaling
                discounting = infos[c.DISCOUNTING]
                update_info[c.SAMPLE_TIME].append(timeit.default_timer() - tic)
                for batch_i in range(self._num_prefetch):
                    self._update_num += 1
                    batch_start_idx = batch_i * self._batch_size
                    # Auxiliary tasks are usually for shared layers, which is updated along with Q
                    aux_loss, aux_update_info = self._aux_tasks.compute_loss(next_obs, next_h_state)
                    if hasattr(aux_loss, c.BACKWARD):
                        aux_loss.backward()
                    # Update Q functions
                    self.update_qs(batch_start_idx,
                                   obss,
                                   h_states,
                                   acts,
                                   rews,
                                   dones,
                                   next_obss,
                                   next_h_states,
                                   discounting,
                                   infos,
                                   lengths,
                                   update_info)
                    self._aux_tasks.step()
                    update_info.update(aux_update_info)
                    if self._update_num % self._actor_update_interval == 0:
                        # Update policy
                        self.update_policy(batch_start_idx,
                                           eval_obss,
                                           h_states,
                                           acts,
                                           rews,
                                           dones,
                                           next_obss,
                                           next_h_states,
                                           discounting,
                                           infos,
                                           lengths,
                                           update_info)
                        # Update Alpha
                        if self.learn_alpha:
                            self.update_alpha(batch_start_idx,
                                              eval_obss,
                                              h_states,
                                              acts,
                                              rews,
                                              dones,
                                              next_obss,
                                              next_h_states,
                                              discounting,
                                              infos,
                                              lengths,
                                              update_info)
                    if self._update_num % self._target_update_interval == 0:
                        # Polyak / hard update of the target Q network.
                        update_info[c.TARGET_UPDATE_TIME] = []
                        tic = timeit.default_timer()
                        self._update_target_network()
                        update_info[c.TARGET_UPDATE_TIME].append(timeit.default_timer() - tic)
            update_info[c.ALPHA].append(self.model.alpha.detach().cpu().numpy())
            if hasattr(self.model, c.VALUE_RMS):
                update_info[f"{c.VALUE_RMS}/{c.MEAN}"] = self.model.value_rms.mean.numpy()
                update_info[f"{c.VALUE_RMS}/{c.VARIANCE}"] = self.model.value_rms.var.numpy()
            return True, update_info
        return False, update_info
|
en
| 0.898768
|
# Number of target Q augmentations # Number of Q augmentations # Perform SAC update # Auxiliary tasks are usually for shared layers, which is updated along with Q # Update Q functions # Update policy # Update Alpha
| 1.978395
| 2
|
scripts/auth.py
|
maxkraus1/brightspace_admin_app
| 0
|
6626418
|
import json
import os
import requests
# Resolve the credentials file relative to this script's location.
# (Renamed from `dir`, which shadowed the `dir` builtin.)
script_dir = os.path.dirname(__file__)
creds_json = os.path.join(script_dir, "records/credentials.json")

with open(creds_json) as infile:
    creds = json.load(infile)  # get current credentials

# OAuth2 refresh-token grant: exchange the stored refresh token for a new
# access/refresh token pair.
payload = {
    "grant_type": "refresh_token",
    "refresh_token": creds["refresh_token"],
    "client_id": creds["client_id"],
    "client_secret": creds["client_secret"],
    "scope": creds["scope"]
}
url = "https://auth.brightspace.com/core/connect/token"
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
# timeout added so a hung auth server cannot block the script forever
response = requests.post(url, headers=headers, data=payload, timeout=30)

if response.status_code == 200:  # check if response was successful
    new_creds = json.loads(response.text)
    creds.update(new_creds)  # update stored credentials with new access and refresh tokens
    with open(creds_json, "w") as outfile:
        json.dump(creds, outfile, indent=4)  # save new credentials to records/credentials.json
else:
    print("Error: " + str(response.status_code))
    # response.text is already a str; the original json.dumps(..., indent=4)
    # merely re-quoted it (indent is a no-op on a scalar). Print it directly.
    print(response.text)
|
import json
import os
import requests
# Resolve the credentials file relative to this script's location.
# (Renamed from `dir`, which shadowed the `dir` builtin.)
script_dir = os.path.dirname(__file__)
creds_json = os.path.join(script_dir, "records/credentials.json")

with open(creds_json) as infile:
    creds = json.load(infile)  # get current credentials

# OAuth2 refresh-token grant: exchange the stored refresh token for a new
# access/refresh token pair.
payload = {
    "grant_type": "refresh_token",
    "refresh_token": creds["refresh_token"],
    "client_id": creds["client_id"],
    "client_secret": creds["client_secret"],
    "scope": creds["scope"]
}
url = "https://auth.brightspace.com/core/connect/token"
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
# timeout added so a hung auth server cannot block the script forever
response = requests.post(url, headers=headers, data=payload, timeout=30)

if response.status_code == 200:  # check if response was successful
    new_creds = json.loads(response.text)
    creds.update(new_creds)  # update stored credentials with new access and refresh tokens
    with open(creds_json, "w") as outfile:
        json.dump(creds, outfile, indent=4)  # save new credentials to records/credentials.json
else:
    print("Error: " + str(response.status_code))
    # response.text is already a str; the original json.dumps(..., indent=4)
    # merely re-quoted it (indent is a no-op on a scalar). Print it directly.
    print(response.text)
|
en
| 0.912905
|
# get current credentials # check if response was successful # update stored credentials with new access and refresh tokens # save new credentials to records/credentials.json
| 2.793135
| 3
|
DailyProgrammer/DP20141008B.py
|
DayGitH/Python-Challenges
| 2
|
6626419
|
<reponame>DayGitH/Python-Challenges
"""
[10/08/2014] Challenge #183 [Intermediate] Edge Matching Tile Puzzle
https://www.reddit.com/r/dailyprogrammer/comments/2ip1gj/10082014_challenge_183_intermediate_edge_matching/
#Credit:
Thanks to /u/skeeto for this challenge. As posted on our /r/dailyprogrammer_ideas subreddit.
#Description:
There's a tile puzzle game you might find at your local game store. There are 9 tiles to be arranged in a 3x3 grid.
Each of a tile's edges contains half of some image, to be met up with the appropriate half on another tile. The images are
usually animals (cats, beetles). There are 4 kinds of images in total. For example, here's a picture of completed
puzzle.
* (http://i.imgur.com/NbLum43.jpg)
Your task is to write a program that finds solutions to a given set of tiles.
#Formal Input Description:
On standard input you'll be given a number, n, indicating the size of the side of the puzzle. For example, for a 3x3
puzzle n = 3. What will follow are n * n lines of 4 letters indicating the edges of each tile. The order of the edges
is north, east, south, west (clockwise). Your program should be able to handle up to n = 5.
Instead of images, we'll use the 4 colors Cyan, Magenta, Yellow, and Black (CMYK). The two "halves" are uppercase and
lower case. For two tiles to legally touch, an uppercase letter can only touch its lowercase matching letter on an
adjacent tile and vice versa.
For the sake of communication, [ the tiles will be labeled A-Z] (http://i.imgur.com/pnVvQ8L.jpg) in the order that they
were input. So on a 3x3 puzzle, the tiles are A-I.
#Formal Output Description:
This is where you can get creative. The simplest output could just list the tiles, left to right, top to bottom, and
their orientations (N, E, S, W). Or if you're feeling ambitious, output an image showing the completed tile
arrangement. For a 3x3 puzzle, there are over 95 billion possible such arrangements (9! * 4^9), though all but a
handful of them will be illegal.
You may output just one solution or all solutions. Keep symmetry in mind.
#Sample Input 1
3
CYMk
CmKm
cKyM
cYkY
CMky
ckyM
CYMK
CMKy
CkmY
This corresponds to these tiles:
* (http://i.imgur.com/eok9gTt.png)
With these graphics, half circles must be matched up with half squares of the same color. The solution should look like
those [cannon bullet things from Super Mario.] (http://i.imgur.com/etCrWXi.jpg)
#Sample Input 2
3
ycKC
kKcY
cKMc
mYmk
CCYk
mMyC
MyYk
mKMy
YCMk
#Sample Output 1
Simplest output showing one solution:
AN CW GE BW FE DS HE IS EN
A more graphical output (same solution):
+---------+
| C M Y |
|kAYyCcCGM|
| M K K |
| m k k |
|KBCcFyYDY|
| m M c |
| M m C |
|CHKkIYyEM|
| y C k |
+---------+
Or drawing the solution:
* (http://i.imgur.com/GJh2eOI.png)
#Challenge Input #1:
4
mcYC
MmCk
yYcm
yMYC
Ykcy
kkkm
KKcy
KMYK
YMkk
ymKc
MyMK
CmmY
kMMY
yCCM
yccc
kcck
Graphical version (if this helps):
* (http://i.imgur.com/mpO8HGJ.png)
#Challenge Input #2:
5
cKCk
yYcc
YcCK
kKCM
CMKc
cKYC
kYcm
KYyY
Mccm
yKcm
mykK
MMCm
ckYC
ycmm
MmKM
kymc
KMMK
KcyM
kYck
YCKM
myYm
kYyY
CMKM
yYCM
YKyk
Graphical version:
* (http://i.imgur.com/Msa9F6G.png)
"""
def main():
    """Entry point placeholder — the challenge solution is not implemented."""
    pass


if __name__ == "__main__":
    main()
|
"""
[10/08/2014] Challenge #183 [Intermediate] Edge Matching Tile Puzzle
https://www.reddit.com/r/dailyprogrammer/comments/2ip1gj/10082014_challenge_183_intermediate_edge_matching/
#Credit:
Thanks to /u/skeeto for this challenge. As posted on our /r/dailyprogrammer_ideas subreddit.
#Description:
There's a tile puzzle game you might find at your local game store. There are 9 tiles to be arranged in a 3x3 grid.
Each of a tile's edges contains half of some image, to be met up with the appropriate half on another tile. The images are
usually animals (cats, beetles). There are 4 kinds of images in total. For example, here's a picture of completed
puzzle.
* (http://i.imgur.com/NbLum43.jpg)
Your task is to write a program that finds solutions to a given set of tiles.
#Formal Input Description:
On standard input you'll be given a number, n, indicating the size of the side of the puzzle. For example, for a 3x3
puzzle n = 3. What will follow are n * n lines of 4 letters indicating the edges of each tile. The order of the edges
is north, east, south, west (clockwise). Your program should be able to handle up to n = 5.
Instead of images, we'll use the 4 colors Cyan, Magenta, Yellow, and Black (CMYK). The two "halves" are uppercase and
lower case. For two tiles to legally touch, an uppercase letter can only touch its lowercase matching letter on an
adjacent tile and vice versa.
For the sake of communication, [ the tiles will be labeled A-Z] (http://i.imgur.com/pnVvQ8L.jpg) in the order that they
were input. So on a 3x3 puzzle, the tiles are A-I.
#Formal Output Description:
This is where you can get creative. The simplest output could just list the tiles, left to right, top to bottom, and
their orientations (N, E, S, W). Or if you're feeling ambitious, output an image showing the completed tile
arrangement. For a 3x3 puzzle, there are over 95 billion possible such arrangements (9! * 4^9), though all but a
handful of them will be illegal.
You may output just one solution or all solutions. Keep symmetry in mind.
#Sample Input 1
3
CYMk
CmKm
cKyM
cYkY
CMky
ckyM
CYMK
CMKy
CkmY
This corresponds to these tiles:
* (http://i.imgur.com/eok9gTt.png)
With these graphics, half circles must be matched up with half squares of the same color. The solution should look like
those [cannon bullet things from Super Mario.] (http://i.imgur.com/etCrWXi.jpg)
#Sample Input 2
3
ycKC
kKcY
cKMc
mYmk
CCYk
mMyC
MyYk
mKMy
YCMk
#Sample Output 1
Simplest output showing one solution:
AN CW GE BW FE DS HE IS EN
A more graphical output (same solution):
+---------+
| C M Y |
|kAYyCcCGM|
| M K K |
| m k k |
|KBCcFyYDY|
| m M c |
| M m C |
|CHKkIYyEM|
| y C k |
+---------+
Or drawing the solution:
* (http://i.imgur.com/GJh2eOI.png)
#Challenge Input #1:
4
mcYC
MmCk
yYcm
yMYC
Ykcy
kkkm
KKcy
KMYK
YMkk
ymKc
MyMK
CmmY
kMMY
yCCM
yccc
kcck
Graphical version (if this helps):
* (http://i.imgur.com/mpO8HGJ.png)
#Challenge Input #2:
5
cKCk
yYcc
YcCK
kKCM
CMKc
cKYC
kYcm
KYyY
Mccm
yKcm
mykK
MMCm
ckYC
ycmm
MmKM
kymc
KMMK
KcyM
kYck
YCKM
myYm
kYyY
CMKM
yYCM
YKyk
Graphical version:
* (http://i.imgur.com/Msa9F6G.png)
"""
def main():
    """Entry point placeholder — the challenge solution is not implemented."""
    pass


if __name__ == "__main__":
    main()
|
en
| 0.796323
|
[10/08/2014] Challenge #183 [Intermediate] Edge Matching Tile Puzzle https://www.reddit.com/r/dailyprogrammer/comments/2ip1gj/10082014_challenge_183_intermediate_edge_matching/ #Credit: Thanks to /u/skeeto for this challenge. As posted on our /r/dailyprogrammer_ideas subreddit. #Description: There's a tile puzzle game you might find at your local game store. There are 9 tiles to be arranged in a 3x3 grid. Each of a tile's contains half of some image, to be met up with the appropriate half on another tile. The images are usually animals (cats, beetles). There are 4 kinds of images in total. For example, here's a picture of completed puzzle. * (http://i.imgur.com/NbLum43.jpg) Your task is to write a program that finds solutions to a given set of tiles. #Formal Input Description: On standard input you'll be given a number, n, indicating the size of the side of the puzzle. For example, for a 3x3 puzzle n = 3. What will follow are n * n lines of 4 letters indicating the edges of each tile. The order of the edges is north, east, south, west (clockwise). Your program should be able to handle up to n = 5. Instead of images, we'll use the 4 colors Cyan, Magenta, Yellow, and Black (CMYK). The two "halves" are uppercase and lower case. For two tiles to legally touch, an uppercase letter can only touch its lowercase matchin letter on an adjacent tile and vice versa. For the sake of communication, [ the tiles will be labeled A-Z] (http://i.imgur.com/pnVvQ8L.jpg) in the order that they were input. So on a 3x3 puzzle, the tiles are A-I. #Formal Output Description: This is where you can get creative. The simplest output could just list the tiles, left to right, top to bottom, and their orientations (N, E, S, W). Or if you're feeling ambitious, output an image showing the completed tile arrangement. For a 3x3 puzzle, there are over 95 billion possible such arrangements (9! * 4^9), though all but a handful of them will be illegal. You may output just one solution or all solutions. 
Keep symmetry in mind. #Sample Input 1 3 CYMk CmKm cKyM cYkY CMky ckyM CYMK CMKy CkmY This corresponds to these tiles: * (http://i.imgur.com/eok9gTt.png) With these graphics, half circles must be matched up with half squares of the same color. The solution should look like those [cannon bullet things from Super Mario.] (http://i.imgur.com/etCrWXi.jpg) #Sample Input 2 3 ycKC kKcY cKMc mYmk CCYk mMyC MyYk mKMy YCMk #Sample Output 1 Simplest output showing one solution: AN CW GE BW FE DS HE IS EN A more graphical output (same solution): +---------+ | C M Y | |kAYyCcCGM| | M K K | | m k k | |KBCcFyYDY| | m M c | | M m C | |CHKkIYyEM| | y C k | +---------+ Or drawing the solution: * (http://i.imgur.com/GJh2eOI.png) #Challenge Input #1: 4 mcYC MmCk yYcm yMYC Ykcy kkkm KKcy KMYK YMkk ymKc MyMK CmmY kMMY yCCM yccc kcck Graphical version (if this helps): * (http://i.imgur.com/mpO8HGJ.png) #Challenge Input #2: 5 cKCk yYcc YcCK kKCM CMKc cKYC kYcm KYyY Mccm yKcm mykK MMCm ckYC ycmm MmKM kymc KMMK KcyM kYck YCKM myYm kYyY CMKM yYCM YKyk Graphical version: * (http://i.imgur.com/Msa9F6G.png)
| 3.623425
| 4
|
tango.py
|
cg2v/Tango
| 41
|
6626420
|
#
# Tango is a job management service that manages requests for jobs to
# be run in virtual machines. Tango consists of five main components:
#
# 1. The Restful API: This is the interface for Tango that receives
# requests from clients via HTTP. AddJob requests are converted
# into a form that the tangoServer understands and then passed on
# to an instance of the tangoServer class. (restful_tango/*)
#
# 2. The TangoServer Class: This is a class that accepts addJob requests
# from the restful server. Job requests are validated and placed in
# a job queue. This class also implements various administrative
# functions to manage instances of tangoServer. (tango.py)
#
# 3. The Job Manager: This thread runs continuously. It watches the job
# queue for new job requests. When it finds one it creates a new
# worker thread to handle the job, and assigns a preallocated or new VM
# to the job. (jobQueue.py)
#
# 4. Workers: Worker threads do the actual work of running a job. The
# process of running a job is broken down into the following steps:
# (1) initializeVM, (2) waitVM, (3) copyIn, (4) runJob, (5)
# copyOut, (6) destroyVM. The actual process involved in
# each of those steps is handled by a virtual machine management
# system (VMMS) such as Local or Amazon EC2. Each job request
# specifies the VMMS to use. The worker thread dynamically loads
# and uses the module written for that particular VMMS. (worker.py
# and vmms/*.py)
#
# 5. The Preallocator: Virtual machines can preallocated in a pool in
# order to reduce response time. Each virtual machine image has its
# own pool. Users control the size of each pool via an external HTTP
# call. Each time a machine is assigned to a job and removed from
# the pool, the preallocator creates another instance and adds it
# to the pool. (preallocator.py)
import threading
import logging
import time
import stat
import re
import os
from datetime import datetime
from jobManager import JobManager
from preallocator import Preallocator
from jobQueue import JobQueue
from tangoObjects import TangoJob
from config import Config
class TangoServer(object):
    """TangoServer - Implements the API functions that the server accepts.

    Validated jobs are placed on a live job queue; invalid jobs go to the
    dead queue. Virtual machines are obtained from a Preallocator backed by
    the VMMS backend named in Config.VMMS_NAME.
    """

    def __init__(self):
        self.daemon = True

        # Import and instantiate only the configured VMMS backend; the
        # others may not even be installable in this deployment.
        vmms = None
        if Config.VMMS_NAME == "tashiSSH":
            from vmms.tashiSSH import TashiSSH
            vmms = TashiSSH()
        elif Config.VMMS_NAME == "ec2SSH":
            from vmms.ec2SSH import Ec2SSH
            vmms = Ec2SSH()
        elif Config.VMMS_NAME == "localDocker":
            from vmms.localDocker import LocalDocker
            vmms = LocalDocker()
        elif Config.VMMS_NAME == "distDocker":
            from vmms.distDocker import DistDocker
            vmms = DistDocker()
        self.preallocator = Preallocator({Config.VMMS_NAME: vmms})
        self.jobQueue = JobQueue(self.preallocator)
        if not Config.USE_REDIS:
            # creates a local Job Manager if there is no persistent
            # memory between processes. Otherwise, JobManager will
            # be initiated separately
            JobManager(self.jobQueue).start()
        logging.basicConfig(
            filename=Config.LOGFILE,
            format="%(levelname)s|%(asctime)s|%(name)s|%(message)s",
            level=Config.LOGLEVEL,
        )
        self.start_time = time.time()
        self.log = logging.getLogger("TangoServer")
        self.log.info("Starting Tango server")

    def addJob(self, job):
        """addJob - Add a job to the job queue.

        Returns the queue's job id on success, -1 if validation failed
        (the job is then moved to the dead queue).
        """
        Config.job_requests += 1
        self.log.debug("Received addJob request")
        ret = self.__validateJob(job, self.preallocator.vmms)
        self.log.info("Done validating job %s" % (job.name))
        if ret == 0:
            return self.jobQueue.add(job)
        else:
            self.jobQueue.addDead(job)
            return -1

    def delJob(self, id, deadjob):
        """delJob - Delete a job
        @param id: Id of job to delete
        @param deadjob - If 0, move the job from the live queue to the
        dead queue. If non-zero, remove the job from the dead queue
        and discard. Use with caution!
        """
        self.log.debug("Received delJob(%d, %d) request" % (id, deadjob))
        return self.jobQueue.delJob(id, deadjob)

    def getJobs(self, item):
        """getJobs - Return the list of live jobs (item == 0) or the
        list of dead jobs (item == -1). Any other value of item, or an
        internal error, yields an empty list.
        """
        try:
            self.log.debug("Received getJobs(%s) request" % (item))
            if item == -1:  # return the list of dead jobs
                return self.jobQueue.deadJobs.values()
            elif item == 0:  # return the list of live jobs
                return self.jobQueue.liveJobs.values()
            else:  # invalid parameter
                return []
        except Exception as e:
            self.log.debug("getJobs: %s" % str(e))
            # Fix: previously fell through and returned None, breaking
            # callers that iterate the result. Honor the list contract.
            return []

    def preallocVM(self, vm, num):
        """preallocVM - Set the pool size for VMs of type vm to num.

        Returns 0 on success, -2 for invalid arguments, -3 for an unknown
        image, -1 on internal error.
        """
        self.log.debug("Received preallocVM(%s,%d)request" % (vm.name, num))
        try:
            # Fix: validate the arguments before dereferencing vm.vmms;
            # previously a None vm raised AttributeError and returned -1
            # instead of the documented -2.
            if not vm or num < 0:
                return -2
            vmms = self.preallocator.vmms[vm.vmms]
            if vm.image not in vmms.getImages():
                self.log.error("Invalid image name")
                return -3
            (name, ext) = os.path.splitext(vm.image)
            vm.name = name
            self.preallocator.update(vm, num)
            return 0
        except Exception as err:
            self.log.error("preallocVM failed: %s" % err)
            return -1

    def getVMs(self, vmms_name):
        """getVMs - return the list of VMs managed by the service vmms_name"""
        self.log.debug("Received getVMs request(%s)" % vmms_name)
        try:
            if vmms_name in self.preallocator.vmms:
                vmms_inst = self.preallocator.vmms[vmms_name]
                return vmms_inst.getVMs()
            else:
                return []
        except Exception as err:
            self.log.error("getVMs request failed: %s" % err)
            return []

    def delVM(self, vmName, id):
        """delVM - delete a specific VM instance from a pool"""
        self.log.debug("Received delVM request(%s, %d)" % (vmName, id))
        try:
            if not vmName or vmName == "" or not id:
                return -1
            return self.preallocator.destroyVM(vmName, id)
        except Exception as err:
            self.log.error("delVM request failed: %s" % err)
            return -1

    def getPool(self, vmName):
        """getPool - Return the current members of a pool and its free list"""
        self.log.debug("Received getPool request(%s)" % (vmName))
        try:
            if not vmName or vmName == "":
                return []
            result = self.preallocator.getPool(vmName)
            return [
                "pool_size=%d" % len(result["pool"]),
                "free_size=%d" % len(result["free"]),
                "pool=%s" % result["pool"],
                "free=%s" % result["free"],
            ]
        except Exception as err:
            self.log.error("getPool request failed: %s" % err)
            return []

    def getInfo(self):
        """getInfo - return various statistics about the Tango daemon"""
        stats = {}
        stats["elapsed_secs"] = time.time() - self.start_time
        stats["job_requests"] = Config.job_requests
        stats["job_retries"] = Config.job_retries
        stats["waitvm_timeouts"] = Config.waitvm_timeouts
        stats["runjob_timeouts"] = Config.runjob_timeouts
        stats["copyin_errors"] = Config.copyin_errors
        stats["runjob_errors"] = Config.runjob_errors
        stats["copyout_errors"] = Config.copyout_errors
        stats["num_threads"] = threading.activeCount()
        return stats

    #
    # Helper functions
    #

    def resetTango(self, vmms):
        """resetTango - resets Tango to a clean predictable state and
        ensures that it has a working virtualization environment. A side
        effect is that also checks that each supported VMMS is actually
        running.
        """
        self.log.debug("Received resetTango request.")
        # Fix: initialized up front so the except clause below cannot hit
        # an UnboundLocalError when a failure occurs before/within the loop.
        vmms_name = None
        try:
            # For each supported VMM system, get the instances it knows about,
            # and kill those in the current Tango name space.
            for vmms_name in vmms:
                vobj = vmms[vmms_name]
                vms = vobj.getVMs()
                self.log.debug("Pre-existing VMs: %s" % [vm.name for vm in vms])
                namelist = []
                for vm in vms:
                    if re.match("%s-" % Config.PREFIX, vm.name):
                        vobj.destroyVM(vm)
                        # Need a consistent abstraction for a vm between
                        # interfaces
                        namelist.append(vm.name)
                if namelist:
                    self.log.warning(
                        "Killed these %s VMs on restart: %s" % (vmms_name, namelist)
                    )
            for _, job in self.jobQueue.liveJobs.items():
                if not job.isNotAssigned():
                    job.makeUnassigned()
                self.log.debug(
                    "job: %s, assigned: %s" % (str(job.name), str(job.assigned))
                )
        except Exception as err:
            self.log.error("resetTango: Call to VMMS %s failed: %s" % (vmms_name, err))
            os._exit(1)

    def __validateJob(self, job, vmms):
        """validateJob - validate the input arguments in an addJob request.

        Returns 0 if the job is acceptable, -1 otherwise; every problem is
        logged and appended to the job's trace.
        """
        errors = 0

        # If this isn't a Tango job then bail with an error
        if not isinstance(job, TangoJob):
            return -1

        # Every job must have a name
        if not job.name:
            self.log.error("validateJob: Missing job.name")
            job.appendTrace(
                "%s|validateJob: Missing job.name" % (datetime.utcnow().ctime())
            )
            errors += 1

        # Check the virtual machine field
        if not job.vm:
            self.log.error("validateJob: Missing job.vm")
            job.appendTrace(
                "%s|validateJob: Missing job.vm" % (datetime.utcnow().ctime())
            )
            errors += 1
        else:
            if not job.vm.image:
                self.log.error("validateJob: Missing job.vm.image")
                job.appendTrace(
                    "%s|validateJob: Missing job.vm.image" % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                vobj = vmms[Config.VMMS_NAME]
                imgList = vobj.getImages()
                if job.vm.image not in imgList:
                    self.log.error("validateJob: Image not found: %s" % job.vm.image)
                    job.appendTrace(
                        "%s|validateJob: Image not found: %s"
                        % (datetime.utcnow().ctime(), job.vm.image)
                    )
                    errors += 1
                else:
                    # Normalize the VM name to the image name sans extension.
                    (name, ext) = os.path.splitext(job.vm.image)
                    job.vm.name = name
            if not job.vm.vmms:
                self.log.error("validateJob: Missing job.vm.vmms")
                job.appendTrace(
                    "%s|validateJob: Missing job.vm.vmms" % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                if job.vm.vmms not in vmms:
                    self.log.error("validateJob: Invalid vmms name: %s" % job.vm.vmms)
                    job.appendTrace(
                        "%s|validateJob: Invalid vmms name: %s"
                        % (datetime.utcnow().ctime(), job.vm.vmms)
                    )
                    errors += 1

        # Check the output file
        if not job.outputFile:
            self.log.error("validateJob: Missing job.outputFile")
            job.appendTrace(
                "%s|validateJob: Missing job.outputFile" % (datetime.utcnow().ctime())
            )
            errors += 1
        else:
            if not os.path.exists(os.path.dirname(job.outputFile)):
                self.log.error("validateJob: Bad output path: %s", job.outputFile)
                job.appendTrace(
                    "%s|validateJob: Bad output path: %s"
                    % (datetime.utcnow().ctime(), job.outputFile)
                )
                errors += 1

        # Check for max output file size parameter
        if not job.maxOutputFileSize:
            self.log.debug(
                "validateJob: Setting job.maxOutputFileSize "
                "to default value: %d bytes",
                Config.MAX_OUTPUT_FILE_SIZE,
            )
            job.maxOutputFileSize = Config.MAX_OUTPUT_FILE_SIZE

        # Check the list of input files
        hasMakefile = False
        for inputFile in job.input:
            if not inputFile.localFile:
                self.log.error("validateJob: Missing inputFile.localFile")
                job.appendTrace(
                    "%s|validateJob: Missing inputFile.localFile"
                    % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                # Fix: this branch previously re-checked job.outputFile's
                # directory (a copy-paste of the output-file check); the
                # intent is to verify that each input file exists locally.
                if not os.path.exists(inputFile.localFile):
                    self.log.error(
                        "validateJob: Input file not found: %s", inputFile.localFile
                    )
                    job.appendTrace(
                        "%s|validateJob: Input file not found: %s"
                        % (datetime.utcnow().ctime(), inputFile.localFile)
                    )
                    errors += 1
            if inputFile.destFile == "Makefile":
                hasMakefile = True

        # Check if input files include a Makefile
        if not hasMakefile:
            self.log.error("validateJob: Missing Makefile in input files.")
            job.appendTrace(
                "%s|validateJob: Missing Makefile in input files."
                % (datetime.utcnow().ctime())
            )
            errors += 1

        # Check if job timeout has been set; If not set timeout to default
        if not job.timeout or job.timeout <= 0:
            self.log.debug(
                "validateJob: Setting job.timeout to" " default config value: %d secs",
                Config.RUNJOB_TIMEOUT,
            )
            job.timeout = Config.RUNJOB_TIMEOUT

        # Any problems, return an error status
        if errors > 0:
            self.log.error("validateJob: Job rejected: %d errors" % errors)
            job.appendTrace(
                "%s|validateJob: Job rejected: %d errors"
                % (datetime.utcnow().ctime(), errors)
            )
            return -1
        else:
            return 0
|
#
# Tango is a job management service that manages requests for jobs to
# be run in virtual machines. Tango consists of five main components:
#
# 1. The Restful API: This is the interface for Tango that receives
# requests from clients via HTTP. AddJob requests are converted
# into a form that the tangoServer understands and then passed on
# to an instance of the tangoServer class. (restful_tango/*)
#
# 2. The TangoServer Class: This is a class that accepts addJob requests
# from the restful server. Job requests are validated and placed in
# a job queue. This class also implements various administrative
# functions to manage instances of tangoServer. (tango.py)
#
# 3. The Job Manager: This thread runs continuously. It watches the job
# queue for new job requests. When it finds one it creates a new
# worker thread to handle the job, and assigns a preallocated or new VM
# to the job. (jobQueue.py)
#
# 4. Workers: Worker threads do the actual work of running a job. The
# process of running a job is broken down into the following steps:
# (1) initializeVM, (2) waitVM, (3) copyIn, (4) runJob, (5)
# copyOut, (6) destroyVM. The actual process involved in
# each of those steps is handled by a virtual machine management
# system (VMMS) such as Local or Amazon EC2. Each job request
# specifies the VMMS to use. The worker thread dynamically loads
# and uses the module written for that particular VMMS. (worker.py
# and vmms/*.py)
#
# 5. The Preallocator: Virtual machines can preallocated in a pool in
# order to reduce response time. Each virtual machine image has its
# own pool. Users control the size of each pool via an external HTTP
# call. Each time a machine is assigned to a job and removed from
# the pool, the preallocator creates another instance and adds it
# to the pool. (preallocator.py)
import threading
import logging
import time
import stat
import re
import os
from datetime import datetime
from jobManager import JobManager
from preallocator import Preallocator
from jobQueue import JobQueue
from tangoObjects import TangoJob
from config import Config
class TangoServer(object):
    """TangoServer - Implements the API functions that the server accepts"""
    def __init__(self):
        # Marked as a daemon; the hosting process is not kept alive by it.
        self.daemon = True
        # Instantiate only the VMMS backend named in the static Config.
        # Imports are done lazily inside each branch so the dependencies of
        # unused backends are never required at startup.
        vmms = None
        if Config.VMMS_NAME == "tashiSSH":
            from vmms.tashiSSH import TashiSSH
            vmms = TashiSSH()
        elif Config.VMMS_NAME == "ec2SSH":
            from vmms.ec2SSH import Ec2SSH
            vmms = Ec2SSH()
        elif Config.VMMS_NAME == "localDocker":
            from vmms.localDocker import LocalDocker
            vmms = LocalDocker()
        elif Config.VMMS_NAME == "distDocker":
            from vmms.distDocker import DistDocker
            vmms = DistDocker()
        # NOTE(review): an unrecognized Config.VMMS_NAME leaves vmms as None,
        # which will surface later as attribute errors -- confirm upstream
        # config validation.
        # The preallocator maintains per-image VM pools; the job queue draws
        # VMs from it when assigning jobs.
        self.preallocator = Preallocator({Config.VMMS_NAME: vmms})
        self.jobQueue = JobQueue(self.preallocator)
        if not Config.USE_REDIS:
            # creates a local Job Manager if there is no persistent
            # memory between processes. Otherwise, JobManager will
            # be initiated separately
            JobManager(self.jobQueue).start()
        logging.basicConfig(
            filename=Config.LOGFILE,
            format="%(levelname)s|%(asctime)s|%(name)s|%(message)s",
            level=Config.LOGLEVEL,
        )
        # Remember startup time so getInfo() can report elapsed seconds.
        self.start_time = time.time()
        self.log = logging.getLogger("TangoServer")
        self.log.info("Starting Tango server")
    def addJob(self, job):
        """addJob - Add a job to the job queue

        @param job: a TangoJob instance describing the work to run
        @return: the value of jobQueue.add(job) when validation passes;
            -1 when validation fails (the job is then parked on the
            dead queue for inspection)
        """
        Config.job_requests += 1
        self.log.debug("Received addJob request")
        ret = self.__validateJob(job, self.preallocator.vmms)
        self.log.info("Done validating job %s" % (job.name))
        if ret == 0:
            return self.jobQueue.add(job)
        else:
            # Invalid jobs are retained on the dead queue rather than dropped.
            self.jobQueue.addDead(job)
            return -1
    def delJob(self, id, deadjob):
        """delJob - Delete a job
        @param id: Id of job to delete
        @param deadjob - If 0, move the job from the live queue to the
        dead queue. If non-zero, remove the job from the dead queue
        and discard. Use with caution!
        """
        self.log.debug("Received delJob(%d, %d) request" % (id, deadjob))
        return self.jobQueue.delJob(id, deadjob)
    def getJobs(self, item):
        """getJobs - Return the list of live jobs (item == 0) or the
        list of dead jobs (item == -1).

        Any other item value yields an empty list.
        """
        try:
            self.log.debug("Received getJobs(%s) request" % (item))
            if item == -1: # return the list of dead jobs
                return self.jobQueue.deadJobs.values()
            elif item == 0: # return the list of live jobs
                return self.jobQueue.liveJobs.values()
            else: # invalid parameter
                return []
        except Exception as e:
            # NOTE(review): on exception this logs and implicitly returns
            # None, not a list -- callers may not expect that; confirm.
            self.log.debug("getJobs: %s" % str(e))
    def preallocVM(self, vm, num):
        """preallocVM - Set the pool size for VMs of type vm to num

        @return: 0 on success, -1 on internal error, -2 for an invalid
            vm/num argument, -3 for an image unknown to the VMMS
        """
        self.log.debug("Received preallocVM(%s,%d)request" % (vm.name, num))
        try:
            vmms = self.preallocator.vmms[vm.vmms]
            # NOTE(review): vm is dereferenced above (vm.name, vm.vmms)
            # before this falsy check, so a None vm would raise earlier and
            # be reported as -1, not -2 -- confirm intent.
            if not vm or num < 0:
                return -2
            if vm.image not in vmms.getImages():
                self.log.error("Invalid image name")
                return -3
            # The pool name is the image file name without its extension.
            (name, ext) = os.path.splitext(vm.image)
            vm.name = name
            self.preallocator.update(vm, num)
            return 0
        except Exception as err:
            self.log.error("preallocVM failed: %s" % err)
            return -1
    def getVMs(self, vmms_name):
        """getVMs - return the list of VMs managed by the service vmms_name

        Returns an empty list for an unknown service name or on error.
        """
        self.log.debug("Received getVMs request(%s)" % vmms_name)
        try:
            if vmms_name in self.preallocator.vmms:
                vmms_inst = self.preallocator.vmms[vmms_name]
                return vmms_inst.getVMs()
            else:
                return []
        except Exception as err:
            self.log.error("getVMs request failed: %s" % err)
            return []
    def delVM(self, vmName, id):
        """delVM - delete a specific VM instance from a pool

        @return: the preallocator's destroyVM result, or -1 for bad
            arguments or on error
        """
        self.log.debug("Received delVM request(%s, %d)" % (vmName, id))
        try:
            if not vmName or vmName == "" or not id:
                return -1
            return self.preallocator.destroyVM(vmName, id)
        except Exception as err:
            self.log.error("delVM request failed: %s" % err)
            return -1
    def getPool(self, vmName):
        """getPool - Return the current members of a pool and its free list

        @return: a list of human-readable "key=value" strings summarizing
            the pool, or an empty list for bad input or on error
        """
        self.log.debug("Received getPool request(%s)" % (vmName))
        try:
            if not vmName or vmName == "":
                return []
            result = self.preallocator.getPool(vmName)
            return [
                "pool_size=%d" % len(result["pool"]),
                "free_size=%d" % len(result["free"]),
                "pool=%s" % result["pool"],
                "free=%s" % result["free"],
            ]
        except Exception as err:
            self.log.error("getPool request failed: %s" % err)
            return []
    def getInfo(self):
        """getInfo - return various statistics about the Tango daemon

        Counters are read from the process-global Config object.
        """
        stats = {}
        stats["elapsed_secs"] = time.time() - self.start_time
        stats["job_requests"] = Config.job_requests
        stats["job_retries"] = Config.job_retries
        stats["waitvm_timeouts"] = Config.waitvm_timeouts
        stats["runjob_timeouts"] = Config.runjob_timeouts
        stats["copyin_errors"] = Config.copyin_errors
        stats["runjob_errors"] = Config.runjob_errors
        stats["copyout_errors"] = Config.copyout_errors
        stats["num_threads"] = threading.activeCount()
        return stats
    #
    # Helper functions
    #
    def resetTango(self, vmms):
        """resetTango - resets Tango to a clean predictable state and
        ensures that it has a working virtualization environment. A side
        effect is that also checks that each supported VMMS is actually
        running.

        Destroys every VM whose name carries the Config.PREFIX namespace,
        then marks all live jobs as unassigned. Exits the whole process
        (os._exit) if any VMMS call fails.
        """
        self.log.debug("Received resetTango request.")
        try:
            # For each supported VMM system, get the instances it knows about,
            # and kill those in the current Tango name space.
            for vmms_name in vmms:
                vobj = vmms[vmms_name]
                vms = vobj.getVMs()
                self.log.debug("Pre-existing VMs: %s" % [vm.name for vm in vms])
                namelist = []
                for vm in vms:
                    if re.match("%s-" % Config.PREFIX, vm.name):
                        vobj.destroyVM(vm)
                        # Need a consistent abstraction for a vm between
                        # interfaces
                        namelist.append(vm.name)
                if namelist:
                    self.log.warning(
                        "Killed these %s VMs on restart: %s" % (vmms_name, namelist)
                    )
            # Any job still marked assigned at startup lost its worker;
            # return it to the unassigned state so it can be rescheduled.
            for _, job in self.jobQueue.liveJobs.items():
                if not job.isNotAssigned():
                    job.makeUnassigned()
                self.log.debug(
                    "job: %s, assigned: %s" % (str(job.name), str(job.assigned))
                )
        except Exception as err:
            # NOTE(review): vmms_name is only bound inside the loop above; an
            # exception raised before the first iteration (or in the job loop
            # after it) would make this log line itself raise NameError --
            # confirm against upstream.
            self.log.error("resetTango: Call to VMMS %s failed: %s" % (vmms_name, err))
            os._exit(1)
    def __validateJob(self, job, vmms):
        """validateJob - validate the input arguments in an addJob request.

        Accumulates an error count while appending a trace line to the job
        for each problem found.

        @param job: candidate TangoJob
        @param vmms: mapping of vmms name -> vmms instance
        @return: 0 if the job is acceptable, -1 otherwise
        """
        errors = 0
        # If this isn't a Tango job then bail with an error
        if not isinstance(job, TangoJob):
            return -1
        # Every job must have a name
        if not job.name:
            self.log.error("validateJob: Missing job.name")
            job.appendTrace(
                "%s|validateJob: Missing job.name" % (datetime.utcnow().ctime())
            )
            errors += 1
        # Check the virtual machine field
        if not job.vm:
            self.log.error("validateJob: Missing job.vm")
            job.appendTrace(
                "%s|validateJob: Missing job.vm" % (datetime.utcnow().ctime())
            )
            errors += 1
        else:
            if not job.vm.image:
                self.log.error("validateJob: Missing job.vm.image")
                job.appendTrace(
                    "%s|validateJob: Missing job.vm.image" % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                vobj = vmms[Config.VMMS_NAME]
                imgList = vobj.getImages()
                if job.vm.image not in imgList:
                    self.log.error("validateJob: Image not found: %s" % job.vm.image)
                    job.appendTrace(
                        "%s|validateJob: Image not found: %s"
                        % (datetime.utcnow().ctime(), job.vm.image)
                    )
                    errors += 1
                else:
                    # Derive the VM name from the image file name (no extension).
                    (name, ext) = os.path.splitext(job.vm.image)
                    job.vm.name = name
            if not job.vm.vmms:
                self.log.error("validateJob: Missing job.vm.vmms")
                job.appendTrace(
                    "%s|validateJob: Missing job.vm.vmms" % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                if job.vm.vmms not in vmms:
                    self.log.error("validateJob: Invalid vmms name: %s" % job.vm.vmms)
                    job.appendTrace(
                        "%s|validateJob: Invalid vmms name: %s"
                        % (datetime.utcnow().ctime(), job.vm.vmms)
                    )
                    errors += 1
        # Check the output file
        if not job.outputFile:
            self.log.error("validateJob: Missing job.outputFile")
            job.appendTrace(
                "%s|validateJob: Missing job.outputFile" % (datetime.utcnow().ctime())
            )
            errors += 1
        else:
            if not os.path.exists(os.path.dirname(job.outputFile)):
                self.log.error("validateJob: Bad output path: %s", job.outputFile)
                job.appendTrace(
                    "%s|validateJob: Bad output path: %s"
                    % (datetime.utcnow().ctime(), job.outputFile)
                )
                errors += 1
        # Check for max output file size parameter
        if not job.maxOutputFileSize:
            self.log.debug(
                "validateJob: Setting job.maxOutputFileSize "
                "to default value: %d bytes",
                Config.MAX_OUTPUT_FILE_SIZE,
            )
            job.maxOutputFileSize = Config.MAX_OUTPUT_FILE_SIZE
        # Check the list of input files
        hasMakefile = False
        for inputFile in job.input:
            if not inputFile.localFile:
                self.log.error("validateJob: Missing inputFile.localFile")
                job.appendTrace(
                    "%s|validateJob: Missing inputFile.localFile"
                    % (datetime.utcnow().ctime())
                )
                errors += 1
            else:
                # NOTE(review): this branch re-validates job.outputFile's
                # directory for every input file; it looks like a copy-paste
                # of the output-file check above and presumably should
                # validate inputFile.localFile instead -- confirm against
                # the upstream Tango sources before changing.
                if not os.path.exists(os.path.dirname(job.outputFile)):
                    self.log.error("validateJob: Bad output path: %s", job.outputFile)
                    job.appendTrace(
                        "%s|validateJob: Bad output path: %s"
                        % (datetime.utcnow().ctime(), job.outputFile)
                    )
                    errors += 1
            if inputFile.destFile == "Makefile":
                hasMakefile = True
        # Check if input files include a Makefile
        if not hasMakefile:
            self.log.error("validateJob: Missing Makefile in input files.")
            job.appendTrace(
                "%s|validateJob: Missing Makefile in input files."
                % (datetime.utcnow().ctime())
            )
            errors += 1
        # Check if job timeout has been set; If not set timeout to default
        if not job.timeout or job.timeout <= 0:
            self.log.debug(
                "validateJob: Setting job.timeout to" " default config value: %d secs",
                Config.RUNJOB_TIMEOUT,
            )
            job.timeout = Config.RUNJOB_TIMEOUT
        # Any problems, return an error status
        if errors > 0:
            self.log.error("validateJob: Job rejected: %d errors" % errors)
            job.appendTrace(
                "%s|validateJob: Job rejected: %d errors"
                % (datetime.utcnow().ctime(), errors)
            )
            return -1
        else:
            return 0
|
en
| 0.842681
|
# # Tango is a job management service that manages requests for jobs to # be run in virtual machines. Tango consists of five main components: # # 1. The Restful API: This is the interface for Tango that receives # requests from clients via HTTP. AddJob requests are converted # into a form that the tangoServer understands and then passed on # to an instance of the tangoServer class. (restful_tango/*) # # 2. The TangoServer Class: This is a class that accepts addJob requests # from the restful server. Job requests are validated and placed in # a job queue. This class also implements various administrative # functions to manage instances of tangoServer. (tango.py) # # 3. The Job Manager: This thread runs continuously. It watches the job # queue for new job requests. When it finds one it creates a new # worker thread to handle the job, and assigns a preallocated or new VM # to the job. (jobQueue.py) # # 4. Workers: Worker threads do the actual work of running a job. The # process of running a job is broken down into the following steps: # (1) initializeVM, (2) waitVM, (3) copyIn, (4) runJob, (5) # copyOut, (6) destroyVM. The actual process involved in # each of those steps is handled by a virtual machine management # system (VMMS) such as Local or Amazon EC2. Each job request # specifies the VMMS to use. The worker thread dynamically loads # and uses the module written for that particular VMMS. (worker.py # and vmms/*.py) # # 5. The Preallocator: Virtual machines can preallocated in a pool in # order to reduce response time. Each virtual machine image has its # own pool. Users control the size of each pool via an external HTTP # call. Each time a machine is assigned to a job and removed from # the pool, the preallocator creates another instance and adds it # to the pool. (preallocator.py) TangoServer - Implements the API functions that the server accepts # creates a local Job Manager if there is no persistent # memory between processes. 
Otherwise, JobManager will # be initiated separately addJob - Add a job to the job queue delJob - Delete a job @param id: Id of job to delete @param deadjob - If 0, move the job from the live queue to the dead queue. If non-zero, remove the job from the dead queue and discard. Use with caution! getJobs - Return the list of live jobs (item == 0) or the list of dead jobs (item == -1). # return the list of dead jobs # return the list of live jobs # invalid parameter preallocVM - Set the pool size for VMs of type vm to num getVMs - return the list of VMs managed by the service vmms_name delVM - delete a specific VM instance from a pool getPool - Return the current members of a pool and its free list getInfo - return various statistics about the Tango daemon # # Helper functions # resetTango - resets Tango to a clean predictable state and ensures that it has a working virtualization environment. A side effect is that also checks that each supported VMMS is actually running. # For each supported VMM system, get the instances it knows about, # and kill those in the current Tango name space. # Need a consistent abstraction for a vm between # interfaces validateJob - validate the input arguments in an addJob request. # If this isn't a Tango job then bail with an error # Every job must have a name # Check the virtual machine field # Check the output file # Check for max output file size parameter # Check the list of input files # Check if input files include a Makefile # Check if job timeout has been set; If not set timeout to default # Any problems, return an error status
| 2.969218
| 3
|
tests/test_diffuser.py
|
tkonopka/crossmap
| 1
|
6626421
|
"""
Tests for obtaining feature co-occurance and diffusing vectors
"""
import unittest
from os.path import join
from crossmap.settings import CrossmapSettings
from crossmap.indexer import CrossmapIndexer
from crossmap.diffuser import CrossmapDiffuser
from crossmap.tokenizer import CrossmapTokenizer, CrossmapDiffusionTokenizer
from crossmap.diffuser import _pass_weights
from .tools import remove_crossmap_cache
from crossmap.vectors import sparse_to_dense
from crossmap.distance import euc_dist
data_dir = join("tests", "testdata")
config_plain = join(data_dir, "config-simple.yaml")
config_longword = join(data_dir, "config-longword.yaml")
class CrossmapDiffuserBuildTests(unittest.TestCase):
    """Managing co-occurrence counts"""
    @classmethod
    def setUpClass(cls):
        # Build the index first; the diffuser derives its co-occurrence
        # counts from the already-indexed data (build order matters).
        settings = CrossmapSettings(config_plain, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
        # Shared handles used by the individual tests below.
        cls.feature_map = cls.diffuser.feature_map
        cls.db = cls.diffuser.db
        cls.encoder = cls.indexer.encoder
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_simple")
    def test_diffuser_build_adds_count_rows(self):
        """count tables should have one row per feature"""
        n = len(self.feature_map)
        # there are two datasets (targets, documents), each with n rows
        self.assertEqual(self.db.count_rows("targets", "counts"), n)
        self.assertEqual(self.db.count_rows("documents", "counts"), n)
    def test_retrieve_counts(self):
        """extract counts from db for one feature"""
        fm = self.feature_map
        # feature_map values are tuples; element [0] is the feature index
        alice_idx = fm["alice"][0]
        # db fetch should provide counts for one feature
        result = self.db.get_counts("targets", [alice_idx])
        self.assertEqual(len(result), 1)
        # count vector should match feature map length
        data = result[alice_idx].toarray()[0]
        self.assertEqual(len(data), len(fm))
        # count vector should have nonzero counts for co-occurring features
        start_idx = fm["start"][0]
        with_idx = fm["with"][0]
        self.assertGreater(data[start_idx], 0)
        self.assertGreater(data[with_idx], 0)
        # count vector should not have counts for non-co-occurring features
        abcde_idx = fm["abcde"][0]
        self.assertEqual(data[abcde_idx], 0)
    def test_retrieve_many_counts(self):
        """extract counts from db for multiple features"""
        fm = self.feature_map
        a, b = fm["a"][0], fm["b"][0]
        # db fetch should provide counts for both requested features
        result = self.db.get_counts("targets", [a, b])
        self.assertEqual(len(result), 2)
        # each count vector should match feature map length
        self.assertEqual(len(result[a].toarray()[0]), len(fm))
        self.assertEqual(len(result[b].toarray()[0]), len(fm))
    def test_retrieve_from_documents(self):
        """extract counts from documents"""
        fm = self.feature_map
        a = fm["a"][0]
        # fetch the same feature's counts from both datasets for comparison
        result_targets = self.db.get_counts("targets", [a])
        result_docs = self.db.get_counts("documents", [a])
        self.assertEqual(len(result_targets), 1)
        self.assertEqual(len(result_docs), 1)
        counts_targets = result_targets[a].toarray()[0]
        counts_docs = result_docs[a].toarray()[0]
        # the letter a appears more often in documents than in targets
        self.assertGreater(counts_docs[a], counts_targets[a])
        # the letter co-occurs with 'alpha' in documents
        alpha = fm["alpha"][0]
        self.assertGreater(counts_docs[alpha], counts_targets[alpha])
    def test_diffuse_vector(self):
        """diffuse a vector values, basic properties"""
        doc = {"data": "alice"}
        doc_data = self.encoder.document(doc)
        # diffuse with unit strength from both datasets
        strength = dict(targets=1, documents=1)
        result = self.diffuser.diffuse(doc_data, strength)
        result_array = result.toarray()[0]
        # raw data has only one feature
        self.assertEqual(len(doc_data.indices), 1)
        # diffused data should have several
        self.assertGreater(len(result.indices), 2)
        result_indices = list(result.indices)
        alice_idx = self.feature_map["alice"][0]
        with_idx = self.feature_map["with"][0]
        a_idx = self.feature_map["a"][0]
        self.assertTrue(with_idx in result_indices)
        self.assertTrue(a_idx in result_indices)
        # diffused values should be lower than the primary item
        self.assertLess(result_array[with_idx], result_array[alice_idx])
        self.assertLess(result_array[a_idx], result_array[alice_idx])
    def test_diffuse_vector_custom_weights(self):
        """diffuse a vector with custom weights"""
        doc = {"data": "alice"}
        doc_data = self.encoder.document(doc)
        strength_weak = dict(targets=0.5, documents=0.5)
        result1 = self.diffuser.diffuse(doc_data, strength_weak)
        array1 = result1.toarray()[0]
        strength_strong = dict(targets=2, documents=2)
        result2 = self.diffuser.diffuse(doc_data, strength_strong)
        array2 = result2.toarray()[0]
        # second result uses more aggressive diffusion,
        # diffused values should be larger
        with_idx = self.feature_map["with"][0]
        # 'with' is absent from the raw encoding; any mass there is diffused
        self.assertEqual(doc_data.toarray()[0][with_idx], 0.0)
        self.assertGreater(array2[with_idx], array1[with_idx])
class CrossmapDiffuserBuildReBuildTests(unittest.TestCase):
    """A second build of co-occurrence counts must refuse to run"""
    @classmethod
    def setUpClass(cls):
        # index first, then diffuser: counts are derived from indexed data
        settings = CrossmapSettings(config_plain, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_simple")
    def test_rebuild_aborts(self):
        """attempting to rebuild should signal and abort"""
        rows_before = self.diffuser.db.count_rows("targets", "counts")
        self.assertGreater(rows_before, 0)
        # a repeat build() must warn about existing counts, not duplicate them
        with self.assertLogs(level="WARNING") as captured:
            self.diffuser.build()
        self.assertTrue("exists" in str(captured.output))
        rows_after = self.diffuser.db.count_rows("targets", "counts")
        self.assertEqual(rows_after, rows_before)
class CrossmapDiffuserWeightsTests(unittest.TestCase):
    """Checking overlapping tokens do not swamp diffusion"""
    # fixture documents: one containing an overlapping long token, one not
    long_b = dict(data="longword B")
    gh = dict(data="G H")
    @classmethod
    def setUpClass(cls):
        # index first, then diffuser (counts are built from indexed data)
        settings = CrossmapSettings(config_longword, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
        cls.feature_map = cls.diffuser.feature_map
        cls.db = cls.diffuser.db
        cls.encoder = cls.indexer.encoder
        cls.plain_tokenizer = CrossmapTokenizer(settings)
        cls.diff_tokenizer = CrossmapDiffusionTokenizer(settings)
        # extract data vectors
        cls.data = dict()
        temp = cls.db.get_data(dataset="targets",
                               ids=["L0", "L1", "L2", "L3", "L4"])
        for _ in temp:
            cls.data[_["id"]] = sparse_to_dense(_["data"])
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_longword")
    def test_diffusion_shifts_away_from_a_target(self):
        """example using diffusion unaffected by longword"""
        # before diffusion gh should be roughly equally distant to L3 and L4
        v = self.encoder.document(self.gh)
        v_dense = sparse_to_dense(v)
        self.assertAlmostEqual(euc_dist(v_dense, self.data["L3"]),
                               euc_dist(v_dense, self.data["L4"]))
        # after diffusion, L3 should be clearly preferred
        # the strengths of diffusion should not matter here
        # (any diffusion should break a tie in favor of L3)
        vd = self.diffuser.diffuse(v, dict(documents=1))
        vd_dense = sparse_to_dense(vd)
        self.assertLess(euc_dist(vd_dense, self.data["L3"]),
                        euc_dist(vd_dense, self.data["L4"]))
    def test_longword_document_before_diffusion(self):
        """encoding before diffusion accounts for overlapping tokens"""
        v = self.encoder.document(self.long_b)
        self.assertGreater(len(v.data), 4)
        # overlapping tokens from "longword" should be weighted lower than
        # tokens from "B" or "C" that are stand-alone
        v_dense = sparse_to_dense(v)
        fm = self.feature_map
        self.assertGreater(v_dense[fm["b"][0]], v_dense[fm["ngwor"][0]])
        # document should be closer to L0 than to L1
        d0 = euc_dist(v_dense, self.data["L0"])
        d1 = euc_dist(v_dense, self.data["L1"])
        self.assertLess(d0, d1)
    def test_diffusion_keeps_original_feature_strong(self):
        """diffusing from one feature should maintain that feature strong"""
        doc = dict(data="C")
        c_index = self.feature_map["c"][0]
        v = self.encoder.document(doc)
        # diffuse at different strengths
        # all should maintain feature "C" as the most important feature
        for w in [1, 2, 4, 8, 20]:
            result = self.diffuser.diffuse(v, dict(targets=w))
            result_dense = sparse_to_dense(result)
            result_c = result_dense[c_index]
            result_max = max(result_dense)
            self.assertEqual(result_max, result_c)
class CrossmapDiffuserMultistep(unittest.TestCase):
    """Weight schedules for diffusion carried out over multiple passes"""
    def test_multistep_weights_1(self):
        """weighting for a single-step diffusion"""
        weights = _pass_weights(1)
        # a single pass carries all of the weight
        self.assertEqual(len(weights), 1)
        self.assertEqual(weights[0], 1.0)
    def test_multistep_weights_2(self):
        """weighting for a two-step diffusion"""
        weights = _pass_weights(2)
        self.assertEqual(len(weights), 2)
        # expected normalized weights for two passes
        for observed, expected in zip(weights, (2/3, 1/3)):
            self.assertAlmostEqual(observed, expected)
    def test_multistep_weights_3(self):
        """weighting for a three-step diffusion"""
        weights = _pass_weights(3)
        self.assertEqual(len(weights), 3)
        # expected normalized weights for three passes
        for observed, expected in zip(weights, (6/11, 3/11, 2/11)):
            self.assertAlmostEqual(observed, expected)
|
"""
Tests for obtaining feature co-occurance and diffusing vectors
"""
import unittest
from os.path import join
from crossmap.settings import CrossmapSettings
from crossmap.indexer import CrossmapIndexer
from crossmap.diffuser import CrossmapDiffuser
from crossmap.tokenizer import CrossmapTokenizer, CrossmapDiffusionTokenizer
from crossmap.diffuser import _pass_weights
from .tools import remove_crossmap_cache
from crossmap.vectors import sparse_to_dense
from crossmap.distance import euc_dist
data_dir = join("tests", "testdata")
config_plain = join(data_dir, "config-simple.yaml")
config_longword = join(data_dir, "config-longword.yaml")
class CrossmapDiffuserBuildTests(unittest.TestCase):
    """Managing co-occurrence counts"""
    @classmethod
    def setUpClass(cls):
        # Build the index first; the diffuser derives its co-occurrence
        # counts from the already-indexed data (build order matters).
        settings = CrossmapSettings(config_plain, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
        # Shared handles used by the individual tests below.
        cls.feature_map = cls.diffuser.feature_map
        cls.db = cls.diffuser.db
        cls.encoder = cls.indexer.encoder
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_simple")
    def test_diffuser_build_adds_count_rows(self):
        """count tables should have one row per feature"""
        n = len(self.feature_map)
        # there are two datasets (targets, documents), each with n rows
        self.assertEqual(self.db.count_rows("targets", "counts"), n)
        self.assertEqual(self.db.count_rows("documents", "counts"), n)
    def test_retrieve_counts(self):
        """extract counts from db for one feature"""
        fm = self.feature_map
        # feature_map values are tuples; element [0] is the feature index
        alice_idx = fm["alice"][0]
        # db fetch should provide counts for one feature
        result = self.db.get_counts("targets", [alice_idx])
        self.assertEqual(len(result), 1)
        # count vector should match feature map length
        data = result[alice_idx].toarray()[0]
        self.assertEqual(len(data), len(fm))
        # count vector should have nonzero counts for co-occurring features
        start_idx = fm["start"][0]
        with_idx = fm["with"][0]
        self.assertGreater(data[start_idx], 0)
        self.assertGreater(data[with_idx], 0)
        # count vector should not have counts for non-co-occurring features
        abcde_idx = fm["abcde"][0]
        self.assertEqual(data[abcde_idx], 0)
    def test_retrieve_many_counts(self):
        """extract counts from db for multiple features"""
        fm = self.feature_map
        a, b = fm["a"][0], fm["b"][0]
        # db fetch should provide counts for both requested features
        result = self.db.get_counts("targets", [a, b])
        self.assertEqual(len(result), 2)
        # each count vector should match feature map length
        self.assertEqual(len(result[a].toarray()[0]), len(fm))
        self.assertEqual(len(result[b].toarray()[0]), len(fm))
    def test_retrieve_from_documents(self):
        """extract counts from documents"""
        fm = self.feature_map
        a = fm["a"][0]
        # fetch the same feature's counts from both datasets for comparison
        result_targets = self.db.get_counts("targets", [a])
        result_docs = self.db.get_counts("documents", [a])
        self.assertEqual(len(result_targets), 1)
        self.assertEqual(len(result_docs), 1)
        counts_targets = result_targets[a].toarray()[0]
        counts_docs = result_docs[a].toarray()[0]
        # the letter a appears more often in documents than in targets
        self.assertGreater(counts_docs[a], counts_targets[a])
        # the letter co-occurs with 'alpha' in documents
        alpha = fm["alpha"][0]
        self.assertGreater(counts_docs[alpha], counts_targets[alpha])
    def test_diffuse_vector(self):
        """diffuse a vector values, basic properties"""
        doc = {"data": "alice"}
        doc_data = self.encoder.document(doc)
        # diffuse with unit strength from both datasets
        strength = dict(targets=1, documents=1)
        result = self.diffuser.diffuse(doc_data, strength)
        result_array = result.toarray()[0]
        # raw data has only one feature
        self.assertEqual(len(doc_data.indices), 1)
        # diffused data should have several
        self.assertGreater(len(result.indices), 2)
        result_indices = list(result.indices)
        alice_idx = self.feature_map["alice"][0]
        with_idx = self.feature_map["with"][0]
        a_idx = self.feature_map["a"][0]
        self.assertTrue(with_idx in result_indices)
        self.assertTrue(a_idx in result_indices)
        # diffused values should be lower than the primary item
        self.assertLess(result_array[with_idx], result_array[alice_idx])
        self.assertLess(result_array[a_idx], result_array[alice_idx])
    def test_diffuse_vector_custom_weights(self):
        """diffuse a vector with custom weights"""
        doc = {"data": "alice"}
        doc_data = self.encoder.document(doc)
        strength_weak = dict(targets=0.5, documents=0.5)
        result1 = self.diffuser.diffuse(doc_data, strength_weak)
        array1 = result1.toarray()[0]
        strength_strong = dict(targets=2, documents=2)
        result2 = self.diffuser.diffuse(doc_data, strength_strong)
        array2 = result2.toarray()[0]
        # second result uses more aggressive diffusion,
        # diffused values should be larger
        with_idx = self.feature_map["with"][0]
        # 'with' is absent from the raw encoding; any mass there is diffused
        self.assertEqual(doc_data.toarray()[0][with_idx], 0.0)
        self.assertGreater(array2[with_idx], array1[with_idx])
class CrossmapDiffuserBuildReBuildTests(unittest.TestCase):
    """A second build of co-occurrence counts must refuse to run"""
    @classmethod
    def setUpClass(cls):
        # index first, then diffuser: counts are derived from indexed data
        settings = CrossmapSettings(config_plain, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_simple")
    def test_rebuild_aborts(self):
        """attempting to rebuild should signal and abort"""
        rows_before = self.diffuser.db.count_rows("targets", "counts")
        self.assertGreater(rows_before, 0)
        # a repeat build() must warn about existing counts, not duplicate them
        with self.assertLogs(level="WARNING") as captured:
            self.diffuser.build()
        self.assertTrue("exists" in str(captured.output))
        rows_after = self.diffuser.db.count_rows("targets", "counts")
        self.assertEqual(rows_after, rows_before)
class CrossmapDiffuserWeightsTests(unittest.TestCase):
    """Checking overlapping tokens do not swamp diffusion"""
    # fixture documents: one containing an overlapping long token, one not
    long_b = dict(data="longword B")
    gh = dict(data="G H")
    @classmethod
    def setUpClass(cls):
        # index first, then diffuser (counts are built from indexed data)
        settings = CrossmapSettings(config_longword, create_dir=True)
        cls.indexer = CrossmapIndexer(settings)
        cls.indexer.build()
        cls.diffuser = CrossmapDiffuser(settings)
        cls.diffuser.build()
        cls.feature_map = cls.diffuser.feature_map
        cls.db = cls.diffuser.db
        cls.encoder = cls.indexer.encoder
        cls.plain_tokenizer = CrossmapTokenizer(settings)
        cls.diff_tokenizer = CrossmapDiffusionTokenizer(settings)
        # extract data vectors
        cls.data = dict()
        temp = cls.db.get_data(dataset="targets",
                               ids=["L0", "L1", "L2", "L3", "L4"])
        for _ in temp:
            cls.data[_["id"]] = sparse_to_dense(_["data"])
    @classmethod
    def tearDownClass(cls):
        remove_crossmap_cache(data_dir, "crossmap_longword")
    def test_diffusion_shifts_away_from_a_target(self):
        """example using diffusion unaffected by longword"""
        # before diffusion gh should be roughly equally distant to L3 and L4
        v = self.encoder.document(self.gh)
        v_dense = sparse_to_dense(v)
        self.assertAlmostEqual(euc_dist(v_dense, self.data["L3"]),
                               euc_dist(v_dense, self.data["L4"]))
        # after diffusion, L3 should be clearly preferred
        # the strengths of diffusion should not matter here
        # (any diffusion should break a tie in favor of L3)
        vd = self.diffuser.diffuse(v, dict(documents=1))
        vd_dense = sparse_to_dense(vd)
        self.assertLess(euc_dist(vd_dense, self.data["L3"]),
                        euc_dist(vd_dense, self.data["L4"]))
    def test_longword_document_before_diffusion(self):
        """encoding before diffusion accounts for overlapping tokens"""
        v = self.encoder.document(self.long_b)
        self.assertGreater(len(v.data), 4)
        # overlapping tokens from "longword" should be weighted lower than
        # tokens from "B" or "C" that are stand-alone
        v_dense = sparse_to_dense(v)
        fm = self.feature_map
        self.assertGreater(v_dense[fm["b"][0]], v_dense[fm["ngwor"][0]])
        # document should be closer to L0 than to L1
        d0 = euc_dist(v_dense, self.data["L0"])
        d1 = euc_dist(v_dense, self.data["L1"])
        self.assertLess(d0, d1)
    def test_diffusion_keeps_original_feature_strong(self):
        """diffusing from one feature should maintain that feature strong"""
        doc = dict(data="C")
        c_index = self.feature_map["c"][0]
        v = self.encoder.document(doc)
        # diffuse at different strengths
        # all should maintain feature "C" as the most important feature
        for w in [1, 2, 4, 8, 20]:
            result = self.diffuser.diffuse(v, dict(targets=w))
            result_dense = sparse_to_dense(result)
            result_c = result_dense[c_index]
            result_max = max(result_dense)
            self.assertEqual(result_max, result_c)
class CrossmapDiffuserMultistep(unittest.TestCase):
    """Weight schedules for diffusion carried out over multiple passes"""
    def test_multistep_weights_1(self):
        """weighting for a single-step diffusion"""
        weights = _pass_weights(1)
        # a single pass carries all of the weight
        self.assertEqual(len(weights), 1)
        self.assertEqual(weights[0], 1.0)
    def test_multistep_weights_2(self):
        """weighting for a two-step diffusion"""
        weights = _pass_weights(2)
        self.assertEqual(len(weights), 2)
        # expected normalized weights for two passes
        for observed, expected in zip(weights, (2/3, 1/3)):
            self.assertAlmostEqual(observed, expected)
    def test_multistep_weights_3(self):
        """weighting for a three-step diffusion"""
        weights = _pass_weights(3)
        self.assertEqual(len(weights), 3)
        # expected normalized weights for three passes
        for observed, expected in zip(weights, (6/11, 3/11, 2/11)):
            self.assertAlmostEqual(observed, expected)
|
en
| 0.911517
|
Tests for obtaining feature co-occurance and diffusing vectors Managing co-occurrence counts count tables should have one row per feature # there are two datasets (targets, documents), each with n rows extract counts from db for one feature # db fetch should provide counts for one feature # count vector should match feature map length # count vector should have nonzero counts for co-occuring features # count vector should not have counts for non-co-occurring features extract counts from db for multiple features # db fetch should provide counts for one feature # count vector should match feature map length extract counts from documents # db fetch should provide counts for one feature # the letter a appears more often in documents than in targets # the letter co-occurs with 'alpha' in documents diffuse a vector values, basic properties # raw data has only one feature # diffused data should have several # diffused values should be lower than the primary item diffuse a vector with custom weights # second result uses more aggressive diffusion, # diffused values should be larger Managing co-occurance counts attempting to rebuild should signal and abort Checking overlapping tokens do not swamp diffusion # extract data vectors example using diffusion unaffected by longword # before diffusion gh should be roughly equally distant to L3 and L4 # after diffusion, L3 should be clearly preferred # the strengths of diffusion should not matter here # (any diffusion should break a tie in favor or L3) encoding before diffusion accounts for overlapping tokens # overlapping tokens from "longword" should be weighted lower than # tokens from "B" or "C" that are stand-alone # document should be closer to L0 than to L1 diffusing from one feature should mantain that feature strong # diffuse at different strengths # all should maintain feature "C" as the most important feature diffusion through multiple passes weighting for a single-step diffusion weighting for a two-step diffusion 
weighting for a three-step diffusion
| 2.266695
| 2
|
cupyimg/skimage/restoration/_denoise.py
|
haesleinhuepf/cupyimg
| 39
|
6626422
|
<reponame>haesleinhuepf/cupyimg
import cupy as cp
from .. import img_as_float
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.0e-4, n_iter_max=200):
    """Perform total-variation denoising on n-dimensional images.

    Implements Chambolle's projection algorithm for the
    Rudin-Osher-Fatemi (ROF) model: a dual variable ``p`` (one
    component per image axis) is updated iteratively, and its negative
    divergence added to the input yields the denoised estimate.

    Parameters
    ----------
    image : ndarray
        n-D input data to be denoised.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Relative difference of the value of the cost function that determines
        the stop criterion. The algorithm stops when:
        (E_(n-1) - E_n) < eps * E_0
    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.

    Returns
    -------
    out : ndarray
        Denoised array of floats.

    Notes
    -----
    Rudin, Osher and Fatemi algorithm.
    """
    ndim = image.ndim
    # dual variable: one gradient-like component per image axis
    p = cp.zeros((image.ndim,) + image.shape, dtype=image.dtype)
    g = cp.zeros_like(p)
    d = cp.zeros_like(image)
    i = 0
    # reusable slice templates so per-axis views are built without
    # re-allocating the lists inside the loop
    slices_g = [slice(None)] * (ndim + 1)
    slices_d = [slice(None)] * ndim
    slices_p = [slice(None)] * (ndim + 1)
    while i < n_iter_max:
        if i > 0:
            # d will be the (negative) divergence of p
            d = -p.sum(0)
            for ax in range(ndim):
                slices_d[ax] = slice(1, None)
                slices_p[ax + 1] = slice(0, -1)
                slices_p[0] = ax
                d[tuple(slices_d)] += p[tuple(slices_p)]
                slices_d[ax] = slice(None)
                slices_p[ax + 1] = slice(None)
            # current denoised estimate; E accumulates the data-fidelity
            # term of the energy functional
            out = image + d
            E = (d * d).sum()
        else:
            # first pass starts from the raw image
            out = image
            E = 0.0
        # g stores the gradients of out along each axis
        # e.g. g[0] is the first order finite difference along axis 0
        for ax in range(ndim):
            slices_g[ax + 1] = slice(0, -1)
            slices_g[0] = ax
            g[tuple(slices_g)] = cp.diff(out, axis=ax)
            slices_g[ax + 1] = slice(None)
        # pointwise gradient magnitude -> total-variation term of E
        norm = (g * g).sum(axis=0, keepdims=True)
        cp.sqrt(norm, out=norm)
        E += weight * norm.sum()
        # projected gradient step on the dual variable p
        tau = 1.0 / (2.0 * ndim)
        norm *= tau / weight
        norm += 1.0
        p -= tau * g
        p /= norm
        E /= float(image.size)
        if i == 0:
            E_init = E
            E_previous = E
        else:
            # stop once the energy decrease is negligible relative to E_0
            if abs(E_previous - E) < eps * E_init:
                break
            else:
                E_previous = E
        i += 1
    return out
def denoise_tv_chambolle(
    image, weight=0.1, eps=2.0e-4, n_iter_max=200, multichannel=False
):
    """Perform total-variation denoising on n-dimensional images.

    Parameters
    ----------
    image : ndarray of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Relative difference of the value of the cost function that
        determines the stop criterion. The algorithm stops when:
        (E_(n-1) - E_n) < eps * E_0
    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.
    multichannel : bool, optional
        Apply total-variation denoising separately for each channel. This
        option should be true for color images, otherwise the denoising is
        also applied in the channels dimension.

    Returns
    -------
    out : ndarray
        Denoised image.

    Notes
    -----
    Make sure to set the multichannel parameter appropriately for color images.

    The principle of total variation denoising is explained in
    https://en.wikipedia.org/wiki/Total_variation_denoising

    The principle of total variation denoising is to minimize the
    total variation of the image, which can be roughly described as
    the integral of the norm of the image gradient. Total variation
    denoising tends to produce "cartoon-like" images, that is,
    piecewise-constant images.

    This code is an implementation of the algorithm of Rudin, Fatemi and Osher
    that was proposed by Chambolle in [1]_.

    References
    ----------
    .. [1] A. Chambolle, An algorithm for total variation minimization and
           applications, Journal of Mathematical Imaging and Vision,
           Springer, 2004, 20, 89-97.

    Examples
    --------
    2D example on astronaut image:

    >>> from skimage import color, data
    >>> img = color.rgb2gray(data.astronaut())[:50, :50]
    >>> img += 0.5 * img.std() * np.random.randn(*img.shape)
    >>> denoised_img = denoise_tv_chambolle(img, weight=60)

    3D example on synthetic data:

    >>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
    >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
    >>> mask = mask.astype(np.float64)
    >>> mask += 0.2*np.random.randn(*mask.shape)
    >>> res = denoise_tv_chambolle(mask, weight=100)
    """
    # The solver assumes floating-point data; convert anything else.
    # (idiomatic `!=` instead of the original `not ... ==`)
    if image.dtype.kind != "f":
        image = img_as_float(image)
    if multichannel:
        # Denoise each channel independently along the trailing axis so
        # the channels dimension is not treated as a spatial axis.
        out = cp.zeros_like(image)
        for c in range(image.shape[-1]):
            out[..., c] = _denoise_tv_chambolle_nd(
                image[..., c], weight, eps, n_iter_max
            )
    else:
        out = _denoise_tv_chambolle_nd(image, weight, eps, n_iter_max)
    return out
|
import cupy as cp
from .. import img_as_float
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.0e-4, n_iter_max=200):
    """Perform total-variation denoising on n-dimensional images.

    Implements Chambolle's projection algorithm for the
    Rudin-Osher-Fatemi (ROF) model: a dual variable ``p`` (one
    component per image axis) is updated iteratively, and its negative
    divergence added to the input yields the denoised estimate.

    Parameters
    ----------
    image : ndarray
        n-D input data to be denoised.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Relative difference of the value of the cost function that determines
        the stop criterion. The algorithm stops when:
        (E_(n-1) - E_n) < eps * E_0
    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.

    Returns
    -------
    out : ndarray
        Denoised array of floats.

    Notes
    -----
    Rudin, Osher and Fatemi algorithm.
    """
    ndim = image.ndim
    # dual variable: one gradient-like component per image axis
    p = cp.zeros((image.ndim,) + image.shape, dtype=image.dtype)
    g = cp.zeros_like(p)
    d = cp.zeros_like(image)
    i = 0
    # reusable slice templates so per-axis views are built without
    # re-allocating the lists inside the loop
    slices_g = [slice(None)] * (ndim + 1)
    slices_d = [slice(None)] * ndim
    slices_p = [slice(None)] * (ndim + 1)
    while i < n_iter_max:
        if i > 0:
            # d will be the (negative) divergence of p
            d = -p.sum(0)
            for ax in range(ndim):
                slices_d[ax] = slice(1, None)
                slices_p[ax + 1] = slice(0, -1)
                slices_p[0] = ax
                d[tuple(slices_d)] += p[tuple(slices_p)]
                slices_d[ax] = slice(None)
                slices_p[ax + 1] = slice(None)
            # current denoised estimate; E accumulates the data-fidelity
            # term of the energy functional
            out = image + d
            E = (d * d).sum()
        else:
            # first pass starts from the raw image
            out = image
            E = 0.0
        # g stores the gradients of out along each axis
        # e.g. g[0] is the first order finite difference along axis 0
        for ax in range(ndim):
            slices_g[ax + 1] = slice(0, -1)
            slices_g[0] = ax
            g[tuple(slices_g)] = cp.diff(out, axis=ax)
            slices_g[ax + 1] = slice(None)
        # pointwise gradient magnitude -> total-variation term of E
        norm = (g * g).sum(axis=0, keepdims=True)
        cp.sqrt(norm, out=norm)
        E += weight * norm.sum()
        # projected gradient step on the dual variable p
        tau = 1.0 / (2.0 * ndim)
        norm *= tau / weight
        norm += 1.0
        p -= tau * g
        p /= norm
        E /= float(image.size)
        if i == 0:
            E_init = E
            E_previous = E
        else:
            # stop once the energy decrease is negligible relative to E_0
            if abs(E_previous - E) < eps * E_init:
                break
            else:
                E_previous = E
        i += 1
    return out
def denoise_tv_chambolle(
    image, weight=0.1, eps=2.0e-4, n_iter_max=200, multichannel=False
):
    """Perform total-variation denoising on n-dimensional images.

    Parameters
    ----------
    image : ndarray of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    weight : float, optional
        Denoising weight. The greater `weight`, the more denoising (at
        the expense of fidelity to `input`).
    eps : float, optional
        Relative difference of the value of the cost function that
        determines the stop criterion. The algorithm stops when:
        (E_(n-1) - E_n) < eps * E_0
    n_iter_max : int, optional
        Maximal number of iterations used for the optimization.
    multichannel : bool, optional
        Apply total-variation denoising separately for each channel. This
        option should be true for color images, otherwise the denoising is
        also applied in the channels dimension.

    Returns
    -------
    out : ndarray
        Denoised image.

    Notes
    -----
    Make sure to set the multichannel parameter appropriately for color images.

    The principle of total variation denoising is explained in
    https://en.wikipedia.org/wiki/Total_variation_denoising

    The principle of total variation denoising is to minimize the
    total variation of the image, which can be roughly described as
    the integral of the norm of the image gradient. Total variation
    denoising tends to produce "cartoon-like" images, that is,
    piecewise-constant images.

    This code is an implementation of the algorithm of Rudin, Fatemi and Osher
    that was proposed by Chambolle in [1]_.

    References
    ----------
    .. [1] A. Chambolle, An algorithm for total variation minimization and
           applications, Journal of Mathematical Imaging and Vision,
           Springer, 2004, 20, 89-97.

    Examples
    --------
    2D example on astronaut image:

    >>> from skimage import color, data
    >>> img = color.rgb2gray(data.astronaut())[:50, :50]
    >>> img += 0.5 * img.std() * np.random.randn(*img.shape)
    >>> denoised_img = denoise_tv_chambolle(img, weight=60)

    3D example on synthetic data:

    >>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
    >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
    >>> mask = mask.astype(np.float64)
    >>> mask += 0.2*np.random.randn(*mask.shape)
    >>> res = denoise_tv_chambolle(mask, weight=100)
    """
    # The solver assumes floating-point data; convert anything else.
    # (idiomatic `!=` instead of the original `not ... ==`)
    if image.dtype.kind != "f":
        image = img_as_float(image)
    if multichannel:
        # Denoise each channel independently along the trailing axis so
        # the channels dimension is not treated as a spatial axis.
        out = cp.zeros_like(image)
        for c in range(image.shape[-1]):
            out[..., c] = _denoise_tv_chambolle_nd(
                image[..., c], weight, eps, n_iter_max
            )
    else:
        out = _denoise_tv_chambolle_nd(image, weight, eps, n_iter_max)
    return out
|
en
| 0.712176
|
Perform total-variation denoising on n-dimensional images. Parameters ---------- image : ndarray n-D input data to be denoised. weight : float, optional Denoising weight. The greater `weight`, the more denoising (at the expense of fidelity to `input`). eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when: (E_(n-1) - E_n) < eps * E_0 n_iter_max : int, optional Maximal number of iterations used for the optimization. Returns ------- out : ndarray Denoised array of floats. Notes ----- Rudin, Osher and Fatemi algorithm. # d will be the (negative) divergence of p # g stores the gradients of out along each axis # e.g. g[0] is the first order finite difference along axis 0 Perform total-variation denoising on n-dimensional images. Parameters ---------- image : ndarray of ints, uints or floats Input data to be denoised. `image` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. weight : float, optional Denoising weight. The greater `weight`, the more denoising (at the expense of fidelity to `input`). eps : float, optional Relative difference of the value of the cost function that determines the stop criterion. The algorithm stops when: (E_(n-1) - E_n) < eps * E_0 n_iter_max : int, optional Maximal number of iterations used for the optimization. multichannel : bool, optional Apply total-variation denoising separately for each channel. This option should be true for color images, otherwise the denoising is also applied in the channels dimension. Returns ------- out : ndarray Denoised image. Notes ----- Make sure to set the multichannel parameter appropriately for color images. 
The principle of total variation denoising is explained in https://en.wikipedia.org/wiki/Total_variation_denoising The principle of total variation denoising is to minimize the total variation of the image, which can be roughly described as the integral of the norm of the image gradient. Total variation denoising tends to produce "cartoon-like" images, that is, piecewise-constant images. This code is an implementation of the algorithm of Rudin, Fatemi and Osher that was proposed by Chambolle in [1]_. References ---------- .. [1] <NAME>, An algorithm for total variation minimization and applications, Journal of Mathematical Imaging and Vision, Springer, 2004, 20, 89-97. Examples -------- 2D example on astronaut image: >>> from skimage import color, data >>> img = color.rgb2gray(data.astronaut())[:50, :50] >>> img += 0.5 * img.std() * np.random.randn(*img.shape) >>> denoised_img = denoise_tv_chambolle(img, weight=60) 3D example on synthetic data: >>> x, y, z = np.ogrid[0:20, 0:20, 0:20] >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2 >>> mask = mask.astype(np.float64) >>> mask += 0.2*np.random.randn(*mask.shape) >>> res = denoise_tv_chambolle(mask, weight=100)
| 2.880289
| 3
|
tests/test_flask_reuploaded.py
|
pkesavap/flask-reuploaded
| 0
|
6626423
|
"""
:copyright: 2010 Matthew "LeafStorm" Frazier
:license: MIT/X11, see LICENSE for details
"""
import os
import os.path
import pytest
from flask import Flask
from flask import url_for
from flask_uploads import ALL
from flask_uploads import AllExcept
from flask_uploads import TestingFileStorage
from flask_uploads import UploadConfiguration
from flask_uploads import UploadSet
from flask_uploads import addslash
from flask_uploads import configure_uploads
from flask_uploads import extension
from flask_uploads import lowercase_ext
class TestMiscellaneous:
    """Unit tests for the small helper utilities of flask_uploads."""

    def test_tfs(self):
        storage = TestingFileStorage(filename='foo.bar')
        assert storage.filename == 'foo.bar'
        assert storage.name is None
        assert storage.saved is None
        storage.save('foo_bar.txt')
        assert storage.saved == 'foo_bar.txt'

    def test_extension(self):
        # extension() returns the last dot-separated suffix, or ''
        cases = [
            ('foo.txt', 'txt'),
            ('foo', ''),
            ('archive.tar.gz', 'gz'),
            ('audio.m4a', 'm4a'),
        ]
        for filename, expected in cases:
            assert extension(filename) == expected

    def test_lowercase_ext(self):
        # only the final extension is lowercased; the stem is untouched
        cases = [
            ('foo.txt', 'foo.txt'),
            ('FOO.TXT', 'FOO.txt'),
            ('foo', 'foo'),
            ('FOO', 'FOO'),
            ('archive.tar.gz', 'archive.tar.gz'),
            ('ARCHIVE.TAR.GZ', 'ARCHIVE.TAR.gz'),
            ('audio.m4a', 'audio.m4a'),
            ('AUDIO.M4A', 'AUDIO.m4a'),
        ]
        for filename, expected in cases:
            assert lowercase_ext(filename) == expected

    def test_addslash(self):
        # URLs without a trailing slash gain one; those with keep it
        for url in ('http://localhost:4000', 'http://localhost:4000/'):
            assert addslash(url) == 'http://localhost:4000/'
        for url in ('http://localhost/uploads', 'http://localhost/uploads/'):
            assert addslash(url) == 'http://localhost/uploads/'

    def test_custom_iterables(self):
        assert 'txt' in ALL
        assert 'exe' in ALL
        everything_but_exe = AllExcept(['exe'])
        assert 'txt' in everything_but_exe
        assert 'exe' not in everything_but_exe
Config = UploadConfiguration
class TestConfiguration:
    """Tests for how configure_uploads resolves each upload set's
    destination directory and public URL from the Flask app config.
    """
    def setup(self):
        self.app = Flask(__name__)
    def teardown(self):
        del self.app
    def configure(self, *sets, **options):
        # apply `options` as app config, wire up the sets, and return
        # the resulting name -> UploadConfiguration mapping
        self.app.config.update(options)
        configure_uploads(self.app, sets)
        return self.app.upload_set_config
    def test_manual(self):
        # per-set DEST and URL settings are used verbatim
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADED_FILES_DEST='/var/files',
            UPLOADED_FILES_URL='http://localhost:6001/',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/files', 'http://localhost:6001/')
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
    def test_selfserve(self):
        # a DEST without a URL means the app will serve the files itself
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADED_FILES_DEST='/var/files',
            UPLOADED_PHOTOS_DEST='/mnt/photos'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/files', None)
        assert photo_conf == Config('/mnt/photos', None)
    def test_defaults(self):
        # UPLOADS_DEFAULT_* settings fan out per set name
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads',
            UPLOADS_DEFAULT_URL='http://localhost:6000/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config(
            '/var/uploads/files', 'http://localhost:6000/files/')
        assert photo_conf == Config(
            '/var/uploads/photos', 'http://localhost:6000/photos/')
    def test_default_selfserve(self):
        # a default DEST without a default URL also means self-serving
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/uploads/files', None)
        assert photo_conf == Config('/var/uploads/photos', None)
    def test_mixed_defaults(self):
        # explicit per-set settings take precedence over the defaults
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads',
            UPLOADS_DEFAULT_URL='http://localhost:6001/',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config(
            '/var/uploads/files', 'http://localhost:6001/files/')
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
    def test_default_destination_callable(self):
        # a set-level default_dest callable computes DEST from the app
        f = UploadSet('files', default_dest=lambda app: os.path.join(
            app.config['INSTANCE'], 'files'
        ))
        p = UploadSet('photos')
        setconfig = self.configure(
            f, p,
            INSTANCE='/home/me/webapps/thisapp',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/home/me/webapps/thisapp/files', None)
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
class TestPreconditions:
    """Checks that an UploadSet accepts and rejects the right files."""

    def test_filenames(self):
        upload_set = UploadSet('files')
        upload_set._config = Config('/uploads')
        # text and image files are allowed by default, executables are not
        for name, expected in (('foo.txt', True),
                               ('boat.jpg', True),
                               ('warez.exe', False)):
            storage = TestingFileStorage(filename=name)
            assert upload_set.file_allowed(storage, name) is expected

    def test_default_extensions(self):
        upload_set = UploadSet('files')
        upload_set._config = Config('/uploads')
        for ext, expected in (('txt', True), ('jpg', True), ('exe', False)):
            assert upload_set.extension_allowed(ext) is expected
class TestSaving:
    """Tests for UploadSet.save: the relative name returned and the
    absolute path handed to the storage object. ``os.makedirs`` is
    stubbed out so no directories are actually created.
    """
    def setup(self):
        # replace os.makedirs with a no-op for the duration of each test
        self.old_makedirs = os.makedirs
        os.makedirs = lambda v: None
    def teardown(self):
        # restore the real os.makedirs
        os.makedirs = self.old_makedirs
        del self.old_makedirs
    def test_saved(self):
        # plain save keeps the original filename
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs)
        assert res == 'foo.txt'
        assert tfs.saved == '/uploads/foo.txt'
    def test_save_folders(self):
        # folder= prefixes both the returned name and the stored path
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs, folder='someguy')
        assert res == 'someguy/foo.txt'
        assert tfs.saved == '/uploads/someguy/foo.txt'
    def test_save_named(self):
        # name= overrides the original filename entirely
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs, name='file_123.txt')
        assert res == 'file_123.txt'
        assert tfs.saved == '/uploads/file_123.txt'
    def test_save_namedext(self):
        # a name ending in '.' keeps the uploaded file's extension
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, name='photo_123.')
        assert res == 'photo_123.jpg'
        assert tfs.saved == '/uploads/photo_123.jpg'
    def test_folder_namedext(self):
        # folder= and a '.'-suffixed name combine
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, folder='someguy', name='photo_123.')
        assert res == 'someguy/photo_123.jpg'
        assert tfs.saved == '/uploads/someguy/photo_123.jpg'
    def test_implicit_folder(self):
        # a slash inside name= acts like an explicit folder=
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, name='someguy/photo_123.')
        assert res == 'someguy/photo_123.jpg'
        assert tfs.saved == '/uploads/someguy/photo_123.jpg'
    def test_secured_filename(self):
        # hostile filenames are sanitized before saving
        uset = UploadSet('files', ALL)
        uset._config = Config('/uploads')
        tfs1 = TestingFileStorage(filename='/etc/passwd')
        tfs2 = TestingFileStorage(filename='../../my_app.wsgi')
        res1 = uset.save(tfs1)
        assert res1 == 'etc_passwd'
        assert tfs1.saved == '/uploads/etc_passwd'
        res2 = uset.save(tfs2)
        assert res2 == 'my_app.wsgi'
        assert tfs2.saved == '/uploads/my_app.wsgi'
    def test_storage_is_not_a_werkzeug_datastructure(self):
        """UploadSet.save needs a valid FileStorage object.
        When something different is passed in, a TypeError gets raised.
        """
        uset = UploadSet('files', ALL)
        uset._config = Config('/uploads')
        non_storage = 'this is no werkzeug.datastructure.FileStorage'
        with pytest.raises(TypeError):
            uset.save(non_storage)
class TestConflictResolution:
    """Tests that saving a file whose name already exists picks a new
    ``name_N.ext``-style name instead of overwriting.

    ``os.path.exists`` and ``os.makedirs`` are monkeypatched so no real
    filesystem access happens; ``self.extant_files`` plays the role of
    the files already on disk.
    """
    def setup(self):
        self.extant_files = []
        self.old_exists = os.path.exists
        os.path.exists = self.exists
        self.old_makedirs = os.makedirs
        os.makedirs = lambda v: None
    def teardown(self):
        # restore the patched os functions
        os.path.exists = self.old_exists
        del self.extant_files, self.old_exists
        os.makedirs = self.old_makedirs
        del self.old_makedirs
    def extant(self, *files):
        # mark the given paths as "already existing"
        self.extant_files.extend(files)
    def exists(self, file_name):
        return file_name in self.extant_files
    def test_self(self):
        # sanity check for the os.path.exists stub itself
        assert not os.path.exists('/uploads/foo.txt')
        self.extant('/uploads/foo.txt')
        assert os.path.exists('/uploads/foo.txt')
    def test_conflict(self):
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        self.extant('/uploads/foo.txt')
        res = uset.save(tfs)
        assert res == 'foo_1.txt'
    def test_multi_conflict(self):
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        self.extant('/uploads/foo.txt',
                    *('/uploads/foo_%d.txt' % n for n in range(1, 6)))
        res = uset.save(tfs)
        assert res == 'foo_6.txt'
    def test_conflict_without_extension(self):
        # FIX: ``('')`` is just the string '' (parentheses alone do not
        # make a tuple), so `extensions` was a str, not a collection.
        # Pass a one-element tuple containing the empty extension.
        uset = UploadSet('files', extensions=('',))
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo')
        self.extant('/uploads/foo')
        res = uset.save(tfs)
        assert res == 'foo_1'
class TestPathsAndURLs:
    """Tests for UploadSet.path and UploadSet.url resolution."""
    def test_path(self):
        # path() joins the set's destination with the (optionally
        # folder-prefixed) filename
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        assert uset.path('foo.txt') == '/uploads/foo.txt'
        assert uset.path('someguy/foo.txt') == '/uploads/someguy/foo.txt'
        assert (uset.path('foo.txt', folder='someguy') ==
                '/uploads/someguy/foo.txt')
        assert (uset.path('foo/bar.txt', folder='someguy') ==
                '/uploads/someguy/foo/bar.txt')
    def test_url_generated(self):
        # without a configured URL, the serving blueprint generates one
        app = Flask(__name__)
        app.config.update(
            UPLOADED_FILES_DEST='/uploads'
        )
        uset = UploadSet('files')
        configure_uploads(app, uset)
        with app.test_request_context():
            url = uset.url('foo.txt')
            gen = url_for('_uploads.uploaded_file', setname='files',
                          filename='foo.txt', _external=True)
            assert url == gen
    def test_url_based(self):
        # with an explicit base URL, url() is simple concatenation and
        # the serving blueprint is not registered at all
        app = Flask(__name__)
        app.config.update(
            UPLOADED_FILES_DEST='/uploads',
            UPLOADED_FILES_URL='http://localhost:5001/'
        )
        uset = UploadSet('files')
        configure_uploads(app, uset)
        with app.test_request_context():
            url = uset.url('foo.txt')
            assert url == 'http://localhost:5001/foo.txt'
            assert '_uploads' not in app.blueprints
|
"""
:copyright: 2010 Matthew "LeafStorm" Frazier
:license: MIT/X11, see LICENSE for details
"""
import os
import os.path
import pytest
from flask import Flask
from flask import url_for
from flask_uploads import ALL
from flask_uploads import AllExcept
from flask_uploads import TestingFileStorage
from flask_uploads import UploadConfiguration
from flask_uploads import UploadSet
from flask_uploads import addslash
from flask_uploads import configure_uploads
from flask_uploads import extension
from flask_uploads import lowercase_ext
class TestMiscellaneous:
    """Unit tests for the small helper utilities of flask_uploads."""

    def test_tfs(self):
        storage = TestingFileStorage(filename='foo.bar')
        assert storage.filename == 'foo.bar'
        assert storage.name is None
        assert storage.saved is None
        storage.save('foo_bar.txt')
        assert storage.saved == 'foo_bar.txt'

    def test_extension(self):
        # extension() returns the last dot-separated suffix, or ''
        cases = [
            ('foo.txt', 'txt'),
            ('foo', ''),
            ('archive.tar.gz', 'gz'),
            ('audio.m4a', 'm4a'),
        ]
        for filename, expected in cases:
            assert extension(filename) == expected

    def test_lowercase_ext(self):
        # only the final extension is lowercased; the stem is untouched
        cases = [
            ('foo.txt', 'foo.txt'),
            ('FOO.TXT', 'FOO.txt'),
            ('foo', 'foo'),
            ('FOO', 'FOO'),
            ('archive.tar.gz', 'archive.tar.gz'),
            ('ARCHIVE.TAR.GZ', 'ARCHIVE.TAR.gz'),
            ('audio.m4a', 'audio.m4a'),
            ('AUDIO.M4A', 'AUDIO.m4a'),
        ]
        for filename, expected in cases:
            assert lowercase_ext(filename) == expected

    def test_addslash(self):
        # URLs without a trailing slash gain one; those with keep it
        for url in ('http://localhost:4000', 'http://localhost:4000/'):
            assert addslash(url) == 'http://localhost:4000/'
        for url in ('http://localhost/uploads', 'http://localhost/uploads/'):
            assert addslash(url) == 'http://localhost/uploads/'

    def test_custom_iterables(self):
        assert 'txt' in ALL
        assert 'exe' in ALL
        everything_but_exe = AllExcept(['exe'])
        assert 'txt' in everything_but_exe
        assert 'exe' not in everything_but_exe
Config = UploadConfiguration
class TestConfiguration:
    """Tests for how configure_uploads resolves each upload set's
    destination directory and public URL from the Flask app config.
    """
    def setup(self):
        self.app = Flask(__name__)
    def teardown(self):
        del self.app
    def configure(self, *sets, **options):
        # apply `options` as app config, wire up the sets, and return
        # the resulting name -> UploadConfiguration mapping
        self.app.config.update(options)
        configure_uploads(self.app, sets)
        return self.app.upload_set_config
    def test_manual(self):
        # per-set DEST and URL settings are used verbatim
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADED_FILES_DEST='/var/files',
            UPLOADED_FILES_URL='http://localhost:6001/',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/files', 'http://localhost:6001/')
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
    def test_selfserve(self):
        # a DEST without a URL means the app will serve the files itself
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADED_FILES_DEST='/var/files',
            UPLOADED_PHOTOS_DEST='/mnt/photos'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/files', None)
        assert photo_conf == Config('/mnt/photos', None)
    def test_defaults(self):
        # UPLOADS_DEFAULT_* settings fan out per set name
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads',
            UPLOADS_DEFAULT_URL='http://localhost:6000/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config(
            '/var/uploads/files', 'http://localhost:6000/files/')
        assert photo_conf == Config(
            '/var/uploads/photos', 'http://localhost:6000/photos/')
    def test_default_selfserve(self):
        # a default DEST without a default URL also means self-serving
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/var/uploads/files', None)
        assert photo_conf == Config('/var/uploads/photos', None)
    def test_mixed_defaults(self):
        # explicit per-set settings take precedence over the defaults
        f, p = UploadSet('files'), UploadSet('photos')
        setconfig = self.configure(
            f, p,
            UPLOADS_DEFAULT_DEST='/var/uploads',
            UPLOADS_DEFAULT_URL='http://localhost:6001/',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config(
            '/var/uploads/files', 'http://localhost:6001/files/')
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
    def test_default_destination_callable(self):
        # a set-level default_dest callable computes DEST from the app
        f = UploadSet('files', default_dest=lambda app: os.path.join(
            app.config['INSTANCE'], 'files'
        ))
        p = UploadSet('photos')
        setconfig = self.configure(
            f, p,
            INSTANCE='/home/me/webapps/thisapp',
            UPLOADED_PHOTOS_DEST='/mnt/photos',
            UPLOADED_PHOTOS_URL='http://localhost:6002/'
        )
        file_conf, photo_conf = setconfig['files'], setconfig['photos']
        assert file_conf == Config('/home/me/webapps/thisapp/files', None)
        assert photo_conf == Config('/mnt/photos', 'http://localhost:6002/')
class TestPreconditions:
    """Checks that an UploadSet accepts and rejects the right files."""

    def test_filenames(self):
        upload_set = UploadSet('files')
        upload_set._config = Config('/uploads')
        # text and image files are allowed by default, executables are not
        for name, expected in (('foo.txt', True),
                               ('boat.jpg', True),
                               ('warez.exe', False)):
            storage = TestingFileStorage(filename=name)
            assert upload_set.file_allowed(storage, name) is expected

    def test_default_extensions(self):
        upload_set = UploadSet('files')
        upload_set._config = Config('/uploads')
        for ext, expected in (('txt', True), ('jpg', True), ('exe', False)):
            assert upload_set.extension_allowed(ext) is expected
class TestSaving:
    """Tests for UploadSet.save: the relative name returned and the
    absolute path handed to the storage object. ``os.makedirs`` is
    stubbed out so no directories are actually created.
    """
    def setup(self):
        # replace os.makedirs with a no-op for the duration of each test
        self.old_makedirs = os.makedirs
        os.makedirs = lambda v: None
    def teardown(self):
        # restore the real os.makedirs
        os.makedirs = self.old_makedirs
        del self.old_makedirs
    def test_saved(self):
        # plain save keeps the original filename
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs)
        assert res == 'foo.txt'
        assert tfs.saved == '/uploads/foo.txt'
    def test_save_folders(self):
        # folder= prefixes both the returned name and the stored path
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs, folder='someguy')
        assert res == 'someguy/foo.txt'
        assert tfs.saved == '/uploads/someguy/foo.txt'
    def test_save_named(self):
        # name= overrides the original filename entirely
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='foo.txt')
        res = uset.save(tfs, name='file_123.txt')
        assert res == 'file_123.txt'
        assert tfs.saved == '/uploads/file_123.txt'
    def test_save_namedext(self):
        # a name ending in '.' keeps the uploaded file's extension
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, name='photo_123.')
        assert res == 'photo_123.jpg'
        assert tfs.saved == '/uploads/photo_123.jpg'
    def test_folder_namedext(self):
        # folder= and a '.'-suffixed name combine
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, folder='someguy', name='photo_123.')
        assert res == 'someguy/photo_123.jpg'
        assert tfs.saved == '/uploads/someguy/photo_123.jpg'
    def test_implicit_folder(self):
        # a slash inside name= acts like an explicit folder=
        uset = UploadSet('files')
        uset._config = Config('/uploads')
        tfs = TestingFileStorage(filename='boat.jpg')
        res = uset.save(tfs, name='someguy/photo_123.')
        assert res == 'someguy/photo_123.jpg'
        assert tfs.saved == '/uploads/someguy/photo_123.jpg'
    def test_secured_filename(self):
        # hostile filenames are sanitized before saving
        uset = UploadSet('files', ALL)
        uset._config = Config('/uploads')
        tfs1 = TestingFileStorage(filename='/etc/passwd')
        tfs2 = TestingFileStorage(filename='../../my_app.wsgi')
        res1 = uset.save(tfs1)
        assert res1 == 'etc_passwd'
        assert tfs1.saved == '/uploads/etc_passwd'
        res2 = uset.save(tfs2)
        assert res2 == 'my_app.wsgi'
        assert tfs2.saved == '/uploads/my_app.wsgi'
    def test_storage_is_not_a_werkzeug_datastructure(self):
        """UploadSet.save needs a valid FileStorage object.
        When something different is passed in, a TypeError gets raised.
        """
        uset = UploadSet('files', ALL)
        uset._config = Config('/uploads')
        non_storage = 'this is no werkzeug.datastructure.FileStorage'
        with pytest.raises(TypeError):
            uset.save(non_storage)
class TestConflictResolution:
def setup(self):
self.extant_files = []
self.old_exists = os.path.exists
os.path.exists = self.exists
self.old_makedirs = os.makedirs
os.makedirs = lambda v: None
def teardown(self):
os.path.exists = self.old_exists
del self.extant_files, self.old_exists
os.makedirs = self.old_makedirs
del self.old_makedirs
def extant(self, *files):
self.extant_files.extend(files)
def exists(self, file_name):
return file_name in self.extant_files
def test_self(self):
assert not os.path.exists('/uploads/foo.txt')
self.extant('/uploads/foo.txt')
assert os.path.exists('/uploads/foo.txt')
def test_conflict(self):
uset = UploadSet('files')
uset._config = Config('/uploads')
tfs = TestingFileStorage(filename='foo.txt')
self.extant('/uploads/foo.txt')
res = uset.save(tfs)
assert res == 'foo_1.txt'
def test_multi_conflict(self):
uset = UploadSet('files')
uset._config = Config('/uploads')
tfs = TestingFileStorage(filename='foo.txt')
self.extant('/uploads/foo.txt',
*('/uploads/foo_%d.txt' % n for n in range(1, 6)))
res = uset.save(tfs)
assert res == 'foo_6.txt'
def test_conflict_without_extension(self):
uset = UploadSet('files', extensions=(''))
uset._config = Config('/uploads')
tfs = TestingFileStorage(filename='foo')
self.extant('/uploads/foo')
res = uset.save(tfs)
assert res == 'foo_1'
class TestPathsAndURLs:
    """Covers UploadSet.path() resolution and URL generation."""

    def test_path(self):
        upload_set = UploadSet('files')
        upload_set._config = Config('/uploads')
        assert upload_set.path('foo.txt') == '/uploads/foo.txt'
        assert upload_set.path('someguy/foo.txt') == '/uploads/someguy/foo.txt'
        in_folder = upload_set.path('foo.txt', folder='someguy')
        assert in_folder == '/uploads/someguy/foo.txt'
        nested = upload_set.path('foo/bar.txt', folder='someguy')
        assert nested == '/uploads/someguy/foo/bar.txt'

    def test_url_generated(self):
        # Without an explicit base URL the serving blueprint builds one.
        app = Flask(__name__)
        app.config.update(UPLOADED_FILES_DEST='/uploads')
        upload_set = UploadSet('files')
        configure_uploads(app, upload_set)
        with app.test_request_context():
            expected = url_for('_uploads.uploaded_file', setname='files',
                               filename='foo.txt', _external=True)
            assert upload_set.url('foo.txt') == expected

    def test_url_based(self):
        # An explicit base URL is used verbatim and no blueprint is registered.
        app = Flask(__name__)
        app.config.update(
            UPLOADED_FILES_DEST='/uploads',
            UPLOADED_FILES_URL='http://localhost:5001/',
        )
        upload_set = UploadSet('files')
        configure_uploads(app, upload_set)
        with app.test_request_context():
            assert upload_set.url('foo.txt') == 'http://localhost:5001/foo.txt'
        assert '_uploads' not in app.blueprints
|
en
| 0.752375
|
:copyright: 2010 Matthew "LeafStorm" Frazier :license: MIT/X11, see LICENSE for details UploadSet.save needs a valid FileStorage object. When something different is passed in, a TypeError gets raised.
| 2.113042
| 2
|
tests/algorithms/gd/test_quickprop.py
|
vishalbelsare/neupy
| 0
|
6626424
|
<gh_stars>0
from functools import partial
from sklearn import datasets, model_selection, preprocessing
from neupy import algorithms
from utils import compare_networks
from base import BaseTestCase
class QuickPropTestCase(BaseTestCase):
    """Behavioral checks for the Quickprop training algorithm."""

    def setUp(self):
        super(QuickPropTestCase, self).setUp()
        features, raw_target = datasets.make_regression(
            n_samples=1500, n_features=5, n_informative=5, n_targets=1,
            random_state=33)
        # MinMaxScaler needs a column vector, hence the reshape.
        scaled_target = preprocessing.MinMaxScaler().fit_transform(
            raw_target.reshape(-1, 1))
        self.data = model_selection.train_test_split(
            features, scaled_target, train_size=0.75)
        self.connection = (5, 10, 1)

    def test_quickprop(self):
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.Quickprop(
            (5, 10, 1),
            step=0.1,
            upper_bound=1,
            shuffle_data=True,
            verbose=False,
        )
        network.train(x_train, y_train, epochs=50)
        test_error = network.prediction_error(x_test, y_test)
        self.assertAlmostEqual(0, test_error, places=2)

    def test_compare_quickprop_and_bp(self):
        x_train, _, y_train, _ = self.data
        bounded_quickprop = partial(algorithms.Quickprop, upper_bound=0.5)
        compare_networks(
            # Algorithms under comparison.
            algorithms.GradientDescent,
            bounded_quickprop,
            # Shared training data.
            (x_train, y_train),
            # Shared network configuration.
            connection=self.connection,
            step=0.1,
            shuffle_data=True,
            # Run configuration.
            epochs=100,
            verbose=False,
            show_comparison_plot=False,
        )
|
from functools import partial
from sklearn import datasets, model_selection, preprocessing
from neupy import algorithms
from utils import compare_networks
from base import BaseTestCase
class QuickPropTestCase(BaseTestCase):
    """Tests for the Quickprop gradient-descent variant."""

    def setUp(self):
        # Build a small scaled regression problem shared by both tests.
        super(QuickPropTestCase, self).setUp()
        data, target = datasets.make_regression(n_samples=1500, n_features=5,
                                                n_informative=5, n_targets=1,
                                                random_state=33)
        target_scaler = preprocessing.MinMaxScaler()
        # MinMaxScaler expects a 2-D array, hence the reshape.
        target = target_scaler.fit_transform(target.reshape(-1, 1))
        self.data = model_selection.train_test_split(data, target,
                                                     train_size=0.75)
        self.connection = (5, 10, 1)

    def test_quickprop(self):
        # Quickprop should drive the test error close to zero on this toy set.
        x_train, x_test, y_train, y_test = self.data
        qp = algorithms.Quickprop(
            (5, 10, 1),
            step=0.1,
            upper_bound=1,
            shuffle_data=True,
            verbose=False,
        )
        qp.train(x_train, y_train, epochs=50)
        error = qp.prediction_error(x_test, y_test)
        self.assertAlmostEqual(0, error, places=2)

    def test_compare_quickprop_and_bp(self):
        # Compare plain gradient descent against a bounded Quickprop run.
        x_train, _, y_train, _ = self.data
        compare_networks(
            # Test classes
            algorithms.GradientDescent,
            partial(algorithms.Quickprop, upper_bound=0.5),
            # Test data
            (x_train, y_train),
            # Network configurations
            connection=self.connection,
            step=0.1,
            shuffle_data=True,
            # Test configurations
            epochs=100,
            verbose=False,
            show_comparison_plot=False
        )
|
en
| 0.437756
|
# Test classes # Test data # Network configurations # Test configurations
| 2.413468
| 2
|
modules/config.py
|
eidanyosoy/Plex-Meta-Manager
| 0
|
6626425
|
<filename>modules/config.py<gh_stars>0
import base64, logging, os, requests
from datetime import datetime
from lxml import html
from modules import util, radarr, sonarr
from modules.anidb import AniDB
from modules.anilist import AniList
from modules.cache import Cache
from modules.convert import Convert
from modules.flixpatrol import FlixPatrol
from modules.icheckmovies import ICheckMovies
from modules.imdb import IMDb
from modules.letterboxd import Letterboxd
from modules.mal import MyAnimeList
from modules.notifiarr import Notifiarr
from modules.omdb import OMDb
from modules.plex import Plex
from modules.radarr import Radarr
from modules.sonarr import Sonarr
from modules.stevenlu import StevenLu
from modules.tautulli import Tautulli
from modules.tmdb import TMDb
from modules.trakt import Trakt
from modules.tvdb import TVDb
from modules.util import Failed
from modules.webhooks import Webhooks
from retrying import retry
from ruamel import yaml
# Module-wide logger shared by the configuration loader.
logger = logging.getLogger("Plex Meta Manager")
# Valid values (with help text) for the collection "sync_mode" setting.
sync_modes = {"append": "Only Add Items to the Collection", "sync": "Add & Remove Items from the Collection"}
# Valid metadata sources for the mass-update operations.
mass_update_options = {"tmdb": "Use TMDb Metadata", "omdb": "Use IMDb Metadata through OMDb"}
class Config:
def __init__(self, default_dir, attrs):
logger.info("Locating config...")
config_file = attrs["config_file"]
if config_file and os.path.exists(config_file): self.config_path = os.path.abspath(config_file)
elif config_file and not os.path.exists(config_file): raise Failed(f"Config Error: config not found at {os.path.abspath(config_file)}")
elif os.path.exists(os.path.join(default_dir, "config.yml")): self.config_path = os.path.abspath(os.path.join(default_dir, "config.yml"))
else: raise Failed(f"Config Error: config not found at {os.path.abspath(default_dir)}")
logger.info(f"Using {self.config_path} as config")
self.default_dir = default_dir
self.test_mode = attrs["test"] if "test" in attrs else False
self.trace_mode = attrs["trace"] if "trace" in attrs else False
self.start_time = attrs["time_obj"]
self.run_hour = datetime.strptime(attrs["time"], "%H:%M").hour
self.requested_collections = util.get_list(attrs["collections"]) if "collections" in attrs else None
self.requested_libraries = util.get_list(attrs["libraries"]) if "libraries" in attrs else None
self.resume_from = attrs["resume"] if "resume" in attrs else None
yaml.YAML().allow_duplicate_keys = True
try:
new_config, _, _ = yaml.util.load_yaml_guess_indent(open(self.config_path, encoding="utf-8"))
def replace_attr(all_data, attr, par):
    """Migrate legacy option *attr* from section *par* into "settings",
    unless "settings" already defines it."""
    settings = all_data.setdefault("settings", {})
    section = all_data.get(par)
    if section and attr in section and attr not in settings:
        settings[attr] = section.pop(attr)
if "libraries" not in new_config:
new_config["libraries"] = {}
if "settings" not in new_config:
new_config["settings"] = {}
if "tmdb" not in new_config:
new_config["tmdb"] = {}
replace_attr(new_config, "cache", "cache")
replace_attr(new_config, "cache_expiration", "cache")
if "config" in new_config:
del new_config["cache"]
replace_attr(new_config, "asset_directory", "plex")
replace_attr(new_config, "sync_mode", "plex")
replace_attr(new_config, "show_unmanaged", "plex")
replace_attr(new_config, "show_filtered", "plex")
replace_attr(new_config, "show_missing", "plex")
replace_attr(new_config, "save_missing", "plex")
if new_config["libraries"]:
for library in new_config["libraries"]:
if new_config["libraries"][library] and "plex" in new_config["libraries"][library]:
replace_attr(new_config["libraries"][library], "asset_directory", "plex")
replace_attr(new_config["libraries"][library], "sync_mode", "plex")
replace_attr(new_config["libraries"][library], "show_unmanaged", "plex")
replace_attr(new_config["libraries"][library], "show_filtered", "plex")
replace_attr(new_config["libraries"][library], "show_missing", "plex")
replace_attr(new_config["libraries"][library], "save_missing", "plex")
if new_config["libraries"][library] and "webhooks" in new_config["libraries"][library] and "collection_changes" not in new_config["libraries"][library]["webhooks"]:
changes = []
def hooks(attr):
if attr in new_config["libraries"][library]["webhooks"]:
changes.extend([w for w in util.get_list(new_config["libraries"][library]["webhooks"].pop(attr), split=False) if w not in changes])
hooks("collection_creation")
hooks("collection_addition")
hooks("collection_removal")
new_config["libraries"][library]["webhooks"]["collection_changes"] = changes if changes else None
if "libraries" in new_config: new_config["libraries"] = new_config.pop("libraries")
if "settings" in new_config: new_config["settings"] = new_config.pop("settings")
if "webhooks" in new_config:
temp = new_config.pop("webhooks")
changes = []
def hooks(attr):
if attr in temp:
items = util.get_list(temp.pop(attr), split=False)
if items:
changes.extend([w for w in items if w not in changes])
hooks("collection_creation")
hooks("collection_addition")
hooks("collection_removal")
temp["collection_changes"] = changes if changes else None
new_config["webhooks"] = temp
if "plex" in new_config: new_config["plex"] = new_config.pop("plex")
if "tmdb" in new_config: new_config["tmdb"] = new_config.pop("tmdb")
if "tautulli" in new_config: new_config["tautulli"] = new_config.pop("tautulli")
if "omdb" in new_config: new_config["omdb"] = new_config.pop("omdb")
if "notifiarr" in new_config: new_config["notifiarr"] = new_config.pop("notifiarr")
if "anidb" in new_config: new_config["anidb"] = new_config.pop("anidb")
if "radarr" in new_config: new_config["radarr"] = new_config.pop("radarr")
if "sonarr" in new_config: new_config["sonarr"] = new_config.pop("sonarr")
if "trakt" in new_config: new_config["trakt"] = new_config.pop("trakt")
if "mal" in new_config: new_config["mal"] = new_config.pop("mal")
yaml.round_trip_dump(new_config, open(self.config_path, "w", encoding="utf-8"), indent=None, block_seq_indent=2)
self.data = new_config
except yaml.scanner.ScannerError as e:
raise Failed(f"YAML Error: {util.tab_new_lines(e)}")
except Exception as e:
util.print_stacktrace()
raise Failed(f"YAML Error: {e}")
def check_for_attribute(data, attribute, parent=None, test_list=None, default=None, do_print=True, default_is_none=False, req_default=False, var_type="str", throw=False, save=True):
    """Look up *attribute* in *data* (optionally under section *parent*),
    validate it against *var_type*/*test_list*, and fall back to *default*
    with a warning when it is missing or invalid.

    When *save* is true and the attribute is absent from a parented
    section, the default is written back into the config file on disk.
    *throw* escalates problems to a ``Failed`` exception instead of a
    warning; *req_default* requires that a usable default exists.
    Returns the validated value (or the default).
    """
    endline = ""
    if parent is not None:
        if data and parent in data:
            data = data[parent]
        else:
            # Parent section missing entirely: fall back to the default
            # quietly and never write anything to disk.
            data = None
            do_print = False
            save = False
    text = f"{attribute} attribute" if parent is None else f"{parent} sub-attribute {attribute}"
    if data is None or attribute not in data:
        message = f"{text} not found"
        if parent and save is True:
            # Persist the default into the config file so the user can see
            # (and later edit) it.  Use context managers so the file handles
            # are closed (the originals leaked them), and utf-8 for
            # consistency with the other config reads/writes in this file.
            with open(self.config_path, encoding="utf-8") as config_file:
                loaded_config, _, _ = yaml.util.load_yaml_guess_indent(config_file)
            endline = f"\n{parent} sub-attribute {attribute} added to config"
            if parent not in loaded_config or not loaded_config[parent]: loaded_config[parent] = {attribute: default}
            elif attribute not in loaded_config[parent]: loaded_config[parent][attribute] = default
            else: endline = ""
            with open(self.config_path, "w", encoding="utf-8") as config_file:
                yaml.round_trip_dump(loaded_config, config_file, indent=None, block_seq_indent=2)
        if default_is_none and var_type in ["list", "int_list"]: return []
    elif data[attribute] is None:
        if default_is_none and var_type in ["list", "int_list"]: return []
        elif default_is_none: return None
        else: message = f"{text} is blank"
    elif var_type == "url":
        # Strip a single trailing slash/backslash from URLs.
        if data[attribute].endswith(("\\", "/")): return data[attribute][:-1]
        else: return data[attribute]
    elif var_type == "bool":
        if isinstance(data[attribute], bool): return data[attribute]
        else: message = f"{text} must be either true or false"
    elif var_type == "int":
        if isinstance(data[attribute], int) and data[attribute] >= 0: return data[attribute]
        else: message = f"{text} must be an integer >= 0"
    elif var_type == "path":
        if os.path.exists(os.path.abspath(data[attribute])): return data[attribute]
        else: message = f"Path {os.path.abspath(data[attribute])} does not exist"
    elif var_type == "list": return util.get_list(data[attribute], split=False)
    elif var_type == "int_list": return util.get_list(data[attribute], int_list=True)
    elif var_type == "list_path":
        # Keep only the paths that exist; collect a warning for the rest.
        temp_list = []
        warning_message = ""
        for p in util.get_list(data[attribute], split=False):
            if os.path.exists(os.path.abspath(p)):
                temp_list.append(p)
            else:
                if len(warning_message) > 0:
                    warning_message += "\n"
                warning_message += f"Config Warning: Path does not exist: {os.path.abspath(p)}"
        if do_print:
            util.print_multiline(f"Config Warning: {warning_message}")
        if len(temp_list) > 0: return temp_list
        else: message = "No Paths exist"
    elif var_type == "lower_list": return util.get_list(data[attribute], lower=True)
    elif test_list is None or data[attribute] in test_list: return data[attribute]
    else: message = f"{text}: {data[attribute]} is an invalid input"
    # Reaching here means the value was missing or invalid: decide between
    # the default, a printed warning, or a hard failure.
    if var_type == "path" and default and os.path.exists(os.path.abspath(default)):
        return default
    elif var_type == "path" and default:
        if data and attribute in data and data[attribute]:
            message = f"neither {data[attribute]} or the default path {default} could be found"
        else:
            message = f"no {text} found and the default path {default} could not be found"
        default = None
    if default is not None or default_is_none:
        message = message + f" using {default} as default"
    message = message + endline
    if req_default and default is None:
        raise Failed(f"Config Error: {attribute} attribute must be set under {parent} globally or under this specific Library")
    options = ""
    if test_list:
        for option, description in test_list.items():
            if len(options) > 0:
                options = f"{options}\n"
            options = f"{options} {option} ({description})"
    if (default is None and not default_is_none) or throw:
        if len(options) > 0:
            message = message + "\n" + options
        raise Failed(f"Config Error: {message}")
    if do_print:
        util.print_multiline(f"Config Warning: {message}")
        if data and attribute in data and data[attribute] and test_list is not None and data[attribute] not in test_list:
            util.print_multiline(options)
    return default
self.session = requests.Session()
self.general = {
"cache": check_for_attribute(self.data, "cache", parent="settings", var_type="bool", default=True),
"cache_expiration": check_for_attribute(self.data, "cache_expiration", parent="settings", var_type="int", default=60),
"asset_directory": check_for_attribute(self.data, "asset_directory", parent="settings", var_type="list_path", default=[os.path.join(default_dir, "assets")], default_is_none=True),
"asset_folders": check_for_attribute(self.data, "asset_folders", parent="settings", var_type="bool", default=True),
"create_asset_folders": check_for_attribute(self.data, "create_asset_folders", parent="settings", var_type="bool", default=False),
"show_missing_season_assets": check_for_attribute(self.data, "show_missing_season_assets", parent="settings", var_type="bool", default=False),
"sync_mode": check_for_attribute(self.data, "sync_mode", parent="settings", default="append", test_list=sync_modes),
"collection_minimum": check_for_attribute(self.data, "collection_minimum", parent="settings", var_type="int", default=1),
"delete_below_minimum": check_for_attribute(self.data, "delete_below_minimum", parent="settings", var_type="bool", default=False),
"delete_not_scheduled": check_for_attribute(self.data, "delete_not_scheduled", parent="settings", var_type="bool", default=False),
"run_again_delay": check_for_attribute(self.data, "run_again_delay", parent="settings", var_type="int", default=0),
"missing_only_released": check_for_attribute(self.data, "missing_only_released", parent="settings", var_type="bool", default=False),
"only_filter_missing": check_for_attribute(self.data, "only_filter_missing", parent="settings", var_type="bool", default=False),
"show_unmanaged": check_for_attribute(self.data, "show_unmanaged", parent="settings", var_type="bool", default=True),
"show_filtered": check_for_attribute(self.data, "show_filtered", parent="settings", var_type="bool", default=False),
"show_missing": check_for_attribute(self.data, "show_missing", parent="settings", var_type="bool", default=True),
"show_missing_assets": check_for_attribute(self.data, "show_missing_assets", parent="settings", var_type="bool", default=True),
"save_missing": check_for_attribute(self.data, "save_missing", parent="settings", var_type="bool", default=True),
"tvdb_language": check_for_attribute(self.data, "tvdb_language", parent="settings", default="default"),
"ignore_ids": check_for_attribute(self.data, "ignore_ids", parent="settings", var_type="int_list", default_is_none=True),
"ignore_imdb_ids": check_for_attribute(self.data, "ignore_imdb_ids", parent="settings", var_type="list", default_is_none=True),
"assets_for_all": check_for_attribute(self.data, "assets_for_all", parent="settings", var_type="bool", default=False, save=False, do_print=False)
}
self.webhooks = {
"error": check_for_attribute(self.data, "error", parent="webhooks", var_type="list", default_is_none=True),
"run_start": check_for_attribute(self.data, "run_start", parent="webhooks", var_type="list", default_is_none=True),
"run_end": check_for_attribute(self.data, "run_end", parent="webhooks", var_type="list", default_is_none=True),
"collection_changes": check_for_attribute(self.data, "collection_changes", parent="webhooks", var_type="list", default_is_none=True)
}
if self.general["cache"]:
util.separator()
self.Cache = Cache(self.config_path, self.general["cache_expiration"])
else:
self.Cache = None
util.separator()
self.NotifiarrFactory = None
if "notifiarr" in self.data:
logger.info("Connecting to Notifiarr...")
try:
self.NotifiarrFactory = Notifiarr(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="notifiarr", throw=True),
"develop": check_for_attribute(self.data, "develop", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False),
"test": check_for_attribute(self.data, "test", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False)
})
except Failed as e:
logger.error(e)
logger.info(f"Notifiarr Connection {'Failed' if self.NotifiarrFactory is None else 'Successful'}")
else:
logger.warning("notifiarr attribute not found")
self.Webhooks = Webhooks(self, self.webhooks, notifiarr=self.NotifiarrFactory)
try:
self.Webhooks.start_time_hooks(self.start_time)
except Failed as e:
util.print_stacktrace()
logger.error(f"Webhooks Error: {e}")
self.errors = []
util.separator()
try:
self.TMDb = None
if "tmdb" in self.data:
logger.info("Connecting to TMDb...")
self.TMDb = TMDb(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="tmdb", throw=True),
"language": check_for_attribute(self.data, "language", parent="tmdb", default="en")
})
logger.info(f"TMDb Connection {'Failed' if self.TMDb is None else 'Successful'}")
else:
raise Failed("Config Error: tmdb attribute not found")
util.separator()
self.OMDb = None
if "omdb" in self.data:
logger.info("Connecting to OMDb...")
try:
self.OMDb = OMDb(self, {"apikey": check_for_attribute(self.data, "apikey", parent="omdb", throw=True)})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"OMDb Connection {'Failed' if self.OMDb is None else 'Successful'}")
else:
logger.warning("omdb attribute not found")
util.separator()
self.Trakt = None
if "trakt" in self.data:
logger.info("Connecting to Trakt...")
try:
self.Trakt = Trakt(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="trakt", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="trakt", throw=True),
"config_path": self.config_path,
"authorization": self.data["trakt"]["authorization"] if "authorization" in self.data["trakt"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"Trakt Connection {'Failed' if self.Trakt is None else 'Successful'}")
else:
logger.warning("trakt attribute not found")
util.separator()
self.MyAnimeList = None
if "mal" in self.data:
logger.info("Connecting to My Anime List...")
try:
self.MyAnimeList = MyAnimeList(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="mal", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="mal", throw=True),
"config_path": self.config_path,
"authorization": self.data["mal"]["authorization"] if "authorization" in self.data["mal"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed' if self.MyAnimeList is None else 'Successful'}")
else:
logger.warning("mal attribute not found")
util.separator()
self.AniDB = None
if "anidb" in self.data:
util.separator()
logger.info("Connecting to AniDB...")
try:
self.AniDB = AniDB(self, {
"username": check_for_attribute(self.data, "username", parent="anidb", throw=True),
"password": check_for_attribute(self.data, "password", parent="anidb", throw=True)
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed Continuing as Guest ' if self.MyAnimeList is None else 'Successful'}")
if self.AniDB is None:
self.AniDB = AniDB(self, None)
self.TVDb = TVDb(self, self.general["tvdb_language"])
self.IMDb = IMDb(self)
self.Convert = Convert(self)
self.AniList = AniList(self)
self.FlixPatrol = FlixPatrol(self)
self.ICheckMovies = ICheckMovies(self)
self.Letterboxd = Letterboxd(self)
self.StevenLu = StevenLu(self)
util.separator()
logger.info("Connecting to Plex Libraries...")
self.general["plex"] = {
"url": check_for_attribute(self.data, "url", parent="plex", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="plex", default_is_none=True),
"timeout": check_for_attribute(self.data, "timeout", parent="plex", var_type="int", default=60),
"clean_bundles": check_for_attribute(self.data, "clean_bundles", parent="plex", var_type="bool", default=False),
"empty_trash": check_for_attribute(self.data, "empty_trash", parent="plex", var_type="bool", default=False),
"optimize": check_for_attribute(self.data, "optimize", parent="plex", var_type="bool", default=False)
}
self.general["radarr"] = {
"url": check_for_attribute(self.data, "url", parent="radarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="radarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="radarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="radarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="radarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="radarr", var_type="bool", default=True),
"availability": check_for_attribute(self.data, "availability", parent="radarr", test_list=radarr.availability_descriptions, default="announced"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="radarr", default_is_none=True),
"tag": check_for_attribute(self.data, "tag", parent="radarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="radarr", var_type="bool", default=False),
"radarr_path": check_for_attribute(self.data, "radarr_path", parent="radarr", default_is_none=True),
"plex_path": check_for_attribute(self.data, "plex_path", parent="radarr", default_is_none=True)
}
self.general["sonarr"] = {
"url": check_for_attribute(self.data, "url", parent="sonarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="sonarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="sonarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="sonarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="sonarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default="all"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="sonarr", default_is_none=True),
"language_profile": check_for_attribute(self.data, "language_profile", parent="sonarr", default_is_none=True),
"series_type": check_for_attribute(self.data, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default="standard"),
"season_folder": check_for_attribute(self.data, "season_folder", parent="sonarr", var_type="bool", default=True),
"tag": check_for_attribute(self.data, "tag", parent="sonarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="sonarr", var_type="bool", default=False),
"cutoff_search": check_for_attribute(self.data, "cutoff_search", parent="sonarr", var_type="bool", default=False),
"sonarr_path": check_for_attribute(self.data, "sonarr_path", parent="sonarr", default_is_none=True),
"plex_path": check_for_attribute(self.data, "plex_path", parent="sonarr", default_is_none=True)
}
self.general["tautulli"] = {
"url": check_for_attribute(self.data, "url", parent="tautulli", var_type="url", default_is_none=True),
"apikey": check_for_attribute(self.data, "apikey", parent="tautulli", default_is_none=True)
}
self.libraries = []
libs = check_for_attribute(self.data, "libraries", throw=True)
for library_name, lib in libs.items():
if self.requested_libraries and library_name not in self.requested_libraries:
continue
util.separator()
params = {
"mapping_name": str(library_name),
"name": str(lib["library_name"]) if lib and "library_name" in lib and lib["library_name"] else str(library_name)
}
display_name = f"{params['name']} ({params['mapping_name']})" if lib and "library_name" in lib and lib["library_name"] else params["mapping_name"]
util.separator(f"{display_name} Configuration")
logger.info("")
logger.info(f"Connecting to {display_name} Library...")
params["asset_directory"] = check_for_attribute(lib, "asset_directory", parent="settings", var_type="list_path", default=self.general["asset_directory"], default_is_none=True, save=False)
if params["asset_directory"] is None:
logger.warning("Config Warning: Assets will not be used asset_directory attribute must be set under config or under this specific Library")
params["asset_folders"] = check_for_attribute(lib, "asset_folders", parent="settings", var_type="bool", default=self.general["asset_folders"], do_print=False, save=False)
params["sync_mode"] = check_for_attribute(lib, "sync_mode", parent="settings", test_list=sync_modes, default=self.general["sync_mode"], do_print=False, save=False)
params["show_unmanaged"] = check_for_attribute(lib, "show_unmanaged", parent="settings", var_type="bool", default=self.general["show_unmanaged"], do_print=False, save=False)
params["show_filtered"] = check_for_attribute(lib, "show_filtered", parent="settings", var_type="bool", default=self.general["show_filtered"], do_print=False, save=False)
params["show_missing"] = check_for_attribute(lib, "show_missing", parent="settings", var_type="bool", default=self.general["show_missing"], do_print=False, save=False)
params["show_missing_assets"] = check_for_attribute(lib, "show_missing_assets", parent="settings", var_type="bool", default=self.general["show_missing_assets"], do_print=False, save=False)
params["save_missing"] = check_for_attribute(lib, "save_missing", parent="settings", var_type="bool", default=self.general["save_missing"], do_print=False, save=False)
params["missing_only_released"] = check_for_attribute(lib, "missing_only_released", parent="settings", var_type="bool", default=self.general["missing_only_released"], do_print=False, save=False)
params["only_filter_missing"] = check_for_attribute(lib, "only_filter_missing", parent="settings", var_type="bool", default=self.general["only_filter_missing"], do_print=False, save=False)
params["create_asset_folders"] = check_for_attribute(lib, "create_asset_folders", parent="settings", var_type="bool", default=self.general["create_asset_folders"], do_print=False, save=False)
params["show_missing_season_assets"] = check_for_attribute(lib, "show_missing_season_assets", parent="settings", var_type="bool", default=self.general["show_missing_season_assets"], do_print=False, save=False)
params["collection_minimum"] = check_for_attribute(lib, "collection_minimum", parent="settings", var_type="int", default=self.general["collection_minimum"], do_print=False, save=False)
params["delete_below_minimum"] = check_for_attribute(lib, "delete_below_minimum", parent="settings", var_type="bool", default=self.general["delete_below_minimum"], do_print=False, save=False)
params["delete_not_scheduled"] = check_for_attribute(lib, "delete_not_scheduled", parent="settings", var_type="bool", default=self.general["delete_not_scheduled"], do_print=False, save=False)
params["delete_unmanaged_collections"] = check_for_attribute(lib, "delete_unmanaged_collections", parent="settings", var_type="bool", default=False, do_print=False, save=False)
params["delete_collections_with_less"] = check_for_attribute(lib, "delete_collections_with_less", parent="settings", var_type="int", default_is_none=True, do_print=False, save=False)
params["ignore_ids"] = check_for_attribute(lib, "ignore_ids", parent="settings", var_type="int_list", default_is_none=True, do_print=False, save=False)
params["ignore_ids"].extend([i for i in self.general["ignore_ids"] if i not in params["ignore_ids"]])
params["ignore_imdb_ids"] = check_for_attribute(lib, "ignore_imdb_ids", parent="settings", var_type="list", default_is_none=True, do_print=False, save=False)
params["ignore_imdb_ids"].extend([i for i in self.general["ignore_imdb_ids"] if i not in params["ignore_imdb_ids"]])
params["error_webhooks"] = check_for_attribute(lib, "error", parent="webhooks", var_type="list", default=self.webhooks["error"], do_print=False, save=False, default_is_none=True)
params["collection_changes_webhooks"] = check_for_attribute(lib, "collection_creation", parent="webhooks", var_type="list", default=self.webhooks["collection_changes"], do_print=False, save=False, default_is_none=True)
params["assets_for_all"] = check_for_attribute(lib, "assets_for_all", parent="settings", var_type="bool", default=self.general["assets_for_all"], do_print=False, save=False)
params["mass_genre_update"] = check_for_attribute(lib, "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_audience_rating_update"] = check_for_attribute(lib, "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_critic_rating_update"] = check_for_attribute(lib, "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_trakt_rating_update"] = check_for_attribute(lib, "mass_trakt_rating_update", var_type="bool", default=False, save=False, do_print=False)
params["split_duplicates"] = check_for_attribute(lib, "split_duplicates", var_type="bool", default=False, save=False, do_print=False)
params["radarr_add_all"] = check_for_attribute(lib, "radarr_add_all", var_type="bool", default=False, save=False, do_print=False)
params["sonarr_add_all"] = check_for_attribute(lib, "sonarr_add_all", var_type="bool", default=False, save=False, do_print=False)
params["tmdb_collections"] = None
params["genre_mapper"] = None
if lib and "operations" in lib and lib["operations"]:
if isinstance(lib["operations"], dict):
if "assets_for_all" in lib["operations"]:
params["assets_for_all"] = check_for_attribute(lib["operations"], "assets_for_all", var_type="bool", default=False, save=False)
if "delete_unmanaged_collections" in lib["operations"]:
params["delete_unmanaged_collections"] = check_for_attribute(lib["operations"], "delete_unmanaged_collections", var_type="bool", default=False, save=False)
if "delete_collections_with_less" in lib["operations"]:
params["delete_collections_with_less"] = check_for_attribute(lib["operations"], "delete_collections_with_less", var_type="int", default_is_none=True, save=False)
if "mass_genre_update" in lib["operations"]:
params["mass_genre_update"] = check_for_attribute(lib["operations"], "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_audience_rating_update" in lib["operations"]:
params["mass_audience_rating_update"] = check_for_attribute(lib["operations"], "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_critic_rating_update" in lib["operations"]:
params["mass_critic_rating_update"] = check_for_attribute(lib["operations"], "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_trakt_rating_update" in lib["operations"]:
params["mass_trakt_rating_update"] = check_for_attribute(lib["operations"], "mass_trakt_rating_update", var_type="bool", default=False, save=False)
if "split_duplicates" in lib["operations"]:
params["split_duplicates"] = check_for_attribute(lib["operations"], "split_duplicates", var_type="bool", default=False, save=False)
if "radarr_add_all" in lib["operations"]:
params["radarr_add_all"] = check_for_attribute(lib["operations"], "radarr_add_all", var_type="bool", default=False, save=False)
if "sonarr_add_all" in lib["operations"]:
params["sonarr_add_all"] = check_for_attribute(lib["operations"], "sonarr_add_all", var_type="bool", default=False, save=False)
if "tmdb_collections" in lib["operations"]:
params["tmdb_collections"] = {"exclude_ids": [], "remove_suffix": None, "template": {"tmdb_collection_details": "<<collection_id>>"}}
if lib["operations"]["tmdb_collections"] and isinstance(lib["operations"]["tmdb_collections"], dict):
params["tmdb_collections"]["exclude_ids"] = check_for_attribute(lib["operations"]["tmdb_collections"], "exclude_ids", var_type="int_list", default_is_none=True, save=False)
params["tmdb_collections"]["remove_suffix"] = check_for_attribute(lib["operations"]["tmdb_collections"], "remove_suffix", default_is_none=True, save=False)
if "template" in lib["operations"]["tmdb_collections"] and lib["operations"]["tmdb_collections"]["template"] and isinstance(lib["operations"]["tmdb_collections"]["template"], dict):
params["tmdb_collections"]["template"] = lib["operations"]["tmdb_collections"]["template"]
else:
logger.warning("Config Warning: Using default template for tmdb_collections")
else:
logger.error("Config Error: tmdb_collections blank using default settings")
if params["tmdb_collections"]["remove_suffix"]:
params["tmdb_collections"]["remove_suffix"] = params["tmdb_collections"]["remove_suffix"].strip()
if "genre_mapper" in lib["operations"]:
if lib["operations"]["genre_mapper"] and isinstance(lib["operations"]["genre_mapper"], dict):
params["genre_mapper"] = {}
for new_genre, old_genres in lib["operations"]["genre_mapper"].items():
for old_genre in util.get_list(old_genres, split=False):
params["genre_mapper"][old_genre] = new_genre
else:
logger.error("Config Error: genre_mapper is blank")
else:
logger.error("Config Error: operations must be a dictionary")
def error_check(attr, service):
params[attr] = None
err = f"Config Error: {attr} cannot be omdb without a successful {service} Connection"
self.errors.append(err)
logger.error(err)
if self.OMDb is None and params["mass_genre_update"] == "omdb":
error_check("mass_genre_update", "OMDb")
if self.OMDb is None and params["mass_audience_rating_update"] == "omdb":
error_check("mass_audience_rating_update", "OMDb")
if self.OMDb is None and params["mass_critic_rating_update"] == "omdb":
error_check("mass_critic_rating_update", "OMDb")
if self.Trakt is None and params["mass_trakt_rating_update"]:
error_check("mass_trakt_rating_update", "Trakt")
try:
if lib and "metadata_path" in lib:
params["metadata_path"] = []
if lib["metadata_path"] is None:
raise Failed("Config Error: metadata_path attribute is blank")
paths_to_check = lib["metadata_path"] if isinstance(lib["metadata_path"], list) else [lib["metadata_path"]]
for path in paths_to_check:
if isinstance(path, dict):
def check_dict(attr, name):
if attr in path:
if path[attr] is None:
err = f"Config Error: metadata_path {attr} is blank"
self.errors.append(err)
logger.error(err)
else:
params["metadata_path"].append((name, path[attr]))
check_dict("url", "URL")
check_dict("git", "Git")
check_dict("file", "File")
check_dict("folder", "Folder")
else:
params["metadata_path"].append(("File", path))
else:
params["metadata_path"] = [("File", os.path.join(default_dir, f"{library_name}.yml"))]
params["default_dir"] = default_dir
params["plex"] = {
"url": check_for_attribute(lib, "url", parent="plex", var_type="url", default=self.general["plex"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="plex", default=self.general["plex"]["token"], req_default=True, save=False),
"timeout": check_for_attribute(lib, "timeout", parent="plex", var_type="int", default=self.general["plex"]["timeout"], save=False),
"clean_bundles": check_for_attribute(lib, "clean_bundles", parent="plex", var_type="bool", default=self.general["plex"]["clean_bundles"], save=False),
"empty_trash": check_for_attribute(lib, "empty_trash", parent="plex", var_type="bool", default=self.general["plex"]["empty_trash"], save=False),
"optimize": check_for_attribute(lib, "optimize", parent="plex", var_type="bool", default=self.general["plex"]["optimize"], save=False)
}
library = Plex(self, params)
logger.info(f"{display_name} Library Connection Successful")
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} Library Connection Failed")
continue
if self.general["radarr"]["url"] or (lib and "radarr" in lib):
logger.info("")
util.separator("Radarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Radarr...")
logger.info("")
try:
library.Radarr = Radarr(self, library, {
"url": check_for_attribute(lib, "url", parent="radarr", var_type="url", default=self.general["radarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="radarr", default=self.general["radarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="radarr", var_type="bool", default=self.general["radarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="radarr", var_type="bool", default=self.general["radarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="radarr", default=self.general["radarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="radarr", var_type="bool", default=self.general["radarr"]["monitor"], save=False),
"availability": check_for_attribute(lib, "availability", parent="radarr", test_list=radarr.availability_descriptions, default=self.general["radarr"]["availability"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="radarr", default=self.general["radarr"]["quality_profile"], req_default=True, save=False),
"tag": check_for_attribute(lib, "tag", parent="radarr", var_type="lower_list", default=self.general["radarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="radarr", var_type="bool", default=self.general["radarr"]["search"], save=False),
"radarr_path": check_for_attribute(lib, "radarr_path", parent="radarr", default=self.general["radarr"]["radarr_path"], default_is_none=True, save=False),
"plex_path": check_for_attribute(lib, "plex_path", parent="radarr", default=self.general["radarr"]["plex_path"], default_is_none=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Radarr Connection {'Failed' if library.Radarr is None else 'Successful'}")
if self.general["sonarr"]["url"] or (lib and "sonarr" in lib):
logger.info("")
util.separator("Sonarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Sonarr...")
logger.info("")
try:
library.Sonarr = Sonarr(self, library, {
"url": check_for_attribute(lib, "url", parent="sonarr", var_type="url", default=self.general["sonarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="sonarr", default=self.general["sonarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="sonarr", default=self.general["sonarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default=self.general["sonarr"]["monitor"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="sonarr", default=self.general["sonarr"]["quality_profile"], req_default=True, save=False),
"language_profile": check_for_attribute(lib, "language_profile", parent="sonarr", default=self.general["sonarr"]["language_profile"], save=False) if self.general["sonarr"]["language_profile"] else check_for_attribute(lib, "language_profile", parent="sonarr", default_is_none=True, save=False),
"series_type": check_for_attribute(lib, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default=self.general["sonarr"]["series_type"], save=False),
"season_folder": check_for_attribute(lib, "season_folder", parent="sonarr", var_type="bool", default=self.general["sonarr"]["season_folder"], save=False),
"tag": check_for_attribute(lib, "tag", parent="sonarr", var_type="lower_list", default=self.general["sonarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["search"], save=False),
"cutoff_search": check_for_attribute(lib, "cutoff_search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["cutoff_search"], save=False),
"sonarr_path": check_for_attribute(lib, "sonarr_path", parent="sonarr", default=self.general["sonarr"]["sonarr_path"], default_is_none=True, save=False),
"plex_path": check_for_attribute(lib, "plex_path", parent="sonarr", default=self.general["sonarr"]["plex_path"], default_is_none=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Sonarr Connection {'Failed' if library.Sonarr is None else 'Successful'}")
if self.general["tautulli"]["url"] or (lib and "tautulli" in lib):
logger.info("")
util.separator("Tautulli Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Tautulli...")
logger.info("")
try:
library.Tautulli = Tautulli(self, library, {
"url": check_for_attribute(lib, "url", parent="tautulli", var_type="url", default=self.general["tautulli"]["url"], req_default=True, save=False),
"apikey": check_for_attribute(lib, "apikey", parent="tautulli", default=self.general["tautulli"]["apikey"], req_default=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Tautulli Connection {'Failed' if library.Tautulli is None else 'Successful'}")
library.Webhooks = Webhooks(self, {"error_webhooks": library.error_webhooks}, library=library, notifiarr=self.NotifiarrFactory)
logger.info("")
self.libraries.append(library)
util.separator()
if len(self.libraries) > 0:
logger.info(f"{len(self.libraries)} Plex Library Connection{'s' if len(self.libraries) > 1 else ''} Successful")
else:
raise Failed("Plex Error: No Plex libraries were connected to")
util.separator()
if self.errors:
self.notify(self.errors)
except Exception as e:
self.notify(e)
raise
def notify(self, text, library=None, collection=None, critical=True):
for error in util.get_list(text, split=False):
try:
self.Webhooks.error_hooks(error, library=library, collection=collection, critical=critical)
except Failed as e:
util.print_stacktrace()
logger.error(f"Webhooks Error: {e}")
def get_html(self, url, headers=None, params=None):
return html.fromstring(self.get(url, headers=headers, params=params).content)
def get_json(self, url, json=None, headers=None, params=None):
return self.get(url, json=json, headers=headers, params=params).json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def get(self, url, json=None, headers=None, params=None):
return self.session.get(url, json=json, headers=headers, params=params)
def get_image_encoded(self, url):
return base64.b64encode(self.get(url).content).decode('utf-8')
def post_html(self, url, data=None, json=None, headers=None):
return html.fromstring(self.post(url, data=data, json=json, headers=headers).content)
def post_json(self, url, data=None, json=None, headers=None):
return self.post(url, data=data, json=json, headers=headers).json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def post(self, url, data=None, json=None, headers=None):
return self.session.post(url, data=data, json=json, headers=headers)
|
<filename>modules/config.py<gh_stars>0
import base64, logging, os, requests
from datetime import datetime
from lxml import html
from modules import util, radarr, sonarr
from modules.anidb import AniDB
from modules.anilist import AniList
from modules.cache import Cache
from modules.convert import Convert
from modules.flixpatrol import FlixPatrol
from modules.icheckmovies import ICheckMovies
from modules.imdb import IMDb
from modules.letterboxd import Letterboxd
from modules.mal import MyAnimeList
from modules.notifiarr import Notifiarr
from modules.omdb import OMDb
from modules.plex import Plex
from modules.radarr import Radarr
from modules.sonarr import Sonarr
from modules.stevenlu import StevenLu
from modules.tautulli import Tautulli
from modules.tmdb import TMDb
from modules.trakt import Trakt
from modules.tvdb import TVDb
from modules.util import Failed
from modules.webhooks import Webhooks
from retrying import retry
from ruamel import yaml
# Module-wide logger shared across Plex Meta Manager modules.
logger = logging.getLogger("Plex Meta Manager")
# Allowed values for the "sync_mode" setting, mapped to human-readable descriptions.
sync_modes = {"append": "Only Add Items to the Collection", "sync": "Add & Remove Items from the Collection"}
# Allowed metadata sources for the mass_*_update library operations.
mass_update_options = {"tmdb": "Use TMDb Metadata", "omdb": "Use IMDb Metadata through OMDb"}
class Config:
def __init__(self, default_dir, attrs):
logger.info("Locating config...")
config_file = attrs["config_file"]
if config_file and os.path.exists(config_file): self.config_path = os.path.abspath(config_file)
elif config_file and not os.path.exists(config_file): raise Failed(f"Config Error: config not found at {os.path.abspath(config_file)}")
elif os.path.exists(os.path.join(default_dir, "config.yml")): self.config_path = os.path.abspath(os.path.join(default_dir, "config.yml"))
else: raise Failed(f"Config Error: config not found at {os.path.abspath(default_dir)}")
logger.info(f"Using {self.config_path} as config")
self.default_dir = default_dir
self.test_mode = attrs["test"] if "test" in attrs else False
self.trace_mode = attrs["trace"] if "trace" in attrs else False
self.start_time = attrs["time_obj"]
self.run_hour = datetime.strptime(attrs["time"], "%H:%M").hour
self.requested_collections = util.get_list(attrs["collections"]) if "collections" in attrs else None
self.requested_libraries = util.get_list(attrs["libraries"]) if "libraries" in attrs else None
self.resume_from = attrs["resume"] if "resume" in attrs else None
yaml.YAML().allow_duplicate_keys = True
try:
new_config, _, _ = yaml.util.load_yaml_guess_indent(open(self.config_path, encoding="utf-8"))
def replace_attr(all_data, attr, par):
if "settings" not in all_data:
all_data["settings"] = {}
if par in all_data and all_data[par] and attr in all_data[par] and attr not in all_data["settings"]:
all_data["settings"][attr] = all_data[par][attr]
del all_data[par][attr]
if "libraries" not in new_config:
new_config["libraries"] = {}
if "settings" not in new_config:
new_config["settings"] = {}
if "tmdb" not in new_config:
new_config["tmdb"] = {}
replace_attr(new_config, "cache", "cache")
replace_attr(new_config, "cache_expiration", "cache")
if "config" in new_config:
del new_config["cache"]
replace_attr(new_config, "asset_directory", "plex")
replace_attr(new_config, "sync_mode", "plex")
replace_attr(new_config, "show_unmanaged", "plex")
replace_attr(new_config, "show_filtered", "plex")
replace_attr(new_config, "show_missing", "plex")
replace_attr(new_config, "save_missing", "plex")
if new_config["libraries"]:
for library in new_config["libraries"]:
if new_config["libraries"][library] and "plex" in new_config["libraries"][library]:
replace_attr(new_config["libraries"][library], "asset_directory", "plex")
replace_attr(new_config["libraries"][library], "sync_mode", "plex")
replace_attr(new_config["libraries"][library], "show_unmanaged", "plex")
replace_attr(new_config["libraries"][library], "show_filtered", "plex")
replace_attr(new_config["libraries"][library], "show_missing", "plex")
replace_attr(new_config["libraries"][library], "save_missing", "plex")
if new_config["libraries"][library] and "webhooks" in new_config["libraries"][library] and "collection_changes" not in new_config["libraries"][library]["webhooks"]:
changes = []
def hooks(attr):
if attr in new_config["libraries"][library]["webhooks"]:
changes.extend([w for w in util.get_list(new_config["libraries"][library]["webhooks"].pop(attr), split=False) if w not in changes])
hooks("collection_creation")
hooks("collection_addition")
hooks("collection_removal")
new_config["libraries"][library]["webhooks"]["collection_changes"] = changes if changes else None
if "libraries" in new_config: new_config["libraries"] = new_config.pop("libraries")
if "settings" in new_config: new_config["settings"] = new_config.pop("settings")
if "webhooks" in new_config:
temp = new_config.pop("webhooks")
changes = []
def hooks(attr):
if attr in temp:
items = util.get_list(temp.pop(attr), split=False)
if items:
changes.extend([w for w in items if w not in changes])
hooks("collection_creation")
hooks("collection_addition")
hooks("collection_removal")
temp["collection_changes"] = changes if changes else None
new_config["webhooks"] = temp
if "plex" in new_config: new_config["plex"] = new_config.pop("plex")
if "tmdb" in new_config: new_config["tmdb"] = new_config.pop("tmdb")
if "tautulli" in new_config: new_config["tautulli"] = new_config.pop("tautulli")
if "omdb" in new_config: new_config["omdb"] = new_config.pop("omdb")
if "notifiarr" in new_config: new_config["notifiarr"] = new_config.pop("notifiarr")
if "anidb" in new_config: new_config["anidb"] = new_config.pop("anidb")
if "radarr" in new_config: new_config["radarr"] = new_config.pop("radarr")
if "sonarr" in new_config: new_config["sonarr"] = new_config.pop("sonarr")
if "trakt" in new_config: new_config["trakt"] = new_config.pop("trakt")
if "mal" in new_config: new_config["mal"] = new_config.pop("mal")
yaml.round_trip_dump(new_config, open(self.config_path, "w", encoding="utf-8"), indent=None, block_seq_indent=2)
self.data = new_config
except yaml.scanner.ScannerError as e:
raise Failed(f"YAML Error: {util.tab_new_lines(e)}")
except Exception as e:
util.print_stacktrace()
raise Failed(f"YAML Error: {e}")
def check_for_attribute(data, attribute, parent=None, test_list=None, default=None, do_print=True, default_is_none=False, req_default=False, var_type="str", throw=False, save=True):
endline = ""
if parent is not None:
if data and parent in data:
data = data[parent]
else:
data = None
do_print = False
save = False
text = f"{attribute} attribute" if parent is None else f"{parent} sub-attribute {attribute}"
if data is None or attribute not in data:
message = f"{text} not found"
if parent and save is True:
loaded_config, _, _ = yaml.util.load_yaml_guess_indent(open(self.config_path))
endline = f"\n{parent} sub-attribute {attribute} added to config"
if parent not in loaded_config or not loaded_config[parent]: loaded_config[parent] = {attribute: default}
elif attribute not in loaded_config[parent]: loaded_config[parent][attribute] = default
else: endline = ""
yaml.round_trip_dump(loaded_config, open(self.config_path, "w"), indent=None, block_seq_indent=2)
if default_is_none and var_type in ["list", "int_list"]: return []
elif data[attribute] is None:
if default_is_none and var_type in ["list", "int_list"]: return []
elif default_is_none: return None
else: message = f"{text} is blank"
elif var_type == "url":
if data[attribute].endswith(("\\", "/")): return data[attribute][:-1]
else: return data[attribute]
elif var_type == "bool":
if isinstance(data[attribute], bool): return data[attribute]
else: message = f"{text} must be either true or false"
elif var_type == "int":
if isinstance(data[attribute], int) and data[attribute] >= 0: return data[attribute]
else: message = f"{text} must an integer >= 0"
elif var_type == "path":
if os.path.exists(os.path.abspath(data[attribute])): return data[attribute]
else: message = f"Path {os.path.abspath(data[attribute])} does not exist"
elif var_type == "list": return util.get_list(data[attribute], split=False)
elif var_type == "int_list": return util.get_list(data[attribute], int_list=True)
elif var_type == "list_path":
temp_list = []
warning_message = ""
for p in util.get_list(data[attribute], split=False):
if os.path.exists(os.path.abspath(p)):
temp_list.append(p)
else:
if len(warning_message) > 0:
warning_message += "\n"
warning_message += f"Config Warning: Path does not exist: {os.path.abspath(p)}"
if do_print:
util.print_multiline(f"Config Warning: {warning_message}")
if len(temp_list) > 0: return temp_list
else: message = "No Paths exist"
elif var_type == "lower_list": return util.get_list(data[attribute], lower=True)
elif test_list is None or data[attribute] in test_list: return data[attribute]
else: message = f"{text}: {data[attribute]} is an invalid input"
if var_type == "path" and default and os.path.exists(os.path.abspath(default)):
return default
elif var_type == "path" and default:
if data and attribute in data and data[attribute]:
message = f"neither {data[attribute]} or the default path {default} could be found"
else:
message = f"no {text} found and the default path {default} could not be found"
default = None
if default is not None or default_is_none:
message = message + f" using {default} as default"
message = message + endline
if req_default and default is None:
raise Failed(f"Config Error: {attribute} attribute must be set under {parent} globally or under this specific Library")
options = ""
if test_list:
for option, description in test_list.items():
if len(options) > 0:
options = f"{options}\n"
options = f"{options} {option} ({description})"
if (default is None and not default_is_none) or throw:
if len(options) > 0:
message = message + "\n" + options
raise Failed(f"Config Error: {message}")
if do_print:
util.print_multiline(f"Config Warning: {message}")
if data and attribute in data and data[attribute] and test_list is not None and data[attribute] not in test_list:
util.print_multiline(options)
return default
self.session = requests.Session()
self.general = {
"cache": check_for_attribute(self.data, "cache", parent="settings", var_type="bool", default=True),
"cache_expiration": check_for_attribute(self.data, "cache_expiration", parent="settings", var_type="int", default=60),
"asset_directory": check_for_attribute(self.data, "asset_directory", parent="settings", var_type="list_path", default=[os.path.join(default_dir, "assets")], default_is_none=True),
"asset_folders": check_for_attribute(self.data, "asset_folders", parent="settings", var_type="bool", default=True),
"create_asset_folders": check_for_attribute(self.data, "create_asset_folders", parent="settings", var_type="bool", default=False),
"show_missing_season_assets": check_for_attribute(self.data, "show_missing_season_assets", parent="settings", var_type="bool", default=False),
"sync_mode": check_for_attribute(self.data, "sync_mode", parent="settings", default="append", test_list=sync_modes),
"collection_minimum": check_for_attribute(self.data, "collection_minimum", parent="settings", var_type="int", default=1),
"delete_below_minimum": check_for_attribute(self.data, "delete_below_minimum", parent="settings", var_type="bool", default=False),
"delete_not_scheduled": check_for_attribute(self.data, "delete_not_scheduled", parent="settings", var_type="bool", default=False),
"run_again_delay": check_for_attribute(self.data, "run_again_delay", parent="settings", var_type="int", default=0),
"missing_only_released": check_for_attribute(self.data, "missing_only_released", parent="settings", var_type="bool", default=False),
"only_filter_missing": check_for_attribute(self.data, "only_filter_missing", parent="settings", var_type="bool", default=False),
"show_unmanaged": check_for_attribute(self.data, "show_unmanaged", parent="settings", var_type="bool", default=True),
"show_filtered": check_for_attribute(self.data, "show_filtered", parent="settings", var_type="bool", default=False),
"show_missing": check_for_attribute(self.data, "show_missing", parent="settings", var_type="bool", default=True),
"show_missing_assets": check_for_attribute(self.data, "show_missing_assets", parent="settings", var_type="bool", default=True),
"save_missing": check_for_attribute(self.data, "save_missing", parent="settings", var_type="bool", default=True),
"tvdb_language": check_for_attribute(self.data, "tvdb_language", parent="settings", default="default"),
"ignore_ids": check_for_attribute(self.data, "ignore_ids", parent="settings", var_type="int_list", default_is_none=True),
"ignore_imdb_ids": check_for_attribute(self.data, "ignore_imdb_ids", parent="settings", var_type="list", default_is_none=True),
"assets_for_all": check_for_attribute(self.data, "assets_for_all", parent="settings", var_type="bool", default=False, save=False, do_print=False)
}
self.webhooks = {
"error": check_for_attribute(self.data, "error", parent="webhooks", var_type="list", default_is_none=True),
"run_start": check_for_attribute(self.data, "run_start", parent="webhooks", var_type="list", default_is_none=True),
"run_end": check_for_attribute(self.data, "run_end", parent="webhooks", var_type="list", default_is_none=True),
"collection_changes": check_for_attribute(self.data, "collection_changes", parent="webhooks", var_type="list", default_is_none=True)
}
if self.general["cache"]:
util.separator()
self.Cache = Cache(self.config_path, self.general["cache_expiration"])
else:
self.Cache = None
util.separator()
self.NotifiarrFactory = None
if "notifiarr" in self.data:
logger.info("Connecting to Notifiarr...")
try:
self.NotifiarrFactory = Notifiarr(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="notifiarr", throw=True),
"develop": check_for_attribute(self.data, "develop", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False),
"test": check_for_attribute(self.data, "test", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False)
})
except Failed as e:
logger.error(e)
logger.info(f"Notifiarr Connection {'Failed' if self.NotifiarrFactory is None else 'Successful'}")
else:
logger.warning("notifiarr attribute not found")
self.Webhooks = Webhooks(self, self.webhooks, notifiarr=self.NotifiarrFactory)
try:
self.Webhooks.start_time_hooks(self.start_time)
except Failed as e:
util.print_stacktrace()
logger.error(f"Webhooks Error: {e}")
self.errors = []
util.separator()
try:
self.TMDb = None
if "tmdb" in self.data:
logger.info("Connecting to TMDb...")
self.TMDb = TMDb(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="tmdb", throw=True),
"language": check_for_attribute(self.data, "language", parent="tmdb", default="en")
})
logger.info(f"TMDb Connection {'Failed' if self.TMDb is None else 'Successful'}")
else:
raise Failed("Config Error: tmdb attribute not found")
util.separator()
self.OMDb = None
if "omdb" in self.data:
logger.info("Connecting to OMDb...")
try:
self.OMDb = OMDb(self, {"apikey": check_for_attribute(self.data, "apikey", parent="omdb", throw=True)})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"OMDb Connection {'Failed' if self.OMDb is None else 'Successful'}")
else:
logger.warning("omdb attribute not found")
util.separator()
self.Trakt = None
if "trakt" in self.data:
logger.info("Connecting to Trakt...")
try:
self.Trakt = Trakt(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="trakt", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="trakt", throw=True),
"config_path": self.config_path,
"authorization": self.data["trakt"]["authorization"] if "authorization" in self.data["trakt"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"Trakt Connection {'Failed' if self.Trakt is None else 'Successful'}")
else:
logger.warning("trakt attribute not found")
util.separator()
self.MyAnimeList = None
if "mal" in self.data:
logger.info("Connecting to My Anime List...")
try:
self.MyAnimeList = MyAnimeList(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="mal", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="mal", throw=True),
"config_path": self.config_path,
"authorization": self.data["mal"]["authorization"] if "authorization" in self.data["mal"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed' if self.MyAnimeList is None else 'Successful'}")
else:
logger.warning("mal attribute not found")
util.separator()
self.AniDB = None
if "anidb" in self.data:
util.separator()
logger.info("Connecting to AniDB...")
try:
self.AniDB = AniDB(self, {
"username": check_for_attribute(self.data, "username", parent="anidb", throw=True),
"password": check_for_attribute(self.data, "password", parent="anidb", throw=True)
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed Continuing as Guest ' if self.MyAnimeList is None else 'Successful'}")
if self.AniDB is None:
self.AniDB = AniDB(self, None)
self.TVDb = TVDb(self, self.general["tvdb_language"])
self.IMDb = IMDb(self)
self.Convert = Convert(self)
self.AniList = AniList(self)
self.FlixPatrol = FlixPatrol(self)
self.ICheckMovies = ICheckMovies(self)
self.Letterboxd = Letterboxd(self)
self.StevenLu = StevenLu(self)
util.separator()
logger.info("Connecting to Plex Libraries...")
self.general["plex"] = {
"url": check_for_attribute(self.data, "url", parent="plex", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="plex", default_is_none=True),
"timeout": check_for_attribute(self.data, "timeout", parent="plex", var_type="int", default=60),
"clean_bundles": check_for_attribute(self.data, "clean_bundles", parent="plex", var_type="bool", default=False),
"empty_trash": check_for_attribute(self.data, "empty_trash", parent="plex", var_type="bool", default=False),
"optimize": check_for_attribute(self.data, "optimize", parent="plex", var_type="bool", default=False)
}
self.general["radarr"] = {
"url": check_for_attribute(self.data, "url", parent="radarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="radarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="radarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="radarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="radarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="radarr", var_type="bool", default=True),
"availability": check_for_attribute(self.data, "availability", parent="radarr", test_list=radarr.availability_descriptions, default="announced"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="radarr", default_is_none=True),
"tag": check_for_attribute(self.data, "tag", parent="radarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="radarr", var_type="bool", default=False),
"radarr_path": check_for_attribute(self.data, "radarr_path", parent="radarr", default_is_none=True),
"plex_path": check_for_attribute(self.data, "plex_path", parent="radarr", default_is_none=True)
}
self.general["sonarr"] = {
"url": check_for_attribute(self.data, "url", parent="sonarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="sonarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="sonarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="sonarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="sonarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default="all"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="sonarr", default_is_none=True),
"language_profile": check_for_attribute(self.data, "language_profile", parent="sonarr", default_is_none=True),
"series_type": check_for_attribute(self.data, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default="standard"),
"season_folder": check_for_attribute(self.data, "season_folder", parent="sonarr", var_type="bool", default=True),
"tag": check_for_attribute(self.data, "tag", parent="sonarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="sonarr", var_type="bool", default=False),
"cutoff_search": check_for_attribute(self.data, "cutoff_search", parent="sonarr", var_type="bool", default=False),
"sonarr_path": check_for_attribute(self.data, "sonarr_path", parent="sonarr", default_is_none=True),
"plex_path": check_for_attribute(self.data, "plex_path", parent="sonarr", default_is_none=True)
}
self.general["tautulli"] = {
"url": check_for_attribute(self.data, "url", parent="tautulli", var_type="url", default_is_none=True),
"apikey": check_for_attribute(self.data, "apikey", parent="tautulli", default_is_none=True)
}
self.libraries = []
libs = check_for_attribute(self.data, "libraries", throw=True)
for library_name, lib in libs.items():
if self.requested_libraries and library_name not in self.requested_libraries:
continue
util.separator()
params = {
"mapping_name": str(library_name),
"name": str(lib["library_name"]) if lib and "library_name" in lib and lib["library_name"] else str(library_name)
}
display_name = f"{params['name']} ({params['mapping_name']})" if lib and "library_name" in lib and lib["library_name"] else params["mapping_name"]
util.separator(f"{display_name} Configuration")
logger.info("")
logger.info(f"Connecting to {display_name} Library...")
params["asset_directory"] = check_for_attribute(lib, "asset_directory", parent="settings", var_type="list_path", default=self.general["asset_directory"], default_is_none=True, save=False)
if params["asset_directory"] is None:
logger.warning("Config Warning: Assets will not be used asset_directory attribute must be set under config or under this specific Library")
params["asset_folders"] = check_for_attribute(lib, "asset_folders", parent="settings", var_type="bool", default=self.general["asset_folders"], do_print=False, save=False)
params["sync_mode"] = check_for_attribute(lib, "sync_mode", parent="settings", test_list=sync_modes, default=self.general["sync_mode"], do_print=False, save=False)
params["show_unmanaged"] = check_for_attribute(lib, "show_unmanaged", parent="settings", var_type="bool", default=self.general["show_unmanaged"], do_print=False, save=False)
params["show_filtered"] = check_for_attribute(lib, "show_filtered", parent="settings", var_type="bool", default=self.general["show_filtered"], do_print=False, save=False)
params["show_missing"] = check_for_attribute(lib, "show_missing", parent="settings", var_type="bool", default=self.general["show_missing"], do_print=False, save=False)
params["show_missing_assets"] = check_for_attribute(lib, "show_missing_assets", parent="settings", var_type="bool", default=self.general["show_missing_assets"], do_print=False, save=False)
params["save_missing"] = check_for_attribute(lib, "save_missing", parent="settings", var_type="bool", default=self.general["save_missing"], do_print=False, save=False)
params["missing_only_released"] = check_for_attribute(lib, "missing_only_released", parent="settings", var_type="bool", default=self.general["missing_only_released"], do_print=False, save=False)
params["only_filter_missing"] = check_for_attribute(lib, "only_filter_missing", parent="settings", var_type="bool", default=self.general["only_filter_missing"], do_print=False, save=False)
params["create_asset_folders"] = check_for_attribute(lib, "create_asset_folders", parent="settings", var_type="bool", default=self.general["create_asset_folders"], do_print=False, save=False)
params["show_missing_season_assets"] = check_for_attribute(lib, "show_missing_season_assets", parent="settings", var_type="bool", default=self.general["show_missing_season_assets"], do_print=False, save=False)
params["collection_minimum"] = check_for_attribute(lib, "collection_minimum", parent="settings", var_type="int", default=self.general["collection_minimum"], do_print=False, save=False)
params["delete_below_minimum"] = check_for_attribute(lib, "delete_below_minimum", parent="settings", var_type="bool", default=self.general["delete_below_minimum"], do_print=False, save=False)
params["delete_not_scheduled"] = check_for_attribute(lib, "delete_not_scheduled", parent="settings", var_type="bool", default=self.general["delete_not_scheduled"], do_print=False, save=False)
params["delete_unmanaged_collections"] = check_for_attribute(lib, "delete_unmanaged_collections", parent="settings", var_type="bool", default=False, do_print=False, save=False)
params["delete_collections_with_less"] = check_for_attribute(lib, "delete_collections_with_less", parent="settings", var_type="int", default_is_none=True, do_print=False, save=False)
params["ignore_ids"] = check_for_attribute(lib, "ignore_ids", parent="settings", var_type="int_list", default_is_none=True, do_print=False, save=False)
params["ignore_ids"].extend([i for i in self.general["ignore_ids"] if i not in params["ignore_ids"]])
params["ignore_imdb_ids"] = check_for_attribute(lib, "ignore_imdb_ids", parent="settings", var_type="list", default_is_none=True, do_print=False, save=False)
params["ignore_imdb_ids"].extend([i for i in self.general["ignore_imdb_ids"] if i not in params["ignore_imdb_ids"]])
params["error_webhooks"] = check_for_attribute(lib, "error", parent="webhooks", var_type="list", default=self.webhooks["error"], do_print=False, save=False, default_is_none=True)
params["collection_changes_webhooks"] = check_for_attribute(lib, "collection_creation", parent="webhooks", var_type="list", default=self.webhooks["collection_changes"], do_print=False, save=False, default_is_none=True)
params["assets_for_all"] = check_for_attribute(lib, "assets_for_all", parent="settings", var_type="bool", default=self.general["assets_for_all"], do_print=False, save=False)
params["mass_genre_update"] = check_for_attribute(lib, "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_audience_rating_update"] = check_for_attribute(lib, "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_critic_rating_update"] = check_for_attribute(lib, "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_trakt_rating_update"] = check_for_attribute(lib, "mass_trakt_rating_update", var_type="bool", default=False, save=False, do_print=False)
params["split_duplicates"] = check_for_attribute(lib, "split_duplicates", var_type="bool", default=False, save=False, do_print=False)
params["radarr_add_all"] = check_for_attribute(lib, "radarr_add_all", var_type="bool", default=False, save=False, do_print=False)
params["sonarr_add_all"] = check_for_attribute(lib, "sonarr_add_all", var_type="bool", default=False, save=False, do_print=False)
params["tmdb_collections"] = None
params["genre_mapper"] = None
if lib and "operations" in lib and lib["operations"]:
if isinstance(lib["operations"], dict):
if "assets_for_all" in lib["operations"]:
params["assets_for_all"] = check_for_attribute(lib["operations"], "assets_for_all", var_type="bool", default=False, save=False)
if "delete_unmanaged_collections" in lib["operations"]:
params["delete_unmanaged_collections"] = check_for_attribute(lib["operations"], "delete_unmanaged_collections", var_type="bool", default=False, save=False)
if "delete_collections_with_less" in lib["operations"]:
params["delete_collections_with_less"] = check_for_attribute(lib["operations"], "delete_collections_with_less", var_type="int", default_is_none=True, save=False)
if "mass_genre_update" in lib["operations"]:
params["mass_genre_update"] = check_for_attribute(lib["operations"], "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_audience_rating_update" in lib["operations"]:
params["mass_audience_rating_update"] = check_for_attribute(lib["operations"], "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_critic_rating_update" in lib["operations"]:
params["mass_critic_rating_update"] = check_for_attribute(lib["operations"], "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_trakt_rating_update" in lib["operations"]:
params["mass_trakt_rating_update"] = check_for_attribute(lib["operations"], "mass_trakt_rating_update", var_type="bool", default=False, save=False)
if "split_duplicates" in lib["operations"]:
params["split_duplicates"] = check_for_attribute(lib["operations"], "split_duplicates", var_type="bool", default=False, save=False)
if "radarr_add_all" in lib["operations"]:
params["radarr_add_all"] = check_for_attribute(lib["operations"], "radarr_add_all", var_type="bool", default=False, save=False)
if "sonarr_add_all" in lib["operations"]:
params["sonarr_add_all"] = check_for_attribute(lib["operations"], "sonarr_add_all", var_type="bool", default=False, save=False)
if "tmdb_collections" in lib["operations"]:
params["tmdb_collections"] = {"exclude_ids": [], "remove_suffix": None, "template": {"tmdb_collection_details": "<<collection_id>>"}}
if lib["operations"]["tmdb_collections"] and isinstance(lib["operations"]["tmdb_collections"], dict):
params["tmdb_collections"]["exclude_ids"] = check_for_attribute(lib["operations"]["tmdb_collections"], "exclude_ids", var_type="int_list", default_is_none=True, save=False)
params["tmdb_collections"]["remove_suffix"] = check_for_attribute(lib["operations"]["tmdb_collections"], "remove_suffix", default_is_none=True, save=False)
if "template" in lib["operations"]["tmdb_collections"] and lib["operations"]["tmdb_collections"]["template"] and isinstance(lib["operations"]["tmdb_collections"]["template"], dict):
params["tmdb_collections"]["template"] = lib["operations"]["tmdb_collections"]["template"]
else:
logger.warning("Config Warning: Using default template for tmdb_collections")
else:
logger.error("Config Error: tmdb_collections blank using default settings")
if params["tmdb_collections"]["remove_suffix"]:
params["tmdb_collections"]["remove_suffix"] = params["tmdb_collections"]["remove_suffix"].strip()
if "genre_mapper" in lib["operations"]:
if lib["operations"]["genre_mapper"] and isinstance(lib["operations"]["genre_mapper"], dict):
params["genre_mapper"] = {}
for new_genre, old_genres in lib["operations"]["genre_mapper"].items():
for old_genre in util.get_list(old_genres, split=False):
params["genre_mapper"][old_genre] = new_genre
else:
logger.error("Config Error: genre_mapper is blank")
else:
logger.error("Config Error: operations must be a dictionary")
def error_check(attr, service):
params[attr] = None
err = f"Config Error: {attr} cannot be omdb without a successful {service} Connection"
self.errors.append(err)
logger.error(err)
if self.OMDb is None and params["mass_genre_update"] == "omdb":
error_check("mass_genre_update", "OMDb")
if self.OMDb is None and params["mass_audience_rating_update"] == "omdb":
error_check("mass_audience_rating_update", "OMDb")
if self.OMDb is None and params["mass_critic_rating_update"] == "omdb":
error_check("mass_critic_rating_update", "OMDb")
if self.Trakt is None and params["mass_trakt_rating_update"]:
error_check("mass_trakt_rating_update", "Trakt")
try:
if lib and "metadata_path" in lib:
params["metadata_path"] = []
if lib["metadata_path"] is None:
raise Failed("Config Error: metadata_path attribute is blank")
paths_to_check = lib["metadata_path"] if isinstance(lib["metadata_path"], list) else [lib["metadata_path"]]
for path in paths_to_check:
if isinstance(path, dict):
def check_dict(attr, name):
if attr in path:
if path[attr] is None:
err = f"Config Error: metadata_path {attr} is blank"
self.errors.append(err)
logger.error(err)
else:
params["metadata_path"].append((name, path[attr]))
check_dict("url", "URL")
check_dict("git", "Git")
check_dict("file", "File")
check_dict("folder", "Folder")
else:
params["metadata_path"].append(("File", path))
else:
params["metadata_path"] = [("File", os.path.join(default_dir, f"{library_name}.yml"))]
params["default_dir"] = default_dir
params["plex"] = {
"url": check_for_attribute(lib, "url", parent="plex", var_type="url", default=self.general["plex"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="plex", default=self.general["plex"]["token"], req_default=True, save=False),
"timeout": check_for_attribute(lib, "timeout", parent="plex", var_type="int", default=self.general["plex"]["timeout"], save=False),
"clean_bundles": check_for_attribute(lib, "clean_bundles", parent="plex", var_type="bool", default=self.general["plex"]["clean_bundles"], save=False),
"empty_trash": check_for_attribute(lib, "empty_trash", parent="plex", var_type="bool", default=self.general["plex"]["empty_trash"], save=False),
"optimize": check_for_attribute(lib, "optimize", parent="plex", var_type="bool", default=self.general["plex"]["optimize"], save=False)
}
library = Plex(self, params)
logger.info(f"{display_name} Library Connection Successful")
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} Library Connection Failed")
continue
if self.general["radarr"]["url"] or (lib and "radarr" in lib):
logger.info("")
util.separator("Radarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Radarr...")
logger.info("")
try:
library.Radarr = Radarr(self, library, {
"url": check_for_attribute(lib, "url", parent="radarr", var_type="url", default=self.general["radarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="radarr", default=self.general["radarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="radarr", var_type="bool", default=self.general["radarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="radarr", var_type="bool", default=self.general["radarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="radarr", default=self.general["radarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="radarr", var_type="bool", default=self.general["radarr"]["monitor"], save=False),
"availability": check_for_attribute(lib, "availability", parent="radarr", test_list=radarr.availability_descriptions, default=self.general["radarr"]["availability"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="radarr", default=self.general["radarr"]["quality_profile"], req_default=True, save=False),
"tag": check_for_attribute(lib, "tag", parent="radarr", var_type="lower_list", default=self.general["radarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="radarr", var_type="bool", default=self.general["radarr"]["search"], save=False),
"radarr_path": check_for_attribute(lib, "radarr_path", parent="radarr", default=self.general["radarr"]["radarr_path"], default_is_none=True, save=False),
"plex_path": check_for_attribute(lib, "plex_path", parent="radarr", default=self.general["radarr"]["plex_path"], default_is_none=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Radarr Connection {'Failed' if library.Radarr is None else 'Successful'}")
if self.general["sonarr"]["url"] or (lib and "sonarr" in lib):
logger.info("")
util.separator("Sonarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Sonarr...")
logger.info("")
try:
library.Sonarr = Sonarr(self, library, {
"url": check_for_attribute(lib, "url", parent="sonarr", var_type="url", default=self.general["sonarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="sonarr", default=self.general["sonarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="sonarr", default=self.general["sonarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default=self.general["sonarr"]["monitor"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="sonarr", default=self.general["sonarr"]["quality_profile"], req_default=True, save=False),
"language_profile": check_for_attribute(lib, "language_profile", parent="sonarr", default=self.general["sonarr"]["language_profile"], save=False) if self.general["sonarr"]["language_profile"] else check_for_attribute(lib, "language_profile", parent="sonarr", default_is_none=True, save=False),
"series_type": check_for_attribute(lib, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default=self.general["sonarr"]["series_type"], save=False),
"season_folder": check_for_attribute(lib, "season_folder", parent="sonarr", var_type="bool", default=self.general["sonarr"]["season_folder"], save=False),
"tag": check_for_attribute(lib, "tag", parent="sonarr", var_type="lower_list", default=self.general["sonarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["search"], save=False),
"cutoff_search": check_for_attribute(lib, "cutoff_search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["cutoff_search"], save=False),
"sonarr_path": check_for_attribute(lib, "sonarr_path", parent="sonarr", default=self.general["sonarr"]["sonarr_path"], default_is_none=True, save=False),
"plex_path": check_for_attribute(lib, "plex_path", parent="sonarr", default=self.general["sonarr"]["plex_path"], default_is_none=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Sonarr Connection {'Failed' if library.Sonarr is None else 'Successful'}")
if self.general["tautulli"]["url"] or (lib and "tautulli" in lib):
logger.info("")
util.separator("Tautulli Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Tautulli...")
logger.info("")
try:
library.Tautulli = Tautulli(self, library, {
"url": check_for_attribute(lib, "url", parent="tautulli", var_type="url", default=self.general["tautulli"]["url"], req_default=True, save=False),
"apikey": check_for_attribute(lib, "apikey", parent="tautulli", default=self.general["tautulli"]["apikey"], req_default=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Tautulli Connection {'Failed' if library.Tautulli is None else 'Successful'}")
library.Webhooks = Webhooks(self, {"error_webhooks": library.error_webhooks}, library=library, notifiarr=self.NotifiarrFactory)
logger.info("")
self.libraries.append(library)
util.separator()
if len(self.libraries) > 0:
logger.info(f"{len(self.libraries)} Plex Library Connection{'s' if len(self.libraries) > 1 else ''} Successful")
else:
raise Failed("Plex Error: No Plex libraries were connected to")
util.separator()
if self.errors:
self.notify(self.errors)
except Exception as e:
self.notify(e)
raise
def notify(self, text, library=None, collection=None, critical=True):
    """Forward one or more error messages to the configured error webhooks.

    A delivery failure is logged (with stack trace) and swallowed so that
    reporting an error can never itself abort the run.
    """
    for message in util.get_list(text, split=False):
        try:
            self.Webhooks.error_hooks(message, library=library, collection=collection, critical=critical)
        except Failed as webhook_error:
            util.print_stacktrace()
            logger.error(f"Webhooks Error: {webhook_error}")
def get_html(self, url, headers=None, params=None):
    """GET *url* and parse the response body into an lxml HTML element tree."""
    response = self.get(url, headers=headers, params=params)
    return html.fromstring(response.content)
def get_json(self, url, json=None, headers=None, params=None):
    """GET *url* and decode the response body as JSON."""
    response = self.get(url, json=json, headers=headers, params=params)
    return response.json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def get(self, url, json=None, headers=None, params=None):
    """HTTP GET via the shared session; up to 6 attempts, 10s between tries."""
    response = self.session.get(url, json=json, headers=headers, params=params)
    return response
def get_image_encoded(self, url):
    """Download *url* and return its raw bytes base64-encoded as a text string."""
    raw = self.get(url).content
    return base64.b64encode(raw).decode('utf-8')
def post_html(self, url, data=None, json=None, headers=None):
    """POST to *url* and parse the response body into an lxml HTML element tree."""
    response = self.post(url, data=data, json=json, headers=headers)
    return html.fromstring(response.content)
def post_json(self, url, data=None, json=None, headers=None):
    """POST to *url* and decode the response body as JSON."""
    response = self.post(url, data=data, json=json, headers=headers)
    return response.json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def post(self, url, data=None, json=None, headers=None):
    """HTTP POST via the shared session; up to 6 attempts, 10s between tries."""
    response = self.session.post(url, data=data, json=json, headers=headers)
    return response
|
none
| 1
| 2.016889
| 2
|
|
z3/music_men.py
|
Wikunia/hakank
| 279
|
6626426
|
<filename>z3/music_men.py<gh_stars>100-1000
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Music men puzzle in Z3
#
# From
## http://groups.google.com/groups?q=FINITE+DOMAINS+With+Logic+Puzzles&hl=en&lr=&ie=UTF-8&c2coff=1&safe=off&selm=1992Jul27.034607.19386#40IRO.UMontreal.CA&rnum=4
# """"
# MUSIC MEN
#
# Three friends like different kinds of music. From the clues given
# below, can you identify them, say how old each is, and work out
# his musical preference?
#
# Clues:
# 1. Rob is older than Queen, who likes classical music.
# 2. The pop-music fan, who is not Prince, is not 24.
# 3. Leon, who is not King, is 25.
# 4. Mark's musical preference is not jazz.
# Knowledge: "this is what we know of the world."
# Names : Leon, Mark, Rob.
# Surnames : King, Prince, Queen.
# Ages : 24, 25, 26.
# Music : Classical, Jazz, Pop.
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *

sol = Solver()

n = 3
min_age = 24
max_age = 26

# Encoding: every attribute variable holds the AGE of its owner, so two
# attributes describe the same person exactly when they are equal.
Age24 = 24
Age25 = 25
Age26 = 26
Age = [Age24, Age25, Age26]

# variables
# BUGFIX: the original model unpacked the surnames (King, Prince, Queen) out
# of the vector labelled "Names" and the first names out of "Surnames",
# contradicting the knowledge section above, so the two printed rows were
# transposed. The constraint set is unaffected (both vectors are
# all-different); only the labelling is corrected here.
Names = makeIntVector(sol, "Names", n, min_age, max_age)
[Leon, Mark, Rob] = Names

Surnames = makeIntVector(sol, "Surnames", n, min_age, max_age)
[King, Prince, Queen] = Surnames

Music = makeIntVector(sol, "Music", n, min_age, max_age)
[Classical, Jazz, Pop] = Music

# constraints
# Each attribute group belongs to three different people.
sol.add(Distinct(Names))
sol.add(Distinct(Surnames))
sol.add(Distinct(Music))

# 1. Rob is older than Queen, who likes classical music.
sol.add(Rob > Queen)
sol.add(Queen == Classical)

# 2. The pop-music fan, who is not Prince, is not 24.
sol.add(Pop != Prince)
sol.add(Pop != Age24)

# 3. Leon, who is not King, is 25.
sol.add(Leon != King)
sol.add(Leon == Age25)

# 4. Mark's musical preference is not jazz.
sol.add(Mark != Jazz)

# Enumerate every model, blocking each one before re-solving.
num_solutions = 0
while sol.check() == sat:
    num_solutions += 1
    mod = sol.model()
    print("Names :", [mod.eval(Names[i]) for i in range(n)])
    print("Surnames:", [mod.eval(Surnames[i]) for i in range(n)])
    print("Music :", [mod.eval(Music[i]) for i in range(n)])
    print()
    getDifferentSolution(sol, mod, Names, Surnames, Music)

print("num_solutions:", num_solutions)
|
<filename>z3/music_men.py<gh_stars>100-1000
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Music men puzzle in Z3
#
# From
## http://groups.google.com/groups?q=FINITE+DOMAINS+With+Logic+Puzzles&hl=en&lr=&ie=UTF-8&c2coff=1&safe=off&selm=1992Jul27.034607.19386#40IRO.UMontreal.CA&rnum=4
# """"
# MUSIC MEN
#
# Three friends like different kinds of music. From the clues given
# below, can you identify them, say how old each is, and work out
# his musical preference?
#
# Clues:
# 1. Rob is older than Queen, who likes classical music.
# 2. The pop-music fan, who is not Prince, is not 24.
# 3. Leon, who is not King, is 25.
# 4. Mark's musical preference is not jazz.
# Knowledge: "this is what we know of the world."
# Names : Leon, Mark, Rob.
# Surnames : King, Prince, Queen.
# Ages : 24, 25, 26.
# Music : Classical, Jazz, Pop.
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *

sol = Solver()

n = 3
min_age = 24
max_age = 26

# Encoding: every attribute variable holds the AGE of its owner, so two
# attributes describe the same person exactly when they are equal.
Age24 = 24
Age25 = 25
Age26 = 26
Age = [Age24, Age25, Age26]

# variables
# BUGFIX: the original model unpacked the surnames (King, Prince, Queen) out
# of the vector labelled "Names" and the first names out of "Surnames",
# contradicting the knowledge section above, so the two printed rows were
# transposed. The constraint set is unaffected (both vectors are
# all-different); only the labelling is corrected here.
Names = makeIntVector(sol, "Names", n, min_age, max_age)
[Leon, Mark, Rob] = Names

Surnames = makeIntVector(sol, "Surnames", n, min_age, max_age)
[King, Prince, Queen] = Surnames

Music = makeIntVector(sol, "Music", n, min_age, max_age)
[Classical, Jazz, Pop] = Music

# constraints
# Each attribute group belongs to three different people.
sol.add(Distinct(Names))
sol.add(Distinct(Surnames))
sol.add(Distinct(Music))

# 1. Rob is older than Queen, who likes classical music.
sol.add(Rob > Queen)
sol.add(Queen == Classical)

# 2. The pop-music fan, who is not Prince, is not 24.
sol.add(Pop != Prince)
sol.add(Pop != Age24)

# 3. Leon, who is not King, is 25.
sol.add(Leon != King)
sol.add(Leon == Age25)

# 4. Mark's musical preference is not jazz.
sol.add(Mark != Jazz)

# Enumerate every model, blocking each one before re-solving.
num_solutions = 0
while sol.check() == sat:
    num_solutions += 1
    mod = sol.model()
    print("Names :", [mod.eval(Names[i]) for i in range(n)])
    print("Surnames:", [mod.eval(Surnames[i]) for i in range(n)])
    print("Music :", [mod.eval(Music[i]) for i in range(n)])
    print()
    getDifferentSolution(sol, mod, Names, Surnames, Music)

print("num_solutions:", num_solutions)
|
en
| 0.855715
|
#!/usr/bin/python -u # -*- coding: latin-1 -*- # # Music men puzzle in Z3 # # From ## http://groups.google.com/groups?q=FINITE+DOMAINS+With+Logic+Puzzles&hl=en&lr=&ie=UTF-8&c2coff=1&safe=off&selm=1992Jul27.034607.19386#40IRO.UMontreal.CA&rnum=4 # """" # MUSIC MEN # # Three friends like different kinds of music. From the clues given # below, can you identify them, say how old each is, and work out # his musical preference? # # Clues: # 1. Rob is older than Queen, who likes classical music. # 2. The pop-music fan, who is not Prince, is not 24. # 3. Leon, who is not King, is 25. # 4. Mark's musical preference is not jazz. # Knowledge: "this is what we know of the world." # Names : Leon, Mark, Rob. # Surnames : King, Prince, Queen. # Ages : 24, 25, 26. # Music : Classical, Jazz, Pop. # # This Z3 model was written by <NAME> (<EMAIL>) # See also my Z3 page: http://hakank.org/z3/ # # variables # constraints # Rob is older than Queen who likes classical music. # The pop-music fan who is not Prince is not 24. # Leon who is not King is 25. # Mark's musical preference is not jazz.
| 3.090206
| 3
|
networkx/algorithms/connectivity/disjoint_paths.py
|
FrancescoBonacina/networkx
| 10
|
6626427
|
"""Flow based node and edge disjoint paths."""
import networkx as nx
from networkx.exception import NetworkXNoPath
# Define the default maximum flow function to use for the undelying
# maximum flow computations
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
default_flow_func = edmonds_karp
# Functions to build auxiliary data structures.
from .utils import build_auxiliary_node_connectivity
from .utils import build_auxiliary_edge_connectivity
from itertools import filterfalse as _filterfalse
__all__ = [
'edge_disjoint_paths',
'node_disjoint_paths',
]
def edge_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=None,
                        residual=None):
    """Returns the edges disjoint paths between source and target.
    Edge disjoint paths are paths that do not share any edge. The
    number of edge disjoint paths between source and target is equal
    to their edge connectivity.
    Parameters
    ----------
    G : NetworkX graph
    s : node
        Source node for the flow.
    t : node
        Sink node for the flow.
    flow_func : function
        A function for computing the maximum flow among a pair of nodes.
        The function has to accept at least three parameters: a Digraph,
        a source node, and a target node. And return a residual network
        that follows NetworkX conventions (see :meth:`maximum_flow` for
        details). If flow_func is None, the default maximum flow function
        (:meth:`edmonds_karp`) is used. The choice of the default function
        may change from version to version and should not be relied on.
        Default value: None.
    cutoff : int
        Maximum number of paths to yield. Some of the maximum flow
        algorithms, such as :meth:`edmonds_karp` (the default) and
        :meth:`shortest_augmenting_path` support the cutoff parameter,
        and will terminate when the flow value reaches or exceeds the
        cutoff. Other algorithms will ignore this parameter.
        Default value: None.
    auxiliary : NetworkX DiGraph
        Auxiliary digraph to compute flow based edge connectivity. It has
        to have a graph attribute called mapping with a dictionary mapping
        node names in G and in the auxiliary digraph. If provided
        it will be reused instead of recreated. Default value: None.
    residual : NetworkX DiGraph
        Residual network to compute maximum flow. If provided it will be
        reused instead of recreated. Default value: None.
    Returns
    -------
    paths : generator
        A generator of edge independent paths.
    Raises
    ------
    NetworkXNoPath
        If there is no path between source and target.
    NetworkXError
        If source or target are not in the graph G.
    See also
    --------
    :meth:`node_disjoint_paths`
    :meth:`edge_connectivity`
    :meth:`maximum_flow`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`
    Examples
    --------
    We use in this example the platonic icosahedral graph, which has
    edge connectivity 5, thus there are 5 edge disjoint paths between any
    pair of nodes.
    >>> G = nx.icosahedral_graph()
    >>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
    5
    If you need to compute edge disjoint paths on several pairs of
    nodes in the same graph, it is recommended that you reuse the
    data structures that NetworkX uses in the computation: the
    auxiliary digraph for edge connectivity, and the residual
    network for the underlying maximum flow computation.
    Example of how to compute edge disjoint paths among all pairs of
    nodes of the platonic icosahedral graph reusing the data
    structures.
    >>> import itertools
    >>> # You also have to explicitly import the function for
    >>> # building the auxiliary digraph from the connectivity package
    >>> from networkx.algorithms.connectivity import (
    ...     build_auxiliary_edge_connectivity)
    >>> H = build_auxiliary_edge_connectivity(G)
    >>> # And the function for building the residual network from the
    >>> # flow package
    >>> from networkx.algorithms.flow import build_residual_network
    >>> # Note that the auxiliary digraph has an edge attribute named capacity
    >>> R = build_residual_network(H, 'capacity')
    >>> result = {n: {} for n in G}
    >>> # Reuse the auxiliary digraph and the residual network by passing them
    >>> # as arguments
    >>> for u, v in itertools.combinations(G, 2):
    ...     k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
    ...     result[u][v] = k
    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
    True
    You can also use alternative flow algorithms for computing edge disjoint
    paths. For instance, in dense networks the algorithm
    :meth:`shortest_augmenting_path` will usually perform better than
    the default :meth:`edmonds_karp` which is faster for sparse
    networks with highly skewed degree distributions. Alternative flow
    functions have to be explicitly imported from the flow package.
    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
    5
    Notes
    -----
    This is a flow based implementation of edge disjoint paths. We compute
    the maximum flow between source and target on an auxiliary directed
    network. The saturated edges in the residual network after running the
    maximum flow algorithm correspond to edge disjoint paths between source
    and target in the original network. This function handles both directed
    and undirected graphs, and can use all flow algorithms from NetworkX flow
    package.
    """
    if s not in G:
        raise nx.NetworkXError(f"node {s} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {t} not in graph")
    if flow_func is None:
        flow_func = default_flow_func
    if auxiliary is None:
        H = build_auxiliary_edge_connectivity(G)
    else:
        H = auxiliary
    # Maximum possible edge disjoint paths: bounded by the degrees of the
    # two endpoints in the auxiliary digraph.
    possible = min(H.out_degree(s), H.in_degree(t))
    if not possible:
        raise NetworkXNoPath
    if cutoff is None:
        cutoff = possible
    else:
        cutoff = min(cutoff, possible)
    # Compute maximum flow between source and target. Flow functions in
    # NetworkX return a residual network. value_only=True because only the
    # residual network (not a flow dict) is needed to rebuild the paths.
    kwargs = dict(capacity='capacity', residual=residual, cutoff=cutoff,
                  value_only=True)
    if flow_func is preflow_push:
        # preflow_push does not accept a cutoff argument.
        del kwargs['cutoff']
    if flow_func is shortest_augmenting_path:
        # Use the two-phase variant of shortest augmenting path.
        kwargs['two_phase'] = True
    R = flow_func(H, s, t, **kwargs)
    if R.graph['flow_value'] == 0:
        raise NetworkXNoPath
    # Saturated edges in the residual network form the edge disjoint paths
    # between source and target
    cutset = [(u, v) for u, v, d in R.edges(data=True)
              if d['capacity'] == d['flow'] and d['flow'] > 0]
    # This is equivalent to what flow.utils.build_flow_dict returns, but
    # only for the nodes with saturated edges and without reporting 0 flows.
    flow_dict = {n: {} for edge in cutset for n in edge}
    for u, v in cutset:
        flow_dict[u][v] = 1
    # Rebuild the edge disjoint paths from the flow dictionary.
    paths_found = 0
    for v in list(flow_dict[s]):
        if paths_found >= cutoff:
            # preflow_push does not support cutoff: we have to
            # keep track of the paths found and stop at cutoff.
            break
        path = [s]
        if v == t:
            # s and t are directly joined by a saturated edge.
            # NOTE(review): paths_found is not incremented in this branch,
            # so direct s-t edges do not count toward the cutoff check
            # above when the flow algorithm ignored cutoff -- confirm
            # this is intended.
            path.append(v)
            yield path
            continue
        u = v
        while u != t:
            path.append(u)
            try:
                # popitem consumes the saturated edge, so each edge is used
                # by at most one reconstructed path.
                u, _ = flow_dict[u].popitem()
            except KeyError:
                # Dead end: no unused saturated edge leaves u; discard.
                break
        else:
            # The walk reached t without breaking: a complete s-t path.
            path.append(t)
            yield path
            paths_found += 1
def node_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=None,
                        residual=None):
    r"""Computes node disjoint paths between source and target.
    Node disjoint paths are paths that only share their first and last
    nodes. The number of node independent paths between two nodes is
    equal to their local node connectivity.
    Parameters
    ----------
    G : NetworkX graph
    s : node
        Source node.
    t : node
        Target node.
    flow_func : function
        A function for computing the maximum flow among a pair of nodes.
        The function has to accept at least three parameters: a Digraph,
        a source node, and a target node. And return a residual network
        that follows NetworkX conventions (see :meth:`maximum_flow` for
        details). If flow_func is None, the default maximum flow function
        (:meth:`edmonds_karp`) is used. See below for details. The choice
        of the default function may change from version to version and
        should not be relied on. Default value: None.
    cutoff : int
        Maximum number of paths to yield. Some of the maximum flow
        algorithms, such as :meth:`edmonds_karp` (the default) and
        :meth:`shortest_augmenting_path` support the cutoff parameter,
        and will terminate when the flow value reaches or exceeds the
        cutoff. Other algorithms will ignore this parameter.
        Default value: None.
    auxiliary : NetworkX DiGraph
        Auxiliary digraph to compute flow based node connectivity. It has
        to have a graph attribute called mapping with a dictionary mapping
        node names in G and in the auxiliary digraph. If provided
        it will be reused instead of recreated. Default value: None.
    residual : NetworkX DiGraph
        Residual network to compute maximum flow. If provided it will be
        reused instead of recreated. Default value: None.
    Returns
    -------
    paths : generator
        Generator of node disjoint paths.
    Raises
    ------
    NetworkXNoPath
        If there is no path between source and target.
    NetworkXError
        If source or target are not in the graph G.
    Examples
    --------
    We use in this example the platonic icosahedral graph, which has
    node connectivity 5, thus there are 5 node disjoint paths between any
    pair of non neighbor nodes.
    >>> G = nx.icosahedral_graph()
    >>> len(list(nx.node_disjoint_paths(G, 0, 6)))
    5
    If you need to compute node disjoint paths between several pairs of
    nodes in the same graph, it is recommended that you reuse the
    data structures that NetworkX uses in the computation: the
    auxiliary digraph for node connectivity and node cuts, and the
    residual network for the underlying maximum flow computation.
    Example of how to compute node disjoint paths reusing the data
    structures:
    >>> # You also have to explicitly import the function for
    >>> # building the auxiliary digraph from the connectivity package
    >>> from networkx.algorithms.connectivity import (
    ...     build_auxiliary_node_connectivity)
    >>> H = build_auxiliary_node_connectivity(G)
    >>> # And the function for building the residual network from the
    >>> # flow package
    >>> from networkx.algorithms.flow import build_residual_network
    >>> # Note that the auxiliary digraph has an edge attribute named capacity
    >>> R = build_residual_network(H, 'capacity')
    >>> # Reuse the auxiliary digraph and the residual network by passing them
    >>> # as arguments
    >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R)))
    5
    You can also use alternative flow algorithms for computing node disjoint
    paths. For instance, in dense networks the algorithm
    :meth:`shortest_augmenting_path` will usually perform better than
    the default :meth:`edmonds_karp` which is faster for sparse
    networks with highly skewed degree distributions. Alternative flow
    functions have to be explicitly imported from the flow package.
    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
    5
    Notes
    -----
    This is a flow based implementation of node disjoint paths. We compute
    the maximum flow between source and target on an auxiliary directed
    network. The saturated edges in the residual network after running the
    maximum flow algorithm correspond to node disjoint paths between source
    and target in the original network. This function handles both directed
    and undirected graphs, and can use all flow algorithms from NetworkX flow
    package.
    See also
    --------
    :meth:`edge_disjoint_paths`
    :meth:`node_connectivity`
    :meth:`maximum_flow`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`
    """
    if s not in G:
        raise nx.NetworkXError(f"node {s} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {t} not in graph")
    if auxiliary is None:
        H = build_auxiliary_node_connectivity(G)
    else:
        H = auxiliary
    mapping = H.graph.get('mapping', None)
    if mapping is None:
        raise nx.NetworkXError('Invalid auxiliary digraph.')
    # Maximum possible node disjoint paths. In the auxiliary digraph each
    # node x of G is split into an incoming copy f'{mapping[x]}A' and an
    # outgoing copy f'{mapping[x]}B' -- see
    # build_auxiliary_node_connectivity.
    possible = min(H.out_degree(f'{mapping[s]}B'),
                   H.in_degree(f'{mapping[t]}A'))
    if not possible:
        raise NetworkXNoPath
    if cutoff is None:
        cutoff = possible
    else:
        cutoff = min(cutoff, possible)
    kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H,
                  cutoff=cutoff)
    # The edge disjoint paths in the auxiliary digraph correspond to the node
    # disjoint paths in the original graph.
    paths_edges = edge_disjoint_paths(H, f'{mapping[s]}B', f'{mapping[t]}A',
                                      **kwargs)
    for path in paths_edges:
        # Each node in the original graph maps to two nodes in auxiliary
        # graph (its 'A' and 'B' copies), so deduplicate the ids while
        # preserving order.
        yield list(_unique_everseen(H.nodes[node]['id'] for node in path))
def _unique_everseen(iterable):
# Adapted from https://docs.python.org/3/library/itertools.html examples
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
seen = set()
seen_add = seen.add
for element in _filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
|
"""Flow based node and edge disjoint paths."""
import networkx as nx
from networkx.exception import NetworkXNoPath
# Define the default maximum flow function to use for the underlying
# maximum flow computations
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
default_flow_func = edmonds_karp
# Functions to build auxiliary data structures.
from .utils import build_auxiliary_node_connectivity
from .utils import build_auxiliary_edge_connectivity
from itertools import filterfalse as _filterfalse
__all__ = [
'edge_disjoint_paths',
'node_disjoint_paths',
]
def edge_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=None,
                        residual=None):
    """Returns the edges disjoint paths between source and target.
    Edge disjoint paths are paths that do not share any edge. The
    number of edge disjoint paths between source and target is equal
    to their edge connectivity.
    Parameters
    ----------
    G : NetworkX graph
    s : node
        Source node for the flow.
    t : node
        Sink node for the flow.
    flow_func : function
        A function for computing the maximum flow among a pair of nodes.
        The function has to accept at least three parameters: a Digraph,
        a source node, and a target node. And return a residual network
        that follows NetworkX conventions (see :meth:`maximum_flow` for
        details). If flow_func is None, the default maximum flow function
        (:meth:`edmonds_karp`) is used. The choice of the default function
        may change from version to version and should not be relied on.
        Default value: None.
    cutoff : int
        Maximum number of paths to yield. Some of the maximum flow
        algorithms, such as :meth:`edmonds_karp` (the default) and
        :meth:`shortest_augmenting_path` support the cutoff parameter,
        and will terminate when the flow value reaches or exceeds the
        cutoff. Other algorithms will ignore this parameter.
        Default value: None.
    auxiliary : NetworkX DiGraph
        Auxiliary digraph to compute flow based edge connectivity. It has
        to have a graph attribute called mapping with a dictionary mapping
        node names in G and in the auxiliary digraph. If provided
        it will be reused instead of recreated. Default value: None.
    residual : NetworkX DiGraph
        Residual network to compute maximum flow. If provided it will be
        reused instead of recreated. Default value: None.
    Returns
    -------
    paths : generator
        A generator of edge independent paths.
    Raises
    ------
    NetworkXNoPath
        If there is no path between source and target.
    NetworkXError
        If source or target are not in the graph G.
    See also
    --------
    :meth:`node_disjoint_paths`
    :meth:`edge_connectivity`
    :meth:`maximum_flow`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`
    Examples
    --------
    We use in this example the platonic icosahedral graph, which has
    edge connectivity 5, thus there are 5 edge disjoint paths between any
    pair of nodes.
    >>> G = nx.icosahedral_graph()
    >>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
    5
    If you need to compute edge disjoint paths on several pairs of
    nodes in the same graph, it is recommended that you reuse the
    data structures that NetworkX uses in the computation: the
    auxiliary digraph for edge connectivity, and the residual
    network for the underlying maximum flow computation.
    Example of how to compute edge disjoint paths among all pairs of
    nodes of the platonic icosahedral graph reusing the data
    structures.
    >>> import itertools
    >>> # You also have to explicitly import the function for
    >>> # building the auxiliary digraph from the connectivity package
    >>> from networkx.algorithms.connectivity import (
    ...     build_auxiliary_edge_connectivity)
    >>> H = build_auxiliary_edge_connectivity(G)
    >>> # And the function for building the residual network from the
    >>> # flow package
    >>> from networkx.algorithms.flow import build_residual_network
    >>> # Note that the auxiliary digraph has an edge attribute named capacity
    >>> R = build_residual_network(H, 'capacity')
    >>> result = {n: {} for n in G}
    >>> # Reuse the auxiliary digraph and the residual network by passing them
    >>> # as arguments
    >>> for u, v in itertools.combinations(G, 2):
    ...     k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
    ...     result[u][v] = k
    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
    True
    You can also use alternative flow algorithms for computing edge disjoint
    paths. For instance, in dense networks the algorithm
    :meth:`shortest_augmenting_path` will usually perform better than
    the default :meth:`edmonds_karp` which is faster for sparse
    networks with highly skewed degree distributions. Alternative flow
    functions have to be explicitly imported from the flow package.
    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
    5
    Notes
    -----
    This is a flow based implementation of edge disjoint paths. We compute
    the maximum flow between source and target on an auxiliary directed
    network. The saturated edges in the residual network after running the
    maximum flow algorithm correspond to edge disjoint paths between source
    and target in the original network. This function handles both directed
    and undirected graphs, and can use all flow algorithms from NetworkX flow
    package.
    """
    if s not in G:
        raise nx.NetworkXError(f"node {s} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {t} not in graph")
    if flow_func is None:
        flow_func = default_flow_func
    if auxiliary is None:
        H = build_auxiliary_edge_connectivity(G)
    else:
        H = auxiliary
    # Maximum possible edge disjoint paths: bounded by the degrees of the
    # two endpoints in the auxiliary digraph.
    possible = min(H.out_degree(s), H.in_degree(t))
    if not possible:
        raise NetworkXNoPath
    if cutoff is None:
        cutoff = possible
    else:
        cutoff = min(cutoff, possible)
    # Compute maximum flow between source and target. Flow functions in
    # NetworkX return a residual network. value_only=True because only the
    # residual network (not a flow dict) is needed to rebuild the paths.
    kwargs = dict(capacity='capacity', residual=residual, cutoff=cutoff,
                  value_only=True)
    if flow_func is preflow_push:
        # preflow_push does not accept a cutoff argument.
        del kwargs['cutoff']
    if flow_func is shortest_augmenting_path:
        # Use the two-phase variant of shortest augmenting path.
        kwargs['two_phase'] = True
    R = flow_func(H, s, t, **kwargs)
    if R.graph['flow_value'] == 0:
        raise NetworkXNoPath
    # Saturated edges in the residual network form the edge disjoint paths
    # between source and target
    cutset = [(u, v) for u, v, d in R.edges(data=True)
              if d['capacity'] == d['flow'] and d['flow'] > 0]
    # This is equivalent to what flow.utils.build_flow_dict returns, but
    # only for the nodes with saturated edges and without reporting 0 flows.
    flow_dict = {n: {} for edge in cutset for n in edge}
    for u, v in cutset:
        flow_dict[u][v] = 1
    # Rebuild the edge disjoint paths from the flow dictionary.
    paths_found = 0
    for v in list(flow_dict[s]):
        if paths_found >= cutoff:
            # preflow_push does not support cutoff: we have to
            # keep track of the paths found and stop at cutoff.
            break
        path = [s]
        if v == t:
            # s and t are directly joined by a saturated edge.
            # NOTE(review): paths_found is not incremented in this branch,
            # so direct s-t edges do not count toward the cutoff check
            # above when the flow algorithm ignored cutoff -- confirm
            # this is intended.
            path.append(v)
            yield path
            continue
        u = v
        while u != t:
            path.append(u)
            try:
                # popitem consumes the saturated edge, so each edge is used
                # by at most one reconstructed path.
                u, _ = flow_dict[u].popitem()
            except KeyError:
                # Dead end: no unused saturated edge leaves u; discard.
                break
        else:
            # The walk reached t without breaking: a complete s-t path.
            path.append(t)
            yield path
            paths_found += 1
def node_disjoint_paths(G, s, t, flow_func=None, cutoff=None, auxiliary=None,
                        residual=None):
    r"""Computes node disjoint paths between source and target.
    Node disjoint paths are paths that only share their first and last
    nodes. The number of node independent paths between two nodes is
    equal to their local node connectivity.
    Parameters
    ----------
    G : NetworkX graph
    s : node
        Source node.
    t : node
        Target node.
    flow_func : function
        A function for computing the maximum flow among a pair of nodes.
        The function has to accept at least three parameters: a Digraph,
        a source node, and a target node. And return a residual network
        that follows NetworkX conventions (see :meth:`maximum_flow` for
        details). If flow_func is None, the default maximum flow function
        (:meth:`edmonds_karp`) is used. See below for details. The choice
        of the default function may change from version to version and
        should not be relied on. Default value: None.
    cutoff : int
        Maximum number of paths to yield. Some of the maximum flow
        algorithms, such as :meth:`edmonds_karp` (the default) and
        :meth:`shortest_augmenting_path` support the cutoff parameter,
        and will terminate when the flow value reaches or exceeds the
        cutoff. Other algorithms will ignore this parameter.
        Default value: None.
    auxiliary : NetworkX DiGraph
        Auxiliary digraph to compute flow based node connectivity. It has
        to have a graph attribute called mapping with a dictionary mapping
        node names in G and in the auxiliary digraph. If provided
        it will be reused instead of recreated. Default value: None.
    residual : NetworkX DiGraph
        Residual network to compute maximum flow. If provided it will be
        reused instead of recreated. Default value: None.
    Returns
    -------
    paths : generator
        Generator of node disjoint paths.
    Raises
    ------
    NetworkXNoPath
        If there is no path between source and target.
    NetworkXError
        If source or target are not in the graph G.
    Examples
    --------
    We use in this example the platonic icosahedral graph, which has
    node connectivity 5, thus there are 5 node disjoint paths between any
    pair of non neighbor nodes.
    >>> G = nx.icosahedral_graph()
    >>> len(list(nx.node_disjoint_paths(G, 0, 6)))
    5
    If you need to compute node disjoint paths between several pairs of
    nodes in the same graph, it is recommended that you reuse the
    data structures that NetworkX uses in the computation: the
    auxiliary digraph for node connectivity and node cuts, and the
    residual network for the underlying maximum flow computation.
    Example of how to compute node disjoint paths reusing the data
    structures:
    >>> # You also have to explicitly import the function for
    >>> # building the auxiliary digraph from the connectivity package
    >>> from networkx.algorithms.connectivity import (
    ...     build_auxiliary_node_connectivity)
    >>> H = build_auxiliary_node_connectivity(G)
    >>> # And the function for building the residual network from the
    >>> # flow package
    >>> from networkx.algorithms.flow import build_residual_network
    >>> # Note that the auxiliary digraph has an edge attribute named capacity
    >>> R = build_residual_network(H, 'capacity')
    >>> # Reuse the auxiliary digraph and the residual network by passing them
    >>> # as arguments
    >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R)))
    5
    You can also use alternative flow algorithms for computing node disjoint
    paths. For instance, in dense networks the algorithm
    :meth:`shortest_augmenting_path` will usually perform better than
    the default :meth:`edmonds_karp` which is faster for sparse
    networks with highly skewed degree distributions. Alternative flow
    functions have to be explicitly imported from the flow package.
    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
    5
    Notes
    -----
    This is a flow based implementation of node disjoint paths. We compute
    the maximum flow between source and target on an auxiliary directed
    network. The saturated edges in the residual network after running the
    maximum flow algorithm correspond to node disjoint paths between source
    and target in the original network. This function handles both directed
    and undirected graphs, and can use all flow algorithms from NetworkX flow
    package.
    See also
    --------
    :meth:`edge_disjoint_paths`
    :meth:`node_connectivity`
    :meth:`maximum_flow`
    :meth:`edmonds_karp`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`
    """
    if s not in G:
        raise nx.NetworkXError(f"node {s} not in graph")
    if t not in G:
        raise nx.NetworkXError(f"node {t} not in graph")
    if auxiliary is None:
        H = build_auxiliary_node_connectivity(G)
    else:
        H = auxiliary
    mapping = H.graph.get('mapping', None)
    if mapping is None:
        raise nx.NetworkXError('Invalid auxiliary digraph.')
    # Maximum possible node disjoint paths. In the auxiliary digraph each
    # node x of G is split into an incoming copy f'{mapping[x]}A' and an
    # outgoing copy f'{mapping[x]}B' -- see
    # build_auxiliary_node_connectivity.
    possible = min(H.out_degree(f'{mapping[s]}B'),
                   H.in_degree(f'{mapping[t]}A'))
    if not possible:
        raise NetworkXNoPath
    if cutoff is None:
        cutoff = possible
    else:
        cutoff = min(cutoff, possible)
    kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H,
                  cutoff=cutoff)
    # The edge disjoint paths in the auxiliary digraph correspond to the node
    # disjoint paths in the original graph.
    paths_edges = edge_disjoint_paths(H, f'{mapping[s]}B', f'{mapping[t]}A',
                                      **kwargs)
    for path in paths_edges:
        # Each node in the original graph maps to two nodes in auxiliary
        # graph (its 'A' and 'B' copies), so deduplicate the ids while
        # preserving order.
        yield list(_unique_everseen(H.nodes[node]['id'] for node in path))
def _unique_everseen(iterable):
# Adapted from https://docs.python.org/3/library/itertools.html examples
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
seen = set()
seen_add = seen.add
for element in _filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
|
en
| 0.843497
|
Flow based node and edge disjoint paths. # Define the default maximum flow function to use for the undelying # maximum flow computations # Functions to build auxiliary data structures. Returns the edges disjoint paths between source and target. Edge disjoint paths are paths that do not share any edge. The number of edge disjoint paths between source and target is equal to their edge connectivity. Parameters ---------- G : NetworkX graph s : node Source node for the flow. t : node Sink node for the flow. flow_func : function A function for computing the maximum flow among a pair of nodes. The function has to accept at least three parameters: a Digraph, a source node, and a target node. And return a residual network that follows NetworkX conventions (see :meth:`maximum_flow` for details). If flow_func is None, the default maximum flow function (:meth:`edmonds_karp`) is used. The choice of the default function may change from version to version and should not be relied on. Default value: None. cutoff : int Maximum number of paths to yield. Some of the maximum flow algorithms, such as :meth:`edmonds_karp` (the default) and :meth:`shortest_augmenting_path` support the cutoff parameter, and will terminate when the flow value reaches or exceeds the cutoff. Other algorithms will ignore this parameter. Default value: None. auxiliary : NetworkX DiGraph Auxiliary digraph to compute flow based edge connectivity. It has to have a graph attribute called mapping with a dictionary mapping node names in G and in the auxiliary digraph. If provided it will be reused instead of recreated. Default value: None. residual : NetworkX DiGraph Residual network to compute maximum flow. If provided it will be reused instead of recreated. Default value: None. Returns ------- paths : generator A generator of edge independent paths. Raises ------ NetworkXNoPath If there is no path between source and target. NetworkXError If source or target are not in the graph G. 
See also -------- :meth:`node_disjoint_paths` :meth:`edge_connectivity` :meth:`maximum_flow` :meth:`edmonds_karp` :meth:`preflow_push` :meth:`shortest_augmenting_path` Examples -------- We use in this example the platonic icosahedral graph, which has node edge connectivity 5, thus there are 5 edge disjoint paths between any pair of nodes. >>> G = nx.icosahedral_graph() >>> len(list(nx.edge_disjoint_paths(G, 0, 6))) 5 If you need to compute edge disjoint paths on several pairs of nodes in the same graph, it is recommended that you reuse the data structures that NetworkX uses in the computation: the auxiliary digraph for edge connectivity, and the residual network for the underlying maximum flow computation. Example of how to compute edge disjoint paths among all pairs of nodes of the platonic icosahedral graph reusing the data structures. >>> import itertools >>> # You also have to explicitly import the function for >>> # building the auxiliary digraph from the connectivity package >>> from networkx.algorithms.connectivity import ( ... build_auxiliary_edge_connectivity) >>> H = build_auxiliary_edge_connectivity(G) >>> # And the function for building the residual network from the >>> # flow package >>> from networkx.algorithms.flow import build_residual_network >>> # Note that the auxiliary digraph has an edge attribute named capacity >>> R = build_residual_network(H, 'capacity') >>> result = {n: {} for n in G} >>> # Reuse the auxiliary digraph and the residual network by passing them >>> # as arguments >>> for u, v in itertools.combinations(G, 2): ... k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R))) ... result[u][v] = k >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) True You can also use alternative flow algorithms for computing edge disjoint paths. 
For instance, in dense networks the algorithm :meth:`shortest_augmenting_path` will usually perform better than the default :meth:`edmonds_karp` which is faster for sparse networks with highly skewed degree distributions. Alternative flow functions have to be explicitly imported from the flow package. >>> from networkx.algorithms.flow import shortest_augmenting_path >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path))) 5 Notes ----- This is a flow based implementation of edge disjoint paths. We compute the maximum flow between source and target on an auxiliary directed network. The saturated edges in the residual network after running the maximum flow algorithm correspond to edge disjoint paths between source and target in the original network. This function handles both directed and undirected graphs, and can use all flow algorithms from NetworkX flow package. # Maximum possible edge disjoint paths # Compute maximum flow between source and target. Flow functions in # NetworkX return a residual network. # Saturated edges in the residual network form the edge disjoint paths # between source and target # This is equivalent of what flow.utils.build_flow_dict returns, but # only for the nodes with saturated edges and without reporting 0 flows. # Rebuild the edge disjoint paths from the flow dictionary. # preflow_push does not support cutoff: we have to # keep track of the paths founds and stop at cutoff. Computes node disjoint paths between source and target. Node disjoint paths are paths that only share their first and last nodes. The number of node independent paths between two nodes is equal to their local node connectivity. Parameters ---------- G : NetworkX graph s : node Source node. t : node Target node. flow_func : function A function for computing the maximum flow among a pair of nodes. The function has to accept at least three parameters: a Digraph, a source node, and a target node. 
And return a residual network that follows NetworkX conventions (see :meth:`maximum_flow` for details). If flow_func is None, the default maximum flow function (:meth:`edmonds_karp`) is used. See below for details. The choice of the default function may change from version to version and should not be relied on. Default value: None. cutoff : int Maximum number of paths to yield. Some of the maximum flow algorithms, such as :meth:`edmonds_karp` (the default) and :meth:`shortest_augmenting_path` support the cutoff parameter, and will terminate when the flow value reaches or exceeds the cutoff. Other algorithms will ignore this parameter. Default value: None. auxiliary : NetworkX DiGraph Auxiliary digraph to compute flow based node connectivity. It has to have a graph attribute called mapping with a dictionary mapping node names in G and in the auxiliary digraph. If provided it will be reused instead of recreated. Default value: None. residual : NetworkX DiGraph Residual network to compute maximum flow. If provided it will be reused instead of recreated. Default value: None. Returns ------- paths : generator Generator of node disjoint paths. Raises ------ NetworkXNoPath If there is no path between source and target. NetworkXError If source or target are not in the graph G. Examples -------- We use in this example the platonic icosahedral graph, which has node node connectivity 5, thus there are 5 node disjoint paths between any pair of non neighbor nodes. >>> G = nx.icosahedral_graph() >>> len(list(nx.node_disjoint_paths(G, 0, 6))) 5 If you need to compute node disjoint paths between several pairs of nodes in the same graph, it is recommended that you reuse the data structures that NetworkX uses in the computation: the auxiliary digraph for node connectivity and node cuts, and the residual network for the underlying maximum flow computation. 
Example of how to compute node disjoint paths reusing the data structures: >>> # You also have to explicitly import the function for >>> # building the auxiliary digraph from the connectivity package >>> from networkx.algorithms.connectivity import ( ... build_auxiliary_node_connectivity) >>> H = build_auxiliary_node_connectivity(G) >>> # And the function for building the residual network from the >>> # flow package >>> from networkx.algorithms.flow import build_residual_network >>> # Note that the auxiliary digraph has an edge attribute named capacity >>> R = build_residual_network(H, 'capacity') >>> # Reuse the auxiliary digraph and the residual network by passing them >>> # as arguments >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R))) 5 You can also use alternative flow algorithms for computing node disjoint paths. For instance, in dense networks the algorithm :meth:`shortest_augmenting_path` will usually perform better than the default :meth:`edmonds_karp` which is faster for sparse networks with highly skewed degree distributions. Alternative flow functions have to be explicitly imported from the flow package. >>> from networkx.algorithms.flow import shortest_augmenting_path >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path))) 5 Notes ----- This is a flow based implementation of node disjoint paths. We compute the maximum flow between source and target on an auxiliary directed network. The saturated edges in the residual network after running the maximum flow algorithm correspond to node disjoint paths between source and target in the original network. This function handles both directed and undirected graphs, and can use all flow algorithms from NetworkX flow package. 
See also -------- :meth:`edge_disjoint_paths` :meth:`node_connectivity` :meth:`maximum_flow` :meth:`edmonds_karp` :meth:`preflow_push` :meth:`shortest_augmenting_path` # Maximum possible edge disjoint paths # The edge disjoint paths in the auxiliary digraph correspond to the node # disjoint paths in the original graph. # Each node in the original graph maps to two nodes in auxiliary graph # Adapted from https://docs.python.org/3/library/itertools.html examples # unique_everseen('AAAABBBCCDAABBB') --> A B C D
| 3.371452
| 3
|
controllers/msg.py
|
annehaley/eden
| 205
|
6626428
|
# -*- coding: utf-8 -*-
"""
Messaging Module - Controllers
"""
# Reject all requests to this controller when the module is disabled in
# the deployment settings (c is the current controller/module name)
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """
    # Human-readable module name, as configured in deployment settings
    name_nice = settings.modules[c].get("name_nice")
    response.title = name_nice
    return {"module_name": name_nice}
# -----------------------------------------------------------------------------
def basestation():
    """ RESTful CRUD controller for Base Stations """
    # Pre-processor
    def prep(r):
        # Function to call for all Site Instance Types
        # (shared org_site setup imported from the org module)
        from s3db.org import org_site_prep
        org_site_prep(r)
        return True
    s3.prep = prep
    return s3_rest_controller()
# =============================================================================
def compose():
    """ Compose a Message which can be sent to a pentity via a number of different communications channels """
    # Delegates form rendering and sending to the messaging API object
    return msg.compose()
# =============================================================================
def message():
    """
        RESTful CRUD controller for the master message log
        - read-only list of messages across all channel types
    """
    tablename = "msg_message"
    table = s3db.msg_message
    # Expose the instance type so the channel of each message is visible
    table.instance_type.readable = True
    table.instance_type.label = T("Channel")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("Message Log"),
        label_list_button = T("View Message Log"),
        msg_list_empty = T("No Messages currently in the Message Log"),
        )
    # Post-processor: add per-record action buttons to interactive views
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons
            s3.actions += [{"label": s3_str(T("Mark Sender")),
                            "url": URL(f = "mark_sender",
                                       args = ["[id]"],
                                       ),
                            "_class": "action-btn",
                            },
                           ]
        return output
    s3.postp = postp
    # Read-only: no manual create/edit/delete through this controller
    s3db.configure(tablename,
                   deletable = False,
                   editable = False,
                   insertable = False,
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def contact():
    """
        RESTful CRUD controller for the Contact Form
    """
    def contact_prep(r):
        # Non-admins may only submit the form, not browse submissions
        if not auth.s3_has_role("ADMIN"):
            r.method = "create"
        return True
    s3.prep = contact_prep
    return s3_rest_controller()
# =============================================================================
def mark_sender():
    """
        Assign priority to the given sender

        Expects the msg_message record ID as request.args[0] and
        redirects to the sender controller to create or update the
        corresponding msg_sender record.
    """
    try:
        mid = request.args[0]
    except:
        # No record ID supplied
        # NOTE(review): bare except + bare SyntaxError is unusual; an
        # HTTP(400) would be more conventional - confirm before changing
        raise SyntaxError
    mtable = s3db.msg_message
    stable = s3db.msg_sender
    # @ToDo: Replace 2 queries with Join
    # Look up the sender address of the message
    srecord = db(mtable.id == mid).select(mtable.from_address,
                                          limitby = (0, 1),
                                          ).first()
    sender = srecord.from_address
    # Does this sender already have a priority record?
    record = db(stable.sender == sender).select(stable.id,
                                                limitby = (0, 1),
                                                ).first()
    if record:
        # NOTE(review): "update" is passed without the record ID; verify
        # the sender controller resolves the record from vars["sender"]
        args = "update"
    else:
        args = "create"
    redirect(URL(f = "sender",
                 args = args,
                 vars = {"sender": sender},
                 ))
# =============================================================================
def outbox():
    """ View the contents of the Outbox """
    # Login required to view one's outbox
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))
    from s3db.pr import pr_PersonEntityRepresent
    tablename = "msg_outbox"
    otable = s3db[tablename]
    # Message: visible, but not editable
    message_id = otable.message_id
    message_id.label = T("Message")
    message_id.writable = False
    message_id.readable = True
    message_id.represent = s3db.msg_message_represent
    # Recipient
    pe_id = otable.pe_id
    pe_id.readable = True
    pe_id.label = T("Recipient")
    pe_id.represent = pr_PersonEntityRepresent(default_label = "")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("Outbox"),
        label_list_button = T("View Outbox"),
        label_delete_button = T("Delete Message"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in Outbox"),
        )
    def add_compose_button(r, output):
        # Offer a "Compose" button above list views
        if isinstance(output, dict):
            output["rheader"] = A(T("Compose"),
                                  _class = "action-btn",
                                  _href = URL(f="compose"),
                                  )
        return output
    s3.postp = add_compose_button
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_outbox():
    """
        RESTful CRUD controller for the Email Outbox
        - all Outbound Email Messages are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_email"
    table = s3db.msg_email
    # Restrict the list to outbound messages; hide the flag column
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Details"),
        title_list = T("Sent Emails"),
        label_list_button = T("View Sent Emails"),
        label_delete_button = T("Delete Email"),
        msg_record_deleted = T("Email deleted"),
        msg_list_empty = T("No Emails currently in Outbox"),
        )
    # Post-processor: add a "Compose" button above list views
    def postp(r, output):
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  "to_address",
                                  "subject",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "email")
# -----------------------------------------------------------------------------
def facebook_outbox():
    """
        RESTful CRUD controller for the Facebook Outbox
        - all Outbound Facebook Messages are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_facebook"
    table = s3db.msg_facebook
    # Restrict the list to outbound posts; hide the flag column
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Post Details"),
        title_list = T("Sent Posts"),
        label_list_button = T("View Sent Posts"),
        label_delete_button = T("Delete Post"),
        msg_record_deleted = T("Post deleted"),
        msg_list_empty = T("No Posts currently in Outbox"),
        )
    # "Compose" button deliberately disabled for Facebook:
    #def postp(r, output):
    #    if isinstance(output, dict):
    #        add_btn = A(T("Compose"),
    #                    _class="action-btn",
    #                    _href=URL(f="compose")
    #                    )
    #        output["rheader"] = add_btn
    #    return output
    #s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  #"to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "facebook")
# -----------------------------------------------------------------------------
def sms_outbox():
    """
        RESTful CRUD controller for the SMS Outbox
        - all sent SMS are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))
    tablename = "msg_sms"
    stable = s3db.msg_sms
    # Outbound messages only; hide the direction flag itself
    s3.filter = (stable.inbound == False)
    stable.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("SMS Details"),
        title_list = T("Sent SMS"),
        label_list_button = T("View Sent SMS"),
        label_delete_button = T("Delete SMS"),
        msg_record_deleted = T("SMS deleted"),
        msg_list_empty = T("No SMS currently in Outbox"),
        )
    def add_compose_button(r, output):
        # Offer a "Compose" button above list views
        if isinstance(output, dict):
            output["rheader"] = A(T("Compose"),
                                  _class = "action-btn",
                                  _href = URL(f="compose"),
                                  )
        return output
    s3.postp = add_compose_button
    visible_fields = ["date",
                      "to_address",
                      "body",
                      ]
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = visible_fields,
                   )
    return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter_outbox():
    """
        RESTful CRUD controller for the Twitter Outbox
        - all sent Tweets are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_twitter"
    table = s3db.msg_twitter
    # Restrict the list to outbound tweets; hide the flag column
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Tweet Details"),
        title_list = T("Sent Tweets"),
        label_list_button = T("View Sent Tweets"),
        label_delete_button = T("Delete Tweet"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets currently in Outbox"),
        )
    # Post-processor: add a "Compose" button above list views
    def postp(r, output):
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  "to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "twitter")
# =============================================================================
def inbox():
    """
        RESTful CRUD controller for the Inbox
        - all Inbound Messages are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))
    tablename = "msg_message"
    mtable = s3db.msg_message
    # Inbound messages only; hide the direction flag itself
    s3.filter = (mtable.inbound == True)
    mtable.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("InBox"),
        label_list_button = T("View InBox"),
        label_delete_button = T("Delete Message"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in InBox"),
        )
    visible_fields = ["date",
                      "channel_id",
                      "from_address",
                      "body",
                      ]
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = visible_fields,
                   )
    return s3_rest_controller(c, "message")
# -----------------------------------------------------------------------------
def email_inbox():
    """
        RESTful CRUD controller for the Email Inbox
        - all Inbound Email Messages are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    # Inbound messages only
    s3.filter = (FS("inbound") == True)
    from s3 import S3SQLCustomForm, S3SQLInlineComponent
    # Custom read form including inline attachments
    crud_form = S3SQLCustomForm("date",
                                "subject",
                                "from_address",
                                "body",
                                S3SQLInlineComponent(
                                    "attachment",
                                    name = "document_id",
                                    label = T("Attachments"),
                                    fields = ["document_id",],
                                    ),
                                )
    tablename = "msg_email"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Details"),
        title_list = T("Email InBox"),
        label_list_button = T("View Email InBox"),
        label_delete_button = T("Delete Email"),
        msg_record_deleted = T("Email deleted"),
        msg_list_empty = T("No Emails currently in InBox"),
        )
    s3db.configure(tablename,
                   crud_form = crud_form,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "subject",
                                  "body",
                                  (T("Attachments"), "attachment.document_id"),
                                  ],
                   )
    # Pre-processor: hide the direction flag; drop the attachment label
    # on single-record views
    def prep(r):
        s3db.msg_email.inbound.readable = False
        if r.id:
            s3db.msg_attachment.document_id.label = ""
        return True
    s3.prep = prep
    return s3_rest_controller(c, "email")
# =============================================================================
def rss():
    """
        RESTful CRUD controller for RSS feed posts
        - restricted to the ADMIN role
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_rss"
    # NOTE(review): table is only used by the commented-out represent
    # below; kept so that line can be re-enabled easily
    table = s3db.msg_rss
    # To represent the description suitably
    # If it is an image display an image
    #table.description.represent = lambda description: HTML(description)
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("RSS Post Details"),
        title_list = T("RSS Posts"),
        label_list_button = T("View RSS Posts"),
        label_delete_button = T("Delete Post"),
        msg_record_deleted = T("RSS Post deleted"),
        msg_list_empty = T("No Posts available"),
        )
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "body",
                                  ],
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sms_inbox():
    """
        RESTful CRUD controller for the SMS Inbox
        - all Inbound SMS Messages go here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))
    tablename = "msg_sms"
    stable = s3db[tablename]
    # Inbound messages only; hide the direction flag itself
    s3.filter = (stable.inbound == True)
    stable.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("SMS Details"),
        title_list = T("SMS InBox"),
        label_list_button = T("View SMS InBox"),
        label_delete_button = T("Delete SMS"),
        msg_record_deleted = T("SMS deleted"),
        msg_list_empty = T("No SMS currently in InBox"),
        )
    visible_fields = ["date",
                      "from_address",
                      "body",
                      ]
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = visible_fields,
                   )
    return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter():
    """
        Twitter RESTful Controller

        @ToDo: Action Button to update async
    """
    # Read-only list of tweets
    visible_fields = ["date",
                      "from_address",
                      "to_address",
                      "body",
                      ]
    s3db.configure("msg_twitter",
                   editable = False,
                   insertable = False,
                   list_fields = visible_fields,
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_inbox():
    """
        RESTful CRUD controller for the Twitter Inbox
        - all Inbound Tweets (Directed Messages) are visible here
    """
    # Login required
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_twitter"
    table = s3db.msg_twitter
    # Restrict the list to inbound tweets; hide the flag column
    s3.filter = (table.inbound == True)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Tweet Details"),
        title_list = T("Twitter InBox"),
        label_list_button = T("View Twitter InBox"),
        label_delete_button = T("Delete Tweet"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets currently in InBox"),
        )
    s3db.configure(tablename,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "twitter")
# =============================================================================
def tropo():
    """
        Receive a JSON POST from the Tropo WebAPI
        @see: https://www.tropo.com/docs/webapi/newhowitworks.htm

        NOTE(review): bare excepts are used here as control flow to
        distinguish outbound / inbound / voice requests; they will also
        silently swallow genuine errors
    """
    # Stored in modules/tropo.py
    from tropo import Tropo, Session
    try:
        s = Session(request.body.read())
        t = Tropo()
        # This is their service contacting us, so parse their request
        try:
            row_id = s.parameters["row_id"]
            # This is an Outbound message which we've requested Tropo to send for us
            table = s3db.msg_tropo_scratch
            query = (table.row_id == row_id)
            row = db(query).select(limitby = (0, 1),
                                   ).first()
            # Send the message
            #t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network)
            t.call(to=row.recipient, network=row.network)
            t.say(row.message)
            # Update status to sent in Outbox
            outbox = s3db.msg_outbox
            db(outbox.id == row.row_id).update(status = 2)
            # @ToDo: Set message log to actioned
            #log = s3db.msg_log
            #db(log.id == row.message_id).update(actioned=True)
            # Clear the Scratchpad
            db(query).delete()
            return t.RenderJson()
        except:
            # This is an Inbound message
            try:
                message = s.initialText
                # This is an SMS/IM
                # Place it in the InBox
                uuid = s.id
                recipient = s.to["id"]
                try:
                    fromaddress = s.fromaddress["id"]
                except:
                    # SyntaxError: s.from => invalid syntax (why!?)
                    fromaddress = ""
                # @ToDo: Update to new model
                #s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress,
                #                    recipient=recipient, message=message,
                #                    inbound=True)
                # Send the message to the parser
                reply = msg.parse_message(message)
                t.say([reply])
                return t.RenderJson()
            except:
                # This is a Voice call
                # - we can't handle these yet
                raise HTTP(501)
    except:
        # GET request or some random POST
        pass
# =============================================================================
# Restricted to members of auth group 1 (presumably the Admin group -
# verify against the deployment's group IDs)
@auth.s3_requires_membership(1)
def sms_outbound_gateway():
    """ SMS Outbound Gateway selection for the messaging framework """
    # CRUD Strings
    s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
        label_create = T("Create SMS Outbound Gateway"),
        title_display = T("SMS Outbound Gateway Details"),
        title_list = T("SMS Outbound Gateways"),
        title_update = T("Edit SMS Outbound Gateway"),
        label_list_button = T("List SMS Outbound Gateways"),
        label_delete_button = T("Delete SMS Outbound Gateway"),
        msg_record_created = T("SMS Outbound Gateway added"),
        msg_record_modified = T("SMS Outbound Gateway updated"),
        msg_record_deleted = T("SMS Outbound Gateway deleted"),
        msg_list_empty = T("No SMS Outbound Gateways currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def channel():
    """
        RESTful CRUD controller for Channels
        - unused
    """
    # Plain pass-through to the generic REST controller; kept so the
    # /msg/channel URL remains valid
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_channel():
    """
        RESTful CRUD controller for Inbound Email channels
        - appears in the administration menu
        - restricted to the ADMIN role
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_email_channel"
    table = s3db[tablename]
    # Field labels
    table.server.label = T("Server")
    table.protocol.label = T("Protocol")
    table.use_ssl.label = "SSL"
    table.port.label = T("Port")
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.delete_from_server.label = T("Delete from Server?")
    # Bugfix: 993 is the IMAP-over-SSL port; the old tooltip said
    # "993 for IMAP" instead of "993 for SSL"
    table.port.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Port"),
                                                 T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for SSL)."),
                                                 ),
                             )
    table.delete_from_server.comment = DIV(_class = "tooltip",
                                           _title = "%s|%s" % (T("Delete"),
                                                               T("If this is set to True then mails will be deleted from the server after downloading."),
                                                               ),
                                           )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Settings"),
        title_list = T("Email Accounts"),
        label_create = T("Create Email Account"),
        title_update = T("Edit Email Settings"),
        label_list_button = T("View Email Accounts"),
        msg_record_created = T("Account added"),
        msg_record_deleted = T("Email Account deleted"),
        msg_list_empty = T("No Accounts currently defined"),
        msg_record_modified = T("Email Settings updated"),
        )
    # Post-processor: add Enable/Disable (and, without a running
    # scheduler, a manual Poll) action buttons to list views
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args = ["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def facebook_channel():
    """
        RESTful CRUD controller for Facebook channels
        - appears in the administration menu
        - restricted to the ADMIN role
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_facebook_channel"
    table = s3db[tablename]
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Facebook Settings"),
        title_list = T("Facebook Accounts"),
        label_create = T("Add Facebook Account"),
        title_update = T("Edit Facebook Settings"),
        label_list_button = T("View Facebook Accounts"),
        msg_record_created = T("Account added"),
        msg_record_deleted = T("Facebook Account deleted"),
        msg_list_empty = T("No Accounts currently defined"),
        msg_record_modified = T("Facebook Settings updated"),
        )
    # Post-processor: add Enable/Disable action buttons to list views
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            # Manual polling disabled for Facebook channels:
            #if not s3task._is_alive():
            #    # No Scheduler Running
            #    s3.actions += [{"label": s3_str(T("Poll")),
            #                    "restrict": restrict_d,
            #                    "url": URL(args = ["[id]", "poll"]),
            #                    "_class": "action-btn",
            #                    }
            #                   ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def mcommons_channel():
    """
        RESTful CRUD controller for Mobile Commons SMS Channels
        - appears in the administration menu
        - restricted to the ADMIN role
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_mcommons_channel"
    table = s3db[tablename]
    # Field labels & tooltips
    table.name.label = T("Account Name")
    table.name.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Account Name"),
                                                 T("Name for your Mobile Commons Account"),
                                                 ),
                             )
    table.campaign_id.label = T("Campaign ID")
    table.url.label = T("URL")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("URL for the Mobile Commons API"),
                                                ),
                            )
    table.username.label = T("Username")
    table.password.label = T("Password")
    # Poll timestamp is system-maintained
    table.timestmp.label = T("Last Downloaded")
    table.timestmp.writable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Mobile Commons Setting Details"),
        title_list = T("Mobile Commons Settings"),
        label_create = T("Add Mobile Commons Settings"),
        title_update = T("Edit Mobile Commons Settings"),
        label_list_button = T("View Mobile Commons Settings"),
        msg_record_created = T("Mobile Commons Setting added"),
        msg_record_deleted = T("Mobile Commons Setting deleted"),
        msg_list_empty = T("No Mobile Commons Settings currently defined"),
        msg_record_modified = T("Mobile Commons settings updated"),
        )
    # Post-processor: add Enable/Disable (and, without a running
    # scheduler, a manual Poll) action buttons to list views
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def gcm_channel():
    """
        RESTful CRUD controller for Google Cloud Messaging Channels
        - appears in the administration menu

        Admin-only: configures GCM push channels (msg_gcm_channel table)
    """
    # Restrict access to administrators
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_gcm_channel"
    table = s3db[tablename]
    # Field labels & tooltips
    table.name.label = T("Account Name")
    table.name.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Account Label"),
                                                 T("Label for GCM Account"),
                                                 ),
                             )
    table.api_key.label = T("API KEY")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Google Cloud Messaging Setting Details"),
        title_list = T("Google Cloud Messaging Settings"),
        label_create = T("Add Google Cloud Messaging Settings"),
        title_update = T("Edit Google Cloud Messaging Settings"),
        label_list_button = T("View Google Cloud Messaging Settings"),
        msg_record_created = T("Google Cloud Messaging Setting added"),
        msg_record_deleted = T("Google Cloud Messaging Setting deleted"),
        msg_list_empty = T("No Google Cloud Messaging Settings currently defined"),
        msg_record_modified = T("Google Cloud Messaging settings updated"),
        )
    def postp(r, output):
        # Add per-record Enable/Disable action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            # NOTE(review): uses (deleted != True) here whereas sibling
            # controllers use (deleted == False) - these differ for NULL
            # values of 'deleted'; confirm which is intended
            query = (table.deleted != True)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            # "Enable" only on disabled rows, "Disable" only on enabled ones
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            # Manual Poll button intentionally disabled for GCM
            #if not s3task._is_alive():
                # No Scheduler Running
            #    s3.actions += [{"label": s3_str(T("Poll")),
            #                    "restrict": restrict_d,
            #                    "url": URL(args = ["[id]", "poll"]),
            #                    "_class": "action-btn",
            #                    },
            #                   ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def rss_channel():
    """
        RESTful CRUD controller for RSS channels
        - appears in the administration menu

        Admin-only: configures RSS feed subscriptions (msg_rss_channel)
    """
    # Restrict access to administrators
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_rss_channel"
    table = s3db[tablename]
    # Field labels & tooltips
    table.name.label = T("Name")
    table.description.label = T("Description")
    table.url.label = T("URL/Link")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("Link for the RSS Feed."),
                                                ),
                            )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Subscriptions Status"),
                                                    T("Are you susbscribed?"),
                                                    ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("RSS Channel Details"),
        title_list = T("RSS Channels"),
        label_create = T("Add RSS Channel"),
        title_update = T("Edit RSS Channel"),
        label_list_button = T("View RSS Channels"),
        msg_record_created = T("Channel added"),
        msg_record_deleted = T("RSS Channel deleted"),
        msg_list_empty = T("No RSS Channels currently defined"),
        msg_record_modified = T("RSS Channel updated"),
        )
    def status_represent(v):
        # Represent a channel status value: numeric values are a count of
        # polls without new entries; anything else passes through verbatim
        try:
            v = int(v)
        # NOTE(review): bare except - would also swallow unexpected errors;
        # intent appears to be catching non-numeric values only
        except:
            # Text
            return v
        return "There have been no new entries for %s requests" % v
    s3db.msg_channel_status.status.represent = status_represent
    def postp(r, output):
        # Add per-record Subscribe/Unsubscribe (and manual Poll) buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            # "Subscribe" only on disabled rows, "Unsubscribe" on enabled
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Subscribe")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Unsubscribe")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running => offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_channel():
    """
        RESTful CRUD controller for Twilio SMS channels
        - appears in the administration menu

        Admin-only: configures inbound SMS via the Twilio API
        (msg_twilio_channel table)
    """
    # Restrict access to administrators
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_twilio_channel"
    table = s3db[tablename]
    # Field labels & tooltips
    table.account_name.label = T("Account Name")
    table.account_name.comment = DIV(_class = "tooltip",
                                     _title = "%s|%s" % (T("Account Name"),
                                                         T("Identifier Name for your Twilio Account."),
                                                         ),
                                     )
    table.url.label = T("URL")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("URL for the twilio API."),
                                                ),
                            )
    table.account_sid.label = "Account SID"
    table.auth_token.label = T("AUTH TOKEN")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twilio Channel Details"),
        title_list = T("Twilio Channels"),
        label_create = T("Add Twilio Channel"),
        title_update = T("Edit Twilio Channel"),
        label_list_button = T("View Twilio Channels"),
        msg_record_created = T("Twilio Channel added"),
        msg_record_deleted = T("Twilio Channel deleted"),
        msg_record_modified = T("Twilio Channel updated"),
        msg_list_empty = T("No Twilio Channels currently defined"),
        )
    def postp(r, output):
        # Add per-record Enable/Disable (and manual Poll) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            # "Enable" only on disabled rows, "Disable" only on enabled ones
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running => offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_modem_channel():
    """
        RESTful CRUD controller for modem channels
        - appears in the administration menu
        Multiple Modems can be configured to receive Inbound Messages

        Requires the Python 'serial' module to be installed; redirects
        back to the admin index with an error if it is missing
    """
    try:
        import serial
    except ImportError:
        session.error = T("Python Serial module not available within the running Python - this needs installing to activate the Modem")
        redirect(URL(c="admin", f="index"))
    # Default resource name from controller/function (msg_sms_modem_channel)
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Field labels & tooltips
    table.modem_port.label = T("Port")
    table.modem_baud.label = T("Baud")
    table.enabled.label = T("Enabled")
    table.modem_port.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Port"),
                                                       T("The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows"),
                                                       ),
                                   )
    table.modem_baud.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Baud"),
                                                       T("Baud rate to use for your modem - The default is safe for most cases"),
                                                       ),
                                   )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Enabled"),
                                                    T("Unselect to disable the modem"),
                                                    ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Add Modem Channel"),
        title_display = T("Modem Channel Details"),
        title_list = T("Modem Channels"),
        title_update = T("Edit Modem Channel"),
        label_list_button = T("View Modem Channels"),
        msg_record_created = T("Modem Channel added"),
        msg_record_modified = T("Modem Channel updated"),
        msg_record_deleted = T("Modem Channel deleted"),
        msg_list_empty = T("No Modem Channels currently defined"),
        )
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_smtp_channel():
    """
        RESTful CRUD controller for SMTP to SMS Outbound channels
        - appears in the administration menu

        Configures email-to-SMS gateways (messages are sent to
        phonenumber@address)
    """
    # Default resource name from controller/function
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Field labels & tooltips
    table.address.label = T("Address")
    table.subject.label = T("Subject")
    table.enabled.label = T("Enabled")
    table.address.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Address"),
                                                    T("Email Address to which to send SMS messages. Assumes sending to phonenumber@address"),
                                                    ),
                                )
    table.subject.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Subject"),
                                                    T("Optional Subject to put into Email - can be used as a Security Password by the service provider"),
                                                    ),
                                )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Enabled"),
                                                    T("Unselect to disable this SMTP service"),
                                                    ),
                                )
    # CRUD Strings
    # NOTE(review): strings are keyed on "msg_sms_outbound_gateway" rather
    # than on tablename as in the sibling controllers - confirm intended
    s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
        label_create=T("Create SMTP to SMS Channel"),
        title_display=T("SMTP to SMS Channel Details"),
        title_list=T("SMTP to SMS Channels"),
        title_update=T("Edit SMTP to SMS Channel"),
        label_list_button=T("List SMTP to SMS Channels"),
        label_delete_button=T("Delete SMTP to SMS Channel"),
        msg_record_created=T("SMTP to SMS Channel added"),
        msg_record_modified=T("SMTP to SMS Channel updated"),
        msg_record_deleted=T("SMTP to SMS Channel deleted"),
        msg_list_empty=T("No SMTP to SMS Channels currently registered"),
        )
    # After update, return to the update form of record 1
    s3db.configure(tablename,
                   update_next = URL(args = [1, "update"]),
                   )
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_webapi_channel():
    """
        RESTful CRUD controller for Web API channels
        - appears in the administration menu

        Configures HTTP(S)-POST based SMS gateways
    """
    # Default resource name from controller/function
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Field labels & tooltips
    table.url.label = T("URL")
    table.message_variable.label = T("Message variable")
    table.to_variable.label = T("To variable")
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.enabled.label = T("Enabled")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("The URL of your web gateway without the POST parameters"),
                                                ),
                            )
    table.parameters.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Parameters"),
                                                       T("The POST variables other than the ones containing the message and the phone number"),
                                                       ),
                                   )
    table.message_variable.comment = DIV(_class = "tooltip",
                                         _title="%s|%s" % (T("Message Variable"),
                                                           T("The POST variable on the URL used for sending messages"),
                                                           ),
                                         )
    table.to_variable.comment = DIV(_class = "tooltip",
                                    _title = "%s|%s" % (T("To variable"),
                                                        T("The POST variable containing the phone number"),
                                                        ),
                                    )
    table.username.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Username"),
                                                     T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                     ),
                                 )
    table.password.comment = DIV(_class = "tooltip",
                                 _title="%s|%s" % (T("Password"),
                                                   T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                   ),
                                 )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title="%s|%s" % (T("Enabled"),
                                                  T("Unselect to disable this API service"),
                                                  ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Create Web API Channel"),
        title_display = T("Web API Channel Details"),
        title_list = T("Web API Channels"),
        title_update = T("Edit Web API Channel"),
        label_list_button = T("List Web API Channels"),
        label_delete_button = T("Delete Web API Channel"),
        msg_record_created = T("Web API Channel added"),
        msg_record_modified = T("Web API Channel updated"),
        msg_record_deleted = T("Web API Channel deleted"),
        msg_list_empty = T("No Web API Channels currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def tropo_channel():
    """
        RESTful CRUD controller for Tropo channels
        - appears in the administration menu

        Configures Tropo messaging channels (msg_tropo_channel table)
    """
    tablename = "msg_tropo_channel"
    table = s3db[tablename]
    # Field labels & tooltips
    table.token_messaging.label = T("Tropo Messaging Token")
    table.token_messaging.comment = DIV(DIV(_class = "stickytip",
                                            _title = "%s|%s" % (T("Tropo Messaging Token"),
                                                                T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"),
                                            ),
                                        )
    # Voice token support currently disabled
    #table.token_voice.label = T("Tropo Voice Token")
    #table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Create Tropo Channel"),
        title_display = T("Tropo Channel Details"),
        title_list = T("Tropo Channels"),
        title_update = T("Edit Tropo Channel"),
        label_list_button = T("List Tropo Channels"),
        label_delete_button = T("Delete Tropo Channel"),
        msg_record_created = T("Tropo Channel added"),
        msg_record_modified = T("Tropo Channel updated"),
        msg_record_deleted = T("Tropo Channel deleted"),
        msg_list_empty = T("No Tropo Channels currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def twitter_channel():
    """
        RESTful CRUD controller for Twitter channels
        - appears in the administration menu
        Only 1 of these normally in existence
        @ToDo: Don't enforce

        NOTE(review): the OAuth prep below is currently disabled
        (s3.prep = prep is commented out) and references tweepy, whose
        import is also commented out - confirm before re-enabling
    """
    #try:
    #    import tweepy
    #except:
    #    session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
    #    redirect(URL(c="admin", f="index"))
    # Default resource name from controller/function
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter account Details"),
        title_list = T("Twitter accounts"),
        label_create = T("Add Twitter account"),
        title_update = T("Edit Twitter account"),
        label_list_button = T("View Twitter accounts"),
        msg_record_created = T("Twitter account added"),
        msg_record_deleted = T("Twitter account deleted"),
        msg_record_modified = T("Twitter account updated"),
        msg_list_empty = T("No Twitter accounts currently defined"),
        )
    def prep(r):
        # Dead code: not wired up (see #s3.prep = prep below) and depends
        # on tweepy, which is not imported here
        oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
        oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
        if not (oauth_consumer_key and oauth_consumer_secret):
            session.error = T("You should edit Twitter settings in models/000_config.py")
            return True
        oauth = tweepy.OAuthHandler(oauth_consumer_key,
                                    oauth_consumer_secret)
        if r.http == "GET" and r.method in ("create", "update"):
            # We're showing the form
            _s3 = session.s3
            try:
                # Stash the OAuth handshake details in the session for
                # later PIN verification
                _s3.twitter_oauth_url = oauth.get_authorization_url()
                _s3.twitter_request_key = oauth.request_token.key
                _s3.twitter_request_secret = oauth.request_token.secret
            except tweepy.TweepError:
                session.error = T("Problem connecting to twitter.com - please refresh")
                return True
            #table.pin.readable = True
            #table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
            #table.pin.value = ""
            table.twitter_account.label = T("Current Twitter account")
            return True
        else:
            # Not showing form, no need for pin
            #table.pin.readable = False
            #table.pin.label = T("PIN") # won't be seen
            #table.pin.value = "" # but let's be on the safe side
            pass
        return True
    #s3.prep = prep
    # Post-process
    def postp(r, output):
        # Add per-record Enable/Disable (and manual Poll) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            # "Enable" only on disabled rows, "Disable" only on enabled ones
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running => offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
            #if isinstance(output, dict):
            #    if r.http == "GET" and r.method in ("create", "update"):
            #        rheader = A(T("Collect PIN from Twitter"),
            #                    _href = session.s3.twitter_oauth_url,
            #                    _target = "_blank")
            #        output["rheader"] = rheader
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def inject_search_after_save(output):
    """
        Inject a Search After Save checkbox
        in the Twitter Search Query Form

        @param output: the CRUD controller output dict; when it contains a
                       form, a new row with the checkbox is appended to it
                       (modified in-place, nothing returned)

        @raises NotImplementedError: if the configured formstyle is neither
                                     "bootstrap" nor callable
    """
    if "form" in output:
        # Renamed from 'id' to avoid shadowing the builtin
        widget_id = "search_after_save"
        label = LABEL("%s:" % T("Search After Save?"),
                      _for = "msg_twitter_search",
                      )
        widget = INPUT(_name = "search_after_save",
                       _type = "checkbox",
                       value = "on",
                       _id = widget_id,
                       _class = "boolean",
                       )
        comment = ""
        if s3_formstyle == "bootstrap":
            _controls = DIV(widget,
                            comment,
                            _class = "controls",
                            )
            row = DIV(label,
                      _controls,
                      _class = "control-group",
                      _id = "%s__row" % widget_id,
                      )
        elif callable(s3_formstyle):
            row = s3_formstyle(widget_id, label, widget, comment)
        else:
            # Unsupported formstyle
            # (was a bare 'raise', which outside an except block fails with
            # the unhelpful "No active exception to re-raise")
            raise NotImplementedError("Unsupported formstyle")
        # Append the extra row into the form's penultimate container
        output["form"][0][-2].append(row)
# -----------------------------------------------------------------------------
def action_after_save(form):
    """
        onaccept callback for Twitter search queries: when the
        "search_after_save" flag was submitted with the form, kick off
        the search for the newly saved query straight away.
    """
    run_now = request.post_vars.get("search_after_save")
    if run_now:
        query_id = form.vars.id
        s3task.run_async("msg_twitter_search", args = [query_id])
        session.information = T("The search results should appear shortly - refresh to see them")
# -----------------------------------------------------------------------------
def twitter_search():
    """
        RESTful CRUD controller to add keywords
        for Twitter Search

        Requires tweepy & a configured msg_twitter_channel; language
        options are narrowed to those supported by Twitter
    """
    tablename = "msg_twitter_search"
    table = s3db[tablename]
    # Processing flags are system-maintained, so hidden
    table.is_processed.writable = False
    table.is_searched.writable = False
    table.is_processed.readable = False
    table.is_searched.readable = False
    # Tweak languages to those supported by Twitter
    try:
        import tweepy
    except:
        session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
        redirect(URL(c="msg", f="index"))
    twitter_settings = S3Msg.get_twitter_api()
    # Fallback list if the API lookup fails
    supported_languages = ['fr', 'en', 'ar', 'ja', 'es', 'de', 'it', 'id', 'pt', 'ko', 'tr', 'ru', 'nl', 'fil',
                           'msa', 'zh-tw', 'zh-cn', 'hi', 'no', 'sv', 'fi', 'da', 'pl', 'hu', 'fa', 'he', 'ur', 'th']
    if twitter_settings:
        twitter_api = twitter_settings[0]
        try:
            supported_languages = [str(x["code"]) for x in twitter_api.supported_languages()]
        except (tweepy.TweepError, AttributeError):
            # List according to Twitter 1.1 API https://dev.twitter.com/docs/api/1.1/get/help/languages
            pass
    # Map deployment locales onto Twitter's language codes
    substitute_list = {"en-gb": "en",
                       "pt-br": "pt"}
    new_langs = []
    lang_default = current.response.s3.language
    langs = set(settings.get_L10n_languages().keys())
    for l in langs:
        if l in supported_languages:
            new_langs.append(l)
        else:
            # Substitute a supported variant if available, else drop
            supported_substitute = substitute_list.get(l)
            if supported_substitute:
                if lang_default == l:
                    lang_default = supported_substitute
                if supported_substitute not in langs:
                    new_langs.append(supported_substitute)
            else:
                # Unsupported default language falls back to English
                if lang_default == l:
                    lang_default = 'en'
    langs = new_langs
    table.lang.requires = IS_IN_SET(langs)
    table.lang.default = lang_default
    comment = "Add the keywords separated by single spaces."
    table.keywords.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Keywords"),
                                                     T(comment),
                                                     ),
                                 )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter Search Queries"),
        title_list = T("Twitter Search Queries"),
        label_create = T("Add Twitter Search Query"),
        title_update = T("Edit Twitter Search Query"),
        label_list_button = T("View Queries"),
        msg_record_created = T("Query added"),
        msg_record_deleted = T("Query deleted"),
        msg_list_empty = T("No Query currently defined"),
        msg_record_modified = T("Query updated"),
        )
    # If the "Search After Save" flag was posted, go straight to results
    if request.post_vars.get("search_after_save"):
        url_after_save = URL(f="twitter_result")
    else:
        url_after_save = None
    s3db.configure(tablename,
                   create_next = url_after_save,
                   create_onaccept = action_after_save,
                   deletable = True,
                   listadd = True,
                   )
    def prep(r):
        # Require Twitter authentication to be configured first
        if r.interactive:
            table = s3db.msg_twitter_channel
            if not db(table.id > 0).select(table.id,
                                           limitby = (0, 1),
                                           ).first():
                session.error = T("Need to configure Twitter Authentication")
                redirect(URL(f = "twitter_channel"))
        return True
    s3.prep = prep
    def postp(r, output):
        # Add Search/KeyGraph action buttons & the Search-After-Save widget
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons
            rtable = r.table
            # "Search" only on queries not yet searched
            query = (rtable.deleted == False) & \
                    (rtable.is_searched == False)
            records = db(query).select(rtable.id)
            restrict_s = [str(record.id) for record in records]
            # "Analyze" only on queries not yet processed
            query = (rtable.deleted == False) & \
                    (rtable.is_processed == False)
            records = db(query).select(rtable.id)
            restrict_k = [str(record.id) for record in records]
            # @ToDo: Make these S3Methods rather than additional controllers
            s3.actions += [{"label": s3_str(T("Search")),
                            "restrict": restrict_s,
                            "url": URL(args = ["[id]", "poll"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Analyze with KeyGraph")),
                            "restrict": restrict_k,
                            "url": URL(args = ["[id]", "keygraph"]),
                            "_class": "action-btn",
                            },
                           ]
            inject_search_after_save(output)
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_result():
    """
        RESTful CRUD controller for Twitter Search Results.

        Read-only resource; single-record views render the Tweet as an
        embedded tweet via Twitter's widgets.js
    """
    tablename = "msg_twitter_result"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter Search Results"),
        title_list = T("Twitter Search Results"),
        label_list_button = T("View Tweets"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets Available."),
        )
    from s3.s3filter import S3DateFilter, S3TextFilter
    filter_widgets = [
        S3DateFilter("date",
                     label = T("Tweeted on"),
                     hide_time = True,
                     _class = "date-filter-class",
                     comment = T("Filter Tweets by the date they were tweeted on"),
                     ),
        S3TextFilter("from_address",
                     label = T("Tweeted by"),
                     _class = "tweeter-filter-class",
                     comment = T("Filter Tweets by who tweeted them"),
                     )
        ]
    # Fields available as report axes/facts
    report_fields = ["search_id",
                     "date",
                     "lang",
                     ]
    # Read-only: results are created by the search task, not by users
    s3db.configure(tablename,
                   deletable = False,
                   editable = False,
                   insertable = False,
                   filter_widgets = filter_widgets,
                   report_options = {"rows": report_fields,
                                     "cols": report_fields,
                                     "fact": report_fields,
                                     "defaults": {"rows": "search_id",
                                                  "cols": "lang",
                                                  },
                                     },
                   )
    def postp(r, output):
        # Replace the plain record view with an embedded Tweet
        if r.id or r.method in ("read", "display"):
            # Display the Tweet as an Embedded tweet
            record = output["item"].record
            # Tweet link
            twitter_url = "https://twitter.com/%s/statuses/%s" % (record.from_address,
                                                                  record.tweet_id,
                                                                  )
            script_url = "https://platform.twitter.com/widgets.js"
            # Themeable Throbber
            throbber = DIV(_class = "s3-twitter-throbber",
                           )
            # Display throbber while Tweet loads
            tweet_container = DIV(throbber,
                                  _class = "s3-twitter-container",
                                  )
            # Hidden link lets widgets.js locate the tweet to embed
            tweet_user = TAG[""](A(_href = twitter_url,
                                   _style = "display: none"),
                                 )
            # Configure Tweet display
            attributes = {"_width": "350px",
                          "_data-conversation": "none",
                          "_class": "twitter-tweet",
                          "lang": record.lang,
                          }
            tweet = TAG["blockquote"](tweet_container,
                                      tweet_user,
                                      SCRIPT(_src = script_url,
                                             _charset = "utf-8"),
                                      **attributes
                                      )
            # Insert tweet
            output["item"] = tweet
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sender():
    """
        RESTful CRUD controller for whitelisting senders.
        User can assign priority to senders.
    """
    tablename = "msg_sender"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Whitelisted Senders"),
        title_list = T("Whitelisted Senders"),
        label_create = T("Whitelist a Sender"),
        title_update = T("Edit Sender Priority"),
        label_list_button = T("View Sender Priority"),
        msg_record_created = T("Sender Whitelisted"),
        msg_record_deleted = T("Sender deleted"),
        msg_list_empty = T("No Senders Whitelisted"),
        msg_record_modified = T("Sender Priority updated"),
        )
    s3db.configure(tablename, listadd=True)
    def prep(r):
        # Allow defaults to be passed in via URL vars for the create form
        # NOTE(review): request.vars[...] raises if the vars are absent -
        # presumably callers always supply them; confirm
        if r.method == "create":
            dsender = request.vars['sender']
            dpriority = request.vars['priority']
            r.table.sender.default = dsender
            r.table.priority.default = dpriority
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def keyword():
    """
        REST Controller
        (resource defaults from the request's controller/function names)
    """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def parser():
    """
        RESTful CRUD controller for Parsers
        - appears in the administration menu

        Admin-only: connects message channels to parsing functions found
        in the active template's S3Parser class
    """
    # Restrict access to administrators
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    def prep(r):
        if r.interactive:
            # CRUD Strings
            s3.crud_strings["msg_parser"] = Storage(
                title_display = T("Parser Connection Details"),
                title_list = T("Parser Connections"),
                label_create = T("Connect Parser"),
                title_update = T("Edit Parser Connection"),
                label_list_button = T("View Parser Connections"),
                msg_record_created = T("Parser connected"),
                msg_record_deleted = T("Parser connection removed"),
                msg_record_modified = T("Parser connection updated"),
                msg_list_empty = T("No Parsers currently connected"),
                )
            import inspect
            import sys
            from s3 import IS_ONE_OF, S3Represent
            # Import the active template's parser module
            template = settings.get_msg_parser()
            module_name = "applications.%s.modules.templates.%s.parser" % \
                (appname, template)
            __import__(module_name)
            mymodule = sys.modules[module_name]
            S3Parser = mymodule.S3Parser()
            # Dynamic lookup of the parsing functions in S3Parser class.
            # On Python 3, methods looked up on an *instance* are bound
            # methods (inspect.ismethod), not plain functions, so the
            # original isfunction-only predicate would find nothing -
            # accept both
            parsers = inspect.getmembers(S3Parser, \
                                         predicate = lambda m: \
                                            inspect.isfunction(m) or \
                                            inspect.ismethod(m))
            parse_opts = []
            pappend = parse_opts.append
            for p in parsers:
                p = p[0]
                # Filter out helper functions
                if not p.startswith("_"):
                    pappend(p)
            # Restrict the form's choices to known channels & parsers
            table = r.table
            table.channel_id.requires = IS_ONE_OF(db, "msg_channel.channel_id",
                                                  S3Represent(lookup = "msg_channel"),
                                                  sort = True,
                                                  )
            table.function_name.requires = IS_IN_SET(parse_opts,
                                                     zero = None)
        return True
    s3.prep = prep
    def postp(r, output):
        # Add per-record Enable/Disable (and manual Parse) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            # "Enable" only on disabled rows, "Disable" only on enabled ones
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running => offer manual parsing
                s3.actions += [{"label": s3_str(T("Parse")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "parse"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# =============================================================================
# The following functions hook into the pr functions:
#
def group():
    """
        RESTful CRUD controller
        - pr_group resource for messaging, hiding system groups
    """
    # Require authentication (session or HTTP Basic)
    if auth.is_logged_in() or auth.basic():
        pass
    else:
        redirect(URL(c="default", f="user",
                     args = "login",
                     vars = {"_next":URL(c="msg", f="group")},
                     ))
    table = s3db.pr_group
    # Hide unnecessary fields
    table.description.readable = table.description.writable = False
    # Do not show system groups
    s3.filter = (table.system == False)
    return s3_rest_controller("pr", "group",
                              rheader = s3db.pr_rheader,
                              )
# -----------------------------------------------------------------------------
def group_membership():
    """
        RESTful CRUD controller
        - pr_group_membership resource for messaging
    """
    # Require authentication (session or HTTP Basic)
    if auth.is_logged_in() or auth.basic():
        pass
    else:
        redirect(URL(c="default", f="user",
                     args = "login",
                     vars = {"_next": URL(c="msg", f="group_membership")},
                     ))
    table = s3db.pr_group_membership
    # Hide unnecessary fields
    table.comments.readable = table.comments.writable = False
    table.group_head.readable = table.group_head.writable = False
    return s3_rest_controller("pr", f)
# -----------------------------------------------------------------------------
def contacts():
    """
        Allow the user to add, update and delete their contacts
        - seems to be unused (was called 'contact' & was broken)
    """
    table = s3db.pr_contact
    #ptable = s3db.pr_person
    # Require authentication & restrict to the user's own contacts
    if auth.is_logged_in() or auth.basic():
        s3.filter = (table.pe_id == auth.user.pe_id)
    else:
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="contact")}))
    # These fields will be populated automatically
    table.name.writable = table.name.readable = False
    table.pe_id.writable = table.pe_id.readable = False
    table.person_name.writable = table.person_name.readable = False
    table.id.writable = False
    #table.id.readable = False
    def msg_contact_onvalidation(form):
        # Add the person id to the record
        if auth.user:
            form.vars.pe_id = auth.user.pe_id
    s3db.configure(table._tablename,
                   onvalidation = msg_contact_onvalidation)
    def prep(r):
        # Restrict update and delete access to contacts not owned by the user
        if r.id :
            pe_id = r.record.pe_id
            if auth.user and auth.user.pe_id == pe_id:
                return True
            else:
                session.error = T("Access denied")
                return {"bypass": True, "output": redirect(URL(r=request))}
        else:
            return True
    s3.prep = prep
    response.menu_options = []
    return s3_rest_controller("pr", "contact")
# -----------------------------------------------------------------------------
def search():
    """
        Autocomplete search across Groups & Persons matching a term
        - returns a JSON list of {id, name} matches
    """
    # Must be authenticated (session or HTTP Basic)
    logged_in = auth.is_logged_in() or auth.basic()
    if not logged_in:
        # Not allowed
        return
    # JQuery UI Autocomplete uses 'term' instead of 'value'
    # (old JQuery Autocomplete uses 'q' instead of 'value')
    term = request.vars.term or request.vars.q
    if not term:
        return
    # Optionally restrict the search to a single entity type
    entity_type = get_vars.get("type", None)
    if entity_type:
        matches = person_search(term, entity_type)
    else:
        matches = person_search(term)
    # Encode in JSON
    response.headers["Content-Type"] = "application/json"
    return json.dumps(matches)
# -----------------------------------------------------------------------------
def recipient_represent(id, default_label=""):
    """ Simplified output as-compared to pr_pentity_represent """

    # Resolve the entity's instance type first
    etable = s3db.pr_pentity
    entity = db(etable.pe_id == id).select(etable.instance_type,
                                           limitby = (0, 1),
                                           ).first()
    if not entity:
        return ""
    instance_type = entity.instance_type
    itable = db.get(instance_type, None)
    if not itable:
        return ""
    # Persons render as their full name, Groups as their name
    if instance_type == "pr_person":
        person = db(itable.pe_id == id).select(itable.first_name,
                                               itable.middle_name,
                                               itable.last_name,
                                               limitby = (0, 1),
                                               ).first()
        if person:
            from s3 import s3_fullname
            return s3_fullname(person)
    elif instance_type == "pr_group":
        group = db(itable.pe_id == id).select(itable.name,
                                              limitby = (0, 1),
                                              ).first()
        if group:
            return group.name
    # No representable record found
    return ""
# -----------------------------------------------------------------------------
def person_search(value, type=None):
    """
        Search for People & Groups which match a search term

        @param value: the search term (substring match on names)
        @param type: optional filter, "pr_group" or "pr_person"
        @return: list of {"id": pe_id, "name": representation} dicts

        NOTE(review): a person matching on more than one name field will
        appear multiple times in the results - confirm whether callers
        expect de-duplication
    """
    # Shortcuts
    groups = s3db.pr_group
    persons = s3db.pr_person
    items = []
    # We want to do case-insensitive searches
    # (default anyway on MySQL/SQLite, but not PostgreSQL)
    value = value.lower()
    if type:
        represent = recipient_represent
    else:
        represent = s3db.pr_pentity_represent
    if type == "pr_group" or not type:
        # Check Groups
        query = (groups["name"].lower().like("%" + value + "%")) & (groups.deleted == False)
        rows = db(query).select(groups.pe_id)
        for row in rows:
            items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
    if type == "pr_person" or not type:
        # Check Persons
        deleted = (persons.deleted == False)
        # First name
        query = (persons["first_name"].lower().like("%" + value + "%")) & deleted
        rows = db(query).select(persons.pe_id, cache=s3db.cache)
        for row in rows:
            items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
        # Middle name
        query = (persons["middle_name"].lower().like("%" + value + "%")) & deleted
        rows = db(query).select(persons.pe_id, cache=s3db.cache)
        for row in rows:
            items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
        # Last name
        query = (persons["last_name"].lower().like("%" + value + "%")) & deleted
        rows = db(query).select(persons.pe_id, cache=s3db.cache)
        for row in rows:
            items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
    return items
# -----------------------------------------------------------------------------
def subscription():
    """
        RESTful CRUD controller for Subscriptions
        - resource presumably resolved from the function name by
          s3_rest_controller (msg_subscription) — confirm against s3 framework
    """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
# Send Outbound Messages (was for being called via cron, now useful for debugging)
# -----------------------------------------------------------------------------
def process_email_outbox():
    """ Send Pending Email Messages """
    # Flush all queued outbound messages for the EMAIL contact method
    msg.process_outbox(contact_method = "EMAIL")
# -----------------------------------------------------------------------------
def process_sms_outbox():
    """ Send Pending SMS Messages """
    # Flush all queued outbound messages for the SMS contact method
    msg.process_outbox(contact_method = "SMS")
# -----------------------------------------------------------------------------
def process_twitter_outbox():
    """ Send Pending Twitter Messages """
    # Flush all queued outbound messages for the TWITTER contact method
    msg.process_outbox(contact_method = "TWITTER")
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def facebook_post():
    """
        Post to Facebook
        - test page, Admins only
    """

    title = T("Post to Facebook")

    # Probe the formstyle: a tuple result means separate label/widget rows
    # (e.g. default Eden formstyle); a single row otherwise
    # (e.g. Bootstrap, Foundation or DRRPP)
    formstyle = s3.crud.formstyle
    probe = formstyle("test", "test", "test", "test")
    tuple_rows = isinstance(probe, tuple)

    form_rows = []
    comment = ""

    def add_row(_id, label, widget):
        # Render one form row with the active formstyle and collect it
        row = formstyle("%s__row" % _id, label, widget, comment)
        if tuple_rows:
            form_rows.append(row[0])
            form_rows.append(row[1])
        else:
            form_rows.append(row)

    # Channel selector, built from the enabled Facebook channels
    table = s3db.msg_facebook_channel
    query = (table.deleted == False) & \
            (table.enabled == True)
    rows = db(query).select(table.channel_id, table.name)
    options = [OPTION(row.name, _value=row.channel_id) for row in rows]
    channel_select = SELECT(_name = "channel_id",
                            _id = "channel_id",
                            *options
                            )
    add_row("channel_id", LABEL("%s:" % T("Channel")), channel_select)

    # Post contents
    add_row("post", LABEL("%s:" % T("Contents")), TEXTAREA(_name = "post"))

    # Submit button
    add_row("submit", "", INPUT(_type="submit", _value=T("Post")))

    if tuple_rows:
        # Assume TRs
        form = FORM(TABLE(*form_rows))
    else:
        form = FORM(*form_rows)

    if form.accepts(request.vars, session):
        form_vars = form.vars
        channel_id = form_vars.get("channel_id")
        post = form_vars.get("post")
        if channel_id and post:
            msg.post_to_facebook(post, channel_id)

    return {"form": form,
            "title": title,
            }
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def twitter_post():
    """
        Post to Twitter
        - test page, Admins only
    """

    title = T("Post to Twitter")

    # Probe the formstyle: a tuple result means separate label/widget rows
    # (e.g. default Eden formstyle); a single row otherwise
    # (e.g. Bootstrap, Foundation or DRRPP)
    formstyle = s3.crud.formstyle
    probe = formstyle("test", "test", "test", "test")
    tuple_rows = isinstance(probe, tuple)

    form_rows = []
    comment = ""

    def add_row(_id, label, widget):
        # Render one form row with the active formstyle and collect it
        row = formstyle("%s__row" % _id, label, widget, comment)
        if tuple_rows:
            form_rows.append(row[0])
            form_rows.append(row[1])
        else:
            form_rows.append(row)

    # Channel selector, built from the enabled Twitter channels
    table = s3db.msg_twitter_channel
    query = (table.deleted == False) & \
            (table.enabled == True)
    rows = db(query).select(table.channel_id, table.name)
    options = [OPTION(row.name, _value=row.channel_id) for row in rows]
    channel_select = SELECT(_name = "channel_id",
                            _id = "channel_id",
                            *options
                            )
    add_row("channel_id", LABEL("%s:" % T("Channel")), channel_select)

    # Tweet contents
    add_row("post", LABEL("%s:" % T("Contents")), TEXTAREA(_name = "post"))

    # Submit button
    add_row("submit", "", INPUT(_type="submit", _value=T("Post")))

    if tuple_rows:
        # Assume TRs
        form = FORM(TABLE(*form_rows))
    else:
        form = FORM(*form_rows)

    if form.accepts(request.vars, session):
        form_vars = form.vars
        channel_id = form_vars.get("channel_id")
        post = form_vars.get("post")
        if channel_id and post:
            # NOTE(review): channel_id is required above but not passed to
            # send_tweet — confirm whether send_tweet should take the channel
            msg.send_tweet(post)

    return {"form": form,
            "title": title,
            }
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def tag():
    """ RESTful CRUD controller (Admins only, testing) """
    return s3_rest_controller()
# =============================================================================
# Enabled only for testing:
#
def readKeyGraph(queryID):
    """
        Read a KeyGraph result file "<queryID>.txt" and render the
        community-clustered graph to "<queryID>.svg"

        Expected input format:
            <number of topics>
            then, per topic:
                KEYGRAPH_NODES:<n>
                <n> lines of "nodeid:label1:label2"
                KEYGRAPH_EDGES:<e>   (e counts each edge in both directions)
                <e/2> lines of "node1 node2"
                2 trailer lines (skipped)
    """
    nodelabel = {}  # node id -> display label
    nodetopic = {}  # node id -> topic index
    E = []          # edges as (node1, node2) tuples

    # "with" ensures the file is closed (the original leaked the handle)
    with open("%s.txt" % queryID, "r") as f:
        topics = int(next(f))
        for x in range(topics):
            nodes = int(next(f).split("KEYGRAPH_NODES:")[1])
            for y in range(nodes):
                s = next(f)
                parts = s.split(":")
                nodeid = parts[0]
                nodetopic[str(nodeid)] = x
                # NOTE: labels containing ":" are truncated at the 2nd field
                # (was unicode(...) in a bare except, which raised NameError
                # on Python 3 and silently left the graph empty)
                nodelabel[str(nodeid)] = parts[2].strip()
            # Edges are listed in both directions: only read half
            # (// keeps this an int on Python 3; "/" made range() fail)
            edges = int(next(f).split("KEYGRAPH_EDGES:")[1]) // 2
            for y in range(edges):
                s = next(f)
                n1 = s.split(" ")[0].strip()
                n2 = s.split(" ")[1].strip()
                if (n1 in nodelabel) and (n2 in nodelabel):
                    E.append((str(n1), str(n2)))
            # Skip the 2 trailer lines of each topic block
            next(f)
            next(f)

    from igraph import Graph, write_svg
    g = Graph()
    g.add_vertices([str(s) for s in nodelabel.keys()])
    g.add_edges(E)
    g.vs["name"] = list(nodelabel.values())
    g.vs["label"] = g.vs["name"]
    g.vs["doc_id"] = list(nodelabel.keys())
    layout = g.layout_lgl()
    # NB "layout" must NOT also appear in visual_style: the original passed
    # it both as an explicit kwarg and inside **visual_style, which raises
    # TypeError ("multiple values for keyword argument 'layout'")
    visual_style = {"vertex_size": 20,
                    "vertex_label": g.vs["name"],
                    "bbox": (2000, 2000),
                    "margin": 20,
                    }
    # The original computed filename but never passed it, so no file was
    # ever written — pass it as the output target
    filename = "%s.svg" % queryID
    write_svg(g.community_fastgreedy().as_clustering().graph,
              filename,
              layout = layout,
              **visual_style)
# END ================================================================================
|
# -*- coding: utf-8 -*-
"""
    Messaging Module - Controllers
"""
# Fail early if the Messaging module is disabled in deployment settings
# (c is the current controller name, provided by the web2py environment)
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """
    # Use the nice name configured for this module as the page title
    name = settings.modules[c].get("name_nice")
    response.title = name
    output = {"module_name": name,
              }
    return output
# -----------------------------------------------------------------------------
def basestation():
    """ RESTful CRUD controller for Base Stations """

    def prepare(r):
        # Standard pre-processing applied to all Site Instance Types
        from s3db.org import org_site_prep
        org_site_prep(r)
        return True

    s3.prep = prepare
    return s3_rest_controller()
# =============================================================================
def compose():
    """ Compose a Message which can be sent to a pentity via a number of different communications channels """
    # Delegate to the messaging API's standard compose view
    return msg.compose()
# =============================================================================
def message():
    """
        RESTful CRUD controller for the master message log
        - read-only view over messages of all channel types
    """
    tablename = "msg_message"
    table = s3db.msg_message
    # Expose which channel (instance type) each message belongs to
    table.instance_type.readable = True
    table.instance_type.label = T("Channel")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("Message Log"),
        label_list_button = T("View Message Log"),
        msg_list_empty = T("No Messages currently in the Message Log"),
        )
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons
            # - allow flagging a message's sender for prioritisation
            s3.actions += [{"label": s3_str(T("Mark Sender")),
                            "url": URL(f = "mark_sender",
                                       args = ["[id]"],
                                       ),
                            "_class": "action-btn",
                            },
                           ]
        return output
    s3.postp = postp
    # The log is maintained by the system, not editable interactively
    s3db.configure(tablename,
                   deletable = False,
                   editable = False,
                   insertable = False,
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def contact():
    """
        RESTful CRUD controller for the Contact Form
    """

    def prep(r):
        # Only Admins may browse/manage submissions;
        # everyone else is forced onto the create form
        if auth.s3_has_role("ADMIN"):
            return True
        r.method = "create"
        return True

    s3.prep = prep
    return s3_rest_controller()
# =============================================================================
def mark_sender():
    """
        Assign priority to the sender of a given message
        - looks up the from_address of the message and redirects to
          create/update the corresponding msg_sender record

        Expects the msg_message record id as the first request arg
    """

    try:
        mid = request.args[0]
    except IndexError:
        # No message id supplied (narrowed from a bare except)
        raise SyntaxError

    mtable = s3db.msg_message
    stable = s3db.msg_sender

    # @ToDo: Replace 2 queries with Join
    srecord = db(mtable.id == mid).select(mtable.from_address,
                                          limitby = (0, 1),
                                          ).first()
    if not srecord:
        # Invalid message id: return a 404 instead of crashing with
        # AttributeError on None
        raise HTTP(404, body="Message not found: %s" % mid)

    sender = srecord.from_address
    record = db(stable.sender == sender).select(stable.id,
                                                limitby = (0, 1),
                                                ).first()
    if record:
        args = "update"
    else:
        args = "create"

    redirect(URL(f = "sender",
                 args = args,
                 vars = {"sender": sender},
                 ))
# =============================================================================
def outbox():
    """ View the contents of the Outbox """
    # Outbox is per-user: require authentication
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    from s3db.pr import pr_PersonEntityRepresent
    tablename = "msg_outbox"
    table = s3db[tablename]
    table.message_id.label = T("Message")
    table.message_id.writable = False
    table.message_id.readable = True
    table.pe_id.readable = True
    table.pe_id.label = T("Recipient")
    # Represent foreign keys in human-readable form
    table.message_id.represent = s3db.msg_message_represent
    table.pe_id.represent = pr_PersonEntityRepresent(default_label = "")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("Outbox"),
        label_list_button = T("View Outbox"),
        label_delete_button = T("Delete Message"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in Outbox"),
        )
    def postp(r, output):
        # Add a "Compose" button into the page header
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_outbox():
    """
        RESTful CRUD controller for the Email Outbox
        - all Outbound Email Messages are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_email"
    table = s3db.msg_email
    # Restrict to outbound emails only
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Details"),
        title_list = T("Sent Emails"),
        label_list_button = T("View Sent Emails"),
        label_delete_button = T("Delete Email"),
        msg_record_deleted = T("Email deleted"),
        msg_list_empty = T("No Emails currently in Outbox"),
        )
    def postp(r, output):
        # Add a "Compose" button into the page header
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  "to_address",
                                  "subject",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "email")
# -----------------------------------------------------------------------------
def facebook_outbox():
    """
        RESTful CRUD controller for the Facebook Outbox
        - all Outbound Facebook Messages are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_facebook"
    table = s3db.msg_facebook
    # Restrict to outbound posts only
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Post Details"),
        title_list = T("Sent Posts"),
        label_list_button = T("View Sent Posts"),
        label_delete_button = T("Delete Post"),
        msg_record_deleted = T("Post deleted"),
        msg_list_empty = T("No Posts currently in Outbox"),
        )
    #def postp(r, output):
    #    if isinstance(output, dict):
    #        add_btn = A(T("Compose"),
    #                    _class="action-btn",
    #                    _href=URL(f="compose")
    #                    )
    #        output["rheader"] = add_btn
    #    return output
    #s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  #"to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "facebook")
# -----------------------------------------------------------------------------
def sms_outbox():
    """
        RESTful CRUD controller for the SMS Outbox
        - all sent SMS are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_sms"
    table = s3db.msg_sms
    # Restrict to outbound SMS only
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("SMS Details"),
        title_list = T("Sent SMS"),
        label_list_button = T("View Sent SMS"),
        label_delete_button = T("Delete SMS"),
        msg_record_deleted = T("SMS deleted"),
        msg_list_empty = T("No SMS currently in Outbox"),
        )
    def postp(r, output):
        # Add a "Compose" button into the page header
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  "to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter_outbox():
    """
        RESTful CRUD controller for the Twitter Outbox
        - all sent Tweets are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_twitter"
    table = s3db.msg_twitter
    # Restrict to outbound tweets only
    s3.filter = (table.inbound == False)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Tweet Details"),
        title_list = T("Sent Tweets"),
        label_list_button = T("View Sent Tweets"),
        label_delete_button = T("Delete Tweet"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets currently in Outbox"),
        )
    def postp(r, output):
        # Add a "Compose" button into the page header
        if isinstance(output, dict):
            add_btn = A(T("Compose"),
                        _class = "action-btn",
                        _href = URL(f="compose"),
                        )
            output["rheader"] = add_btn
        return output
    s3.postp = postp
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   listadd = False,
                   list_fields = ["date",
                                  "to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "twitter")
# =============================================================================
def inbox():
    """
        RESTful CRUD controller for the Inbox
        - all Inbound Messages are visible here
    """
    # Inbox is per-user: require authentication
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    table = s3db.msg_message
    # Restrict to inbound messages only
    s3.filter = (table.inbound == True)
    table.inbound.readable = False
    tablename = "msg_message"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Message Details"),
        title_list = T("InBox"),
        label_list_button = T("View InBox"),
        label_delete_button = T("Delete Message"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in InBox"),
        )
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "channel_id",
                                  "from_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "message")
# -----------------------------------------------------------------------------
def email_inbox():
    """
        RESTful CRUD controller for the Email Inbox
        - all Inbound Email Messages are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    # Restrict to inbound emails only
    s3.filter = (FS("inbound") == True)
    from s3 import S3SQLCustomForm, S3SQLInlineComponent
    # Custom form including inline attachments
    crud_form = S3SQLCustomForm("date",
                                "subject",
                                "from_address",
                                "body",
                                S3SQLInlineComponent(
                                    "attachment",
                                    name = "document_id",
                                    label = T("Attachments"),
                                    fields = ["document_id",],
                                    ),
                                )
    tablename = "msg_email"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Details"),
        title_list = T("Email InBox"),
        label_list_button = T("View Email InBox"),
        label_delete_button = T("Delete Email"),
        msg_record_deleted = T("Email deleted"),
        msg_list_empty = T("No Emails currently in InBox"),
        )
    s3db.configure(tablename,
                   crud_form = crud_form,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "subject",
                                  "body",
                                  (T("Attachments"), "attachment.document_id"),
                                  ],
                   )
    def prep(r):
        s3db.msg_email.inbound.readable = False
        if r.id:
            # Hide the attachment label on single-record views
            s3db.msg_attachment.document_id.label = ""
        return True
    s3.prep = prep
    return s3_rest_controller(c, "email")
# =============================================================================
def rss():
    """
        RESTful CRUD controller for RSS feed posts
        - read-only, Admins only
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_rss"
    table = s3db.msg_rss
    # To represent the description suitably
    # If it is an image display an image
    #table.description.represent = lambda description: HTML(description)
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("RSS Post Details"),
        title_list = T("RSS Posts"),
        label_list_button = T("View RSS Posts"),
        label_delete_button = T("Delete Post"),
        msg_record_deleted = T("RSS Post deleted"),
        msg_list_empty = T("No Posts available"),
        )
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "body",
                                  ],
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sms_inbox():
    """
        RESTful CRUD controller for the SMS Inbox
        - all Inbound SMS Messages go here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_sms"
    table = s3db[tablename]
    # Restrict to inbound SMS only
    s3.filter = (table.inbound == True)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("SMS Details"),
        title_list = T("SMS InBox"),
        label_list_button = T("View SMS InBox"),
        label_delete_button = T("Delete SMS"),
        msg_record_deleted = T("SMS deleted"),
        msg_list_empty = T("No SMS currently in InBox"),
        )
    s3db.configure(tablename,
                   # Permissions-based
                   #deletable = False,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter():
    """
        Twitter RESTful Controller
        - read-only view over all tweets (inbound and outbound)

        @ToDo: Action Button to update async
    """
    s3db.configure("msg_twitter",
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "to_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_inbox():
    """
        RESTful CRUD controller for the Twitter Inbox
        - all Inbound Tweets (Directed Messages) are visible here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user",
                     args = "login",
                     ))
    tablename = "msg_twitter"
    table = s3db.msg_twitter
    # Restrict to inbound tweets only
    s3.filter = (table.inbound == True)
    table.inbound.readable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Tweet Details"),
        title_list = T("Twitter InBox"),
        label_list_button = T("View Twitter InBox"),
        label_delete_button = T("Delete Tweet"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets currently in InBox"),
        )
    s3db.configure(tablename,
                   editable = False,
                   insertable = False,
                   list_fields = ["date",
                                  "from_address",
                                  "body",
                                  ],
                   )
    return s3_rest_controller(c, "twitter")
# =============================================================================
def tropo():
    """
        Receive a JSON POST from the Tropo WebAPI

        Handles 3 cases, distinguished by falling through broad excepts:
        1. Outbound message we previously queued (has a "row_id" parameter)
        2. Inbound SMS/IM (has initialText)
        3. Inbound Voice call (unsupported -> 501)

        @see: https://www.tropo.com/docs/webapi/newhowitworks.htm

        NOTE(review): the bare excepts deliberately drive the control flow
        here — a malformed payload is silently ignored; confirm intended.
    """
    # Stored in modules/tropo.py
    from tropo import Tropo, Session
    try:
        s = Session(request.body.read())
        t = Tropo()
        # This is their service contacting us, so parse their request
        try:
            row_id = s.parameters["row_id"]
            # This is an Outbound message which we've requested Tropo to send for us
            table = s3db.msg_tropo_scratch
            query = (table.row_id == row_id)
            row = db(query).select(limitby = (0, 1),
                                   ).first()
            # Send the message
            #t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network)
            t.call(to=row.recipient, network=row.network)
            t.say(row.message)
            # Update status to sent in Outbox
            outbox = s3db.msg_outbox
            db(outbox.id == row.row_id).update(status = 2)
            # @ToDo: Set message log to actioned
            #log = s3db.msg_log
            #db(log.id == row.message_id).update(actioned=True)
            # Clear the Scratchpad
            db(query).delete()
            return t.RenderJson()
        except:
            # This is an Inbound message
            try:
                message = s.initialText
                # This is an SMS/IM
                # Place it in the InBox
                uuid = s.id
                recipient = s.to["id"]
                try:
                    fromaddress = s.fromaddress["id"]
                except:
                    # SyntaxError: s.from => invalid syntax (why!?)
                    fromaddress = ""
                # @ToDo: Update to new model
                #s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress,
                #                    recipient=recipient, message=message,
                #                    inbound=True)
                # Send the message to the parser
                reply = msg.parse_message(message)
                t.say([reply])
                return t.RenderJson()
            except:
                # This is a Voice call
                # - we can't handle these yet
                raise HTTP(501)
    except:
        # GET request or some random POST
        pass
# =============================================================================
@auth.s3_requires_membership(1)
def sms_outbound_gateway():
    """ SMS Outbound Gateway selection for the messaging framework (Admins only) """
    # CRUD Strings
    s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
        label_create = T("Create SMS Outbound Gateway"),
        title_display = T("SMS Outbound Gateway Details"),
        title_list = T("SMS Outbound Gateways"),
        title_update = T("Edit SMS Outbound Gateway"),
        label_list_button = T("List SMS Outbound Gateways"),
        label_delete_button = T("Delete SMS Outbound Gateway"),
        msg_record_created = T("SMS Outbound Gateway added"),
        msg_record_modified = T("SMS Outbound Gateway updated"),
        msg_record_deleted = T("SMS Outbound Gateway deleted"),
        msg_list_empty = T("No SMS Outbound Gateways currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def channel():
    """
        RESTful CRUD controller for Channels
        - unused
    """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_channel():
    """
        RESTful CRUD controller for Inbound Email channels
        - appears in the administration menu
    """
    # Admins only
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_email_channel"
    table = s3db[tablename]
    table.server.label = T("Server")
    table.protocol.label = T("Protocol")
    table.use_ssl.label = "SSL"
    table.port.label = T("Port")
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.delete_from_server.label = T("Delete from Server?")
    table.port.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Port"),
                                                 T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP)."),
                                                 ),
                             )
    table.delete_from_server.comment = DIV(_class = "tooltip",
                                           _title = "%s|%s" % (T("Delete"),
                                                               T("If this is set to True then mails will be deleted from the server after downloading."),
                                                               ),
                                           )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Settings"),
        title_list = T("Email Accounts"),
        label_create = T("Create Email Account"),
        title_update = T("Edit Email Settings"),
        label_list_button = T("View Email Accounts"),
        msg_record_created = T("Account added"),
        msg_record_deleted = T("Email Account deleted"),
        msg_list_empty = T("No Accounts currently defined"),
        msg_record_modified = T("Email Settings updated"),
        )
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            # - shown depending on each channel's current enabled state
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args = ["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running
                # - offer manual polling of enabled channels instead
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def facebook_channel():
    """
        RESTful CRUD controller for Facebook channels
        - appears in the administration menu
    """
    # Admins only
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_facebook_channel"
    table = s3db[tablename]
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Facebook Settings"),
        title_list = T("Facebook Accounts"),
        label_create = T("Add Facebook Account"),
        title_update = T("Edit Facebook Settings"),
        label_list_button = T("View Facebook Accounts"),
        msg_record_created = T("Account added"),
        msg_record_deleted = T("Facebook Account deleted"),
        msg_list_empty = T("No Accounts currently defined"),
        msg_record_modified = T("Facebook Settings updated"),
        )
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            # - shown depending on each channel's current enabled state
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            #if not s3task._is_alive():
            #    # No Scheduler Running
            #    s3.actions += [{"label": s3_str(T("Poll")),
            #                    "restrict": restrict_d),
            #                    "url": URL(args = ["[id]", "poll"]),
            #                    "_class": "action-btn",
            #                    }
            #                   ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def mcommons_channel():
    """
        RESTful CRUD controller for Mobile Commons SMS Channels
        - appears in the administration menu
    """
    # Admins only
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_mcommons_channel"
    table = s3db[tablename]
    table.name.label = T("Account Name")
    table.name.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Account Name"),
                                                 T("Name for your Mobile Commons Account"),
                                                 ),
                             )
    table.campaign_id.label = T("Campaign ID")
    table.url.label = T("URL")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("URL for the Mobile Commons API"),
                                                ),
                            )
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.timestmp.label = T("Last Downloaded")
    table.timestmp.writable = False
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Mobile Commons Setting Details"),
        title_list = T("Mobile Commons Settings"),
        label_create = T("Add Mobile Commons Settings"),
        title_update = T("Edit Mobile Commons Settings"),
        label_list_button = T("View Mobile Commons Settings"),
        msg_record_created = T("Mobile Commons Setting added"),
        msg_record_deleted = T("Mobile Commons Setting deleted"),
        msg_list_empty = T("No Mobile Commons Settings currently defined"),
        msg_record_modified = T("Mobile Commons settings updated"),
        )
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            # - shown depending on each channel's current enabled state
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running
                # - offer manual polling of enabled channels instead
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def gcm_channel():
    """
        RESTful CRUD controller for Google Cloud Messaging Channels
        - appears in the administration menu
    """
    # Admins only
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_gcm_channel"
    table = s3db[tablename]
    table.name.label = T("Account Name")
    # NOTE(review): tooltip title says "Account Label" while the field label
    # is "Account Name" — confirm which wording is intended
    table.name.comment = DIV(_class = "tooltip",
                             _title = "%s|%s" % (T("Account Label"),
                                                 T("Label for GCM Account"),
                                                 ),
                             )
    table.api_key.label = T("API KEY")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Google Cloud Messaging Setting Details"),
        title_list = T("Google Cloud Messaging Settings"),
        label_create = T("Add Google Cloud Messaging Settings"),
        title_update = T("Edit Google Cloud Messaging Settings"),
        label_list_button = T("View Google Cloud Messaging Settings"),
        msg_record_created = T("Google Cloud Messaging Setting added"),
        msg_record_deleted = T("Google Cloud Messaging Setting deleted"),
        msg_list_empty = T("No Google Cloud Messaging Settings currently defined"),
        msg_record_modified = T("Google Cloud Messaging settings updated"),
        )
    def postp(r, output):
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            # NB "!= True" here vs "== False" in the sibling controllers
            table = r.table
            query = (table.deleted != True)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            #if not s3task._is_alive():
            # No Scheduler Running
            #    s3.actions += [{"label": s3_str(T("Poll")),
            #                    "restrict": restrict_d,
            #                    "url": URL(args = ["[id]", "poll"]),
            #                    "_class": "action-btn",
            #                    },
            #                   ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def rss_channel():
    """
        RESTful CRUD controller for RSS channels
        - appears in the administration menu

        Admin-only: configures the RSS feeds which are polled for
        inbound messages.
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_rss_channel"
    table = s3db[tablename]
    # Friendlier labels & tooltips
    table.name.label = T("Name")
    table.description.label = T("Description")
    table.url.label = T("URL/Link")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("Link for the RSS Feed."),
                                                ),
                            )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Subscriptions Status"),
                                                    # Fixed typo: was "susbscribed"
                                                    T("Are you subscribed?"),
                                                    ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("RSS Channel Details"),
        title_list = T("RSS Channels"),
        label_create = T("Add RSS Channel"),
        title_update = T("Edit RSS Channel"),
        label_list_button = T("View RSS Channels"),
        msg_record_created = T("Channel added"),
        msg_record_deleted = T("RSS Channel deleted"),
        msg_list_empty = T("No RSS Channels currently defined"),
        msg_record_modified = T("RSS Channel updated"),
        )
    def status_represent(v):
        """ Represent the channel status: numeric values are a counter """
        try:
            v = int(v)
        except (TypeError, ValueError):
            # Not a counter: free-form status text is passed through unchanged
            # (was a bare except, which would also have masked
            #  KeyboardInterrupt/SystemExit)
            return v
        return "There have been no new entries for %s requests" % v
    s3db.msg_channel_status.status.represent = status_represent
    def postp(r, output):
        # Add per-record Subscribe/Unsubscribe (and Poll) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Subscribe")),
                            "restrict": restrict_e,
                            "url": URL(args = ["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Unsubscribe")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running: offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_channel():
    """
        RESTful CRUD controller for Twilio SMS channels
        - appears in the administration menu

        Admin-only: stores the Twilio account credentials (SID / auth
        token) used to send & receive SMS.
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    tablename = "msg_twilio_channel"
    table = s3db[tablename]
    # Friendlier labels & tooltips for the credential fields
    table.account_name.label = T("Account Name")
    table.account_name.comment = DIV(_class = "tooltip",
                                     _title = "%s|%s" % (T("Account Name"),
                                                         T("Identifier Name for your Twilio Account."),
                                                         ),
                                     )
    table.url.label = T("URL")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("URL for the twilio API."),
                                                ),
                            )
    table.account_sid.label = "Account SID"
    table.auth_token.label = T("AUTH TOKEN")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twilio Channel Details"),
        title_list = T("Twilio Channels"),
        label_create = T("Add Twilio Channel"),
        title_update = T("Edit Twilio Channel"),
        label_list_button = T("View Twilio Channels"),
        msg_record_created = T("Twilio Channel added"),
        msg_record_deleted = T("Twilio Channel deleted"),
        msg_record_modified = T("Twilio Channel updated"),
        msg_list_empty = T("No Twilio Channels currently defined"),
        )
    def postp(r, output):
        # Add per-record Enable/Disable (and Poll) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running: offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_modem_channel():
    """
        RESTful CRUD controller for modem channels
        - appears in the administration menu
        Multiple Modems can be configured to receive Inbound Messages

        Requires pyserial; redirects to the admin index with an error
        when the module is unavailable.
    """
    try:
        import serial
    except ImportError:
        session.error = T("Python Serial module not available within the running Python - this needs installing to activate the Modem")
        redirect(URL(c="admin", f="index"))
    # c & f are the web2py controller/function name globals
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Friendlier labels & tooltips
    table.modem_port.label = T("Port")
    table.modem_baud.label = T("Baud")
    table.enabled.label = T("Enabled")
    table.modem_port.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Port"),
                                                       T("The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows"),
                                                       ),
                                   )
    table.modem_baud.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Baud"),
                                                       T("Baud rate to use for your modem - The default is safe for most cases"),
                                                       ),
                                   )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Enabled"),
                                                    T("Unselect to disable the modem"),
                                                    ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Add Modem Channel"),
        title_display = T("Modem Channel Details"),
        title_list = T("Modem Channels"),
        title_update = T("Edit Modem Channel"),
        label_list_button = T("View Modem Channels"),
        msg_record_created = T("Modem Channel added"),
        msg_record_modified = T("Modem Channel updated"),
        msg_record_deleted = T("Modem Channel deleted"),
        msg_list_empty = T("No Modem Channels currently defined"),
        )
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_smtp_channel():
    """
        RESTful CRUD controller for SMTP to SMS Outbound channels
        - appears in the administration menu
    """
    # c & f are the web2py controller/function name globals
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Friendlier labels & tooltips
    table.address.label = T("Address")
    table.subject.label = T("Subject")
    table.enabled.label = T("Enabled")
    table.address.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Address"),
                                                    T("Email Address to which to send SMS messages. Assumes sending to phonenumber@address"),
                                                    ),
                                )
    table.subject.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Subject"),
                                                    T("Optional Subject to put into Email - can be used as a Security Password by the service provider"),
                                                    ),
                                )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title = "%s|%s" % (T("Enabled"),
                                                    T("Unselect to disable this SMTP service"),
                                                    ),
                                )
    # CRUD Strings
    # NOTE(review): keyed on "msg_sms_outbound_gateway", not on tablename
    # (msg_sms_smtp_channel) — looks like a copy/paste slip; confirm whether
    # these strings are ever picked up
    s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
        label_create=T("Create SMTP to SMS Channel"),
        title_display=T("SMTP to SMS Channel Details"),
        title_list=T("SMTP to SMS Channels"),
        title_update=T("Edit SMTP to SMS Channel"),
        label_list_button=T("List SMTP to SMS Channels"),
        label_delete_button=T("Delete SMTP to SMS Channel"),
        msg_record_created=T("SMTP to SMS Channel added"),
        msg_record_modified=T("SMTP to SMS Channel updated"),
        msg_record_deleted=T("SMTP to SMS Channel deleted"),
        msg_list_empty=T("No SMTP to SMS Channels currently registered"),
        )
    # After update, jump straight back to editing record 1
    s3db.configure(tablename,
                   update_next = URL(args = [1, "update"]),
                   )
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_webapi_channel():
    """
        RESTful CRUD controller for Web API channels
        - appears in the administration menu

        Configures HTTP gateways for outbound SMS (URL, POST variable
        names & optional HTTP BASIC Auth credentials).
    """
    # c & f are the web2py controller/function name globals
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # Friendlier labels & tooltips
    table.url.label = T("URL")
    table.message_variable.label = T("Message variable")
    table.to_variable.label = T("To variable")
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.enabled.label = T("Enabled")
    table.url.comment = DIV(_class = "tooltip",
                            _title = "%s|%s" % (T("URL"),
                                                T("The URL of your web gateway without the POST parameters"),
                                                ),
                            )
    table.parameters.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Parameters"),
                                                       T("The POST variables other than the ones containing the message and the phone number"),
                                                       ),
                                   )
    table.message_variable.comment = DIV(_class = "tooltip",
                                         _title="%s|%s" % (T("Message Variable"),
                                                           T("The POST variable on the URL used for sending messages"),
                                                           ),
                                         )
    table.to_variable.comment = DIV(_class = "tooltip",
                                    _title = "%s|%s" % (T("To variable"),
                                                        T("The POST variable containing the phone number"),
                                                        ),
                                    )
    table.username.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Username"),
                                                     T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                     ),
                                 )
    table.password.comment = DIV(_class = "tooltip",
                                 _title="%s|%s" % (T("Password"),
                                                   T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
                                                   ),
                                 )
    table.enabled.comment = DIV(_class = "tooltip",
                                _title="%s|%s" % (T("Enabled"),
                                                  T("Unselect to disable this API service"),
                                                  ),
                                )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Create Web API Channel"),
        title_display = T("Web API Channel Details"),
        title_list = T("Web API Channels"),
        title_update = T("Edit Web API Channel"),
        label_list_button = T("List Web API Channels"),
        label_delete_button = T("Delete Web API Channel"),
        msg_record_created = T("Web API Channel added"),
        msg_record_modified = T("Web API Channel updated"),
        msg_record_deleted = T("Web API Channel deleted"),
        msg_list_empty = T("No Web API Channels currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def tropo_channel():
    """
        RESTful CRUD controller for Tropo channels
        - appears in the administration menu

        Stores the Tropo messaging token used for outbound messages.
    """
    tablename = "msg_tropo_channel"
    table = s3db[tablename]
    table.token_messaging.label = T("Tropo Messaging Token")
    table.token_messaging.comment = DIV(DIV(_class = "stickytip",
                                            _title = "%s|%s" % (T("Tropo Messaging Token"),
                                                                T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"),
                                            ),
                                        )
    # Voice token support currently disabled:
    #table.token_voice.label = T("Tropo Voice Token")
    #table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Create Tropo Channel"),
        title_display = T("Tropo Channel Details"),
        title_list = T("Tropo Channels"),
        title_update = T("Edit Tropo Channel"),
        label_list_button = T("List Tropo Channels"),
        label_delete_button = T("Delete Tropo Channel"),
        msg_record_created = T("Tropo Channel added"),
        msg_record_modified = T("Tropo Channel updated"),
        msg_record_deleted = T("Tropo Channel deleted"),
        msg_list_empty = T("No Tropo Channels currently registered"),
        )
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def twitter_channel():
    """
        RESTful CRUD controller for Twitter channels
        - appears in the administration menu
        Only 1 of these normally in existence
        @ToDo: Don't enforce
    """
    # OAuth flow via tweepy currently disabled:
    #try:
    #    import tweepy
    #except:
    #    session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
    #    redirect(URL(c="admin", f="index"))
    # c & f are the web2py controller/function name globals
    tablename = "%s_%s" % (c, f)
    table = s3db[tablename]
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter account Details"),
        title_list = T("Twitter accounts"),
        label_create = T("Add Twitter account"),
        title_update = T("Edit Twitter account"),
        label_list_button = T("View Twitter accounts"),
        msg_record_created = T("Twitter account added"),
        msg_record_deleted = T("Twitter account deleted"),
        msg_record_modified = T("Twitter account updated"),
        msg_list_empty = T("No Twitter accounts currently defined"),
        )
    # NOTE(review): prep is defined but never hooked (the s3.prep assignment
    # below is commented out); it references tweepy, whose import is also
    # commented out above, so re-enabling it as-is would raise NameError.
    def prep(r):
        oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
        oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
        if not (oauth_consumer_key and oauth_consumer_secret):
            session.error = T("You should edit Twitter settings in models/000_config.py")
            return True
        oauth = tweepy.OAuthHandler(oauth_consumer_key,
                                    oauth_consumer_secret)
        if r.http == "GET" and r.method in ("create", "update"):
            # We're showing the form
            _s3 = session.s3
            try:
                # Stash the OAuth request token for the PIN round-trip
                _s3.twitter_oauth_url = oauth.get_authorization_url()
                _s3.twitter_request_key = oauth.request_token.key
                _s3.twitter_request_secret = oauth.request_token.secret
            except tweepy.TweepError:
                session.error = T("Problem connecting to twitter.com - please refresh")
                return True
            #table.pin.readable = True
            #table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
            #table.pin.value = ""
            table.twitter_account.label = T("Current Twitter account")
            return True
        else:
            # Not showing form, no need for pin
            #table.pin.readable = False
            #table.pin.label = T("PIN") # won't be seen
            #table.pin.value = ""       # but let's be on the safe side
            pass
        return True
    #s3.prep = prep
    # Post-process
    def postp(r, output):
        # Add per-record Enable/Disable (and Poll) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running: offer manual polling
                s3.actions += [{"label": s3_str(T("Poll")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "poll"]),
                                "_class": "action-btn",
                                },
                               ]
            #if isinstance(output, dict):
            #    if r.http == "GET" and r.method in ("create", "update"):
            #        rheader = A(T("Collect PIN from Twitter"),
            #                    _href = session.s3.twitter_oauth_url,
            #                    _target = "_blank")
            #        output["rheader"] = rheader
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def inject_search_after_save(output):
    """
        Inject a "Search After Save" checkbox
        into the Twitter Search Query form

        @param output: the CRUD controller output dict; modified in-place
                       when it contains a form
        @raise NotImplementedError: for formstyles which are neither
                                    "bootstrap" nor callable
    """
    if "form" in output:
        # Renamed from 'id' to avoid shadowing the builtin
        row_id = "search_after_save"
        label = LABEL("%s:" % T("Search After Save?"),
                      _for = "msg_twitter_search",
                      )
        widget = INPUT(_name = "search_after_save",
                       _type = "checkbox",
                       value = "on",
                       _id = row_id,
                       _class = "boolean",
                       )
        comment = ""
        if s3_formstyle == "bootstrap":
            _controls = DIV(widget,
                            comment,
                            _class = "controls",
                            )
            row = DIV(label,
                      _controls,
                      _class = "control-group",
                      _id = "%s__row" % row_id,
                      )
        elif callable(s3_formstyle):
            row = s3_formstyle(row_id, label, widget, comment)
        else:
            # Was a bare `raise` outside an except-block, which raises
            # "RuntimeError: No active exception to re-raise" - raise an
            # explicit, diagnosable exception instead
            raise NotImplementedError("Unsupported formstyle: %r" % s3_formstyle)
        # Append the checkbox row just before the form's submit row
        output["form"][0][-2].append(row)
# -----------------------------------------------------------------------------
def action_after_save(form):
    """
        create-onaccept for Twitter search queries: when the
        "search after save" flag was set in the form, schedule the
        search task immediately & tell the user to expect results.
    """
    if not request.post_vars.get("search_after_save"):
        return
    s3task.run_async("msg_twitter_search", args = [form.vars.id])
    session.information = T("The search results should appear shortly - refresh to see them")
# -----------------------------------------------------------------------------
def twitter_search():
    """
        RESTful CRUD controller to add keywords
        for Twitter Search

        Requires tweepy & a configured Twitter channel; language options
        are restricted to those supported by Twitter.
    """
    tablename = "msg_twitter_search"
    table = s3db[tablename]
    # Status flags are maintained by the search/KeyGraph tasks, not the user
    table.is_processed.writable = False
    table.is_searched.writable = False
    table.is_processed.readable = False
    table.is_searched.readable = False
    # Tweak languages to those supported by Twitter
    try:
        import tweepy
    except ImportError:
        # Was a bare except: only a missing module should redirect here
        session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
        redirect(URL(c="msg", f="index"))
    twitter_settings = S3Msg.get_twitter_api()
    # Fallback list according to Twitter 1.1 API
    supported_languages = ['fr', 'en', 'ar', 'ja', 'es', 'de', 'it', 'id', 'pt', 'ko', 'tr', 'ru', 'nl', 'fil',
                           'msa', 'zh-tw', 'zh-cn', 'hi', 'no', 'sv', 'fi', 'da', 'pl', 'hu', 'fa', 'he', 'ur', 'th']
    if twitter_settings:
        twitter_api = twitter_settings[0]
        try:
            supported_languages = [str(x["code"]) for x in twitter_api.supported_languages()]
        except (tweepy.TweepError, AttributeError):
            # List according to Twitter 1.1 API https://dev.twitter.com/docs/api/1.1/get/help/languages
            pass
    # Map the deployment's L10n languages onto Twitter's supported set
    substitute_list = {"en-gb": "en",
                       "pt-br": "pt"}
    new_langs = []
    lang_default = current.response.s3.language
    langs = set(settings.get_L10n_languages().keys())
    for l in langs:
        if l in supported_languages:
            new_langs.append(l)
        else:
            supported_substitute = substitute_list.get(l)
            if supported_substitute:
                if lang_default == l:
                    lang_default = supported_substitute
                if supported_substitute not in langs:
                    new_langs.append(supported_substitute)
            else:
                if lang_default == l:
                    # Unsupported with no substitute: fall back to English
                    lang_default = 'en'
    langs = new_langs
    table.lang.requires = IS_IN_SET(langs)
    table.lang.default = lang_default
    comment = "Add the keywords separated by single spaces."
    table.keywords.comment = DIV(_class = "tooltip",
                                 _title = "%s|%s" % (T("Keywords"),
                                                     T(comment),
                                                     ),
                                 )
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter Search Queries"),
        title_list = T("Twitter Search Queries"),
        label_create = T("Add Twitter Search Query"),
        title_update = T("Edit Twitter Search Query"),
        label_list_button = T("View Queries"),
        msg_record_created = T("Query added"),
        msg_record_deleted = T("Query deleted"),
        msg_list_empty = T("No Query currently defined"),
        msg_record_modified = T("Query updated"),
        )
    # When searching immediately after save, go straight to the results
    if request.post_vars.get("search_after_save"):
        url_after_save = URL(f="twitter_result")
    else:
        url_after_save = None
    s3db.configure(tablename,
                   create_next = url_after_save,
                   create_onaccept = action_after_save,
                   deletable = True,
                   listadd = True,
                   )
    def prep(r):
        # Require a configured Twitter channel before queries can be added
        if r.interactive:
            table = s3db.msg_twitter_channel
            if not db(table.id > 0).select(table.id,
                                           limitby = (0, 1),
                                           ).first():
                session.error = T("Need to configure Twitter Authentication")
                redirect(URL(f = "twitter_channel"))
        # Fixed: previously returned None (falsy) for non-interactive
        # requests, which would have blocked API access
        return True
    s3.prep = prep
    def postp(r, output):
        # Add Search & KeyGraph action buttons + the search-after-save checkbox
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons
            rtable = r.table
            query = (rtable.deleted == False) & \
                    (rtable.is_searched == False)
            records = db(query).select(rtable.id)
            restrict_s = [str(record.id) for record in records]
            query = (rtable.deleted == False) & \
                    (rtable.is_processed == False)
            records = db(query).select(rtable.id)
            restrict_k = [str(record.id) for record in records]
            # @ToDo: Make these S3Methods rather than additional controllers
            s3.actions += [{"label": s3_str(T("Search")),
                            "restrict": restrict_s,
                            "url": URL(args = ["[id]", "poll"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Analyze with KeyGraph")),
                            "restrict": restrict_k,
                            "url": URL(args = ["[id]", "keygraph"]),
                            "_class": "action-btn",
                            },
                           ]
            inject_search_after_save(output)
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_result():
    """
        RESTful CRUD controller for Twitter Search Results.

        Read-only: results are created by the search task; single-record
        views render the tweet as an embedded Twitter widget.
    """
    tablename = "msg_twitter_result"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twitter Search Results"),
        title_list = T("Twitter Search Results"),
        label_list_button = T("View Tweets"),
        msg_record_deleted = T("Tweet deleted"),
        msg_list_empty = T("No Tweets Available."),
        )
    from s3.s3filter import S3DateFilter, S3TextFilter
    filter_widgets = [
        S3DateFilter("date",
                     label = T("Tweeted on"),
                     hide_time = True,
                     _class = "date-filter-class",
                     comment = T("Filter Tweets by the date they were tweeted on"),
                     ),
        S3TextFilter("from_address",
                     label = T("Tweeted by"),
                     _class = "tweeter-filter-class",
                     comment = T("Filter Tweets by who tweeted them"),
                     )
        ]
    report_fields = ["search_id",
                     "date",
                     "lang",
                     ]
    s3db.configure(tablename,
                   deletable = False,
                   editable = False,
                   insertable = False,
                   filter_widgets = filter_widgets,
                   report_options = {"rows": report_fields,
                                     "cols": report_fields,
                                     "fact": report_fields,
                                     "defaults": {"rows": "search_id",
                                                  "cols": "lang",
                                                  },
                                     },
                   )
    def postp(r, output):
        # Replace the default record view with an embedded tweet widget
        if r.id or r.method in ("read", "display"):
            # Display the Tweet as an Embedded tweet
            record = output["item"].record
            # Tweet link
            twitter_url = "https://twitter.com/%s/statuses/%s" % (record.from_address,
                                                                  record.tweet_id,
                                                                  )
            script_url = "https://platform.twitter.com/widgets.js"
            # Themeable Throbber
            throbber = DIV(_class = "s3-twitter-throbber",
                           )
            # Display throbber while Tweet loads
            tweet_container = DIV(throbber,
                                  _class = "s3-twitter-container",
                                  )
            tweet_user = TAG[""](A(_href = twitter_url,
                                   _style = "display: none"),
                                 )
            # Configure Tweet display
            attributes = {"_width": "350px",
                          "_data-conversation": "none",
                          "_class": "twitter-tweet",
                          "lang": record.lang,
                          }
            tweet = TAG["blockquote"](tweet_container,
                                      tweet_user,
                                      SCRIPT(_src = script_url,
                                             _charset = "utf-8"),
                                      **attributes
                                      )
            # Insert tweet
            output["item"] = tweet
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sender():
    """
        RESTful CRUD controller for the sender whitelist:
        lets the user assign a priority to each whitelisted sender.
    """
    tablename = "msg_sender"
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        label_create = T("Whitelist a Sender"),
        title_display = T("Whitelisted Senders"),
        title_list = T("Whitelisted Senders"),
        title_update = T("Edit Sender Priority"),
        label_list_button = T("View Sender Priority"),
        msg_record_created = T("Sender Whitelisted"),
        msg_record_modified = T("Sender Priority updated"),
        msg_record_deleted = T("Sender deleted"),
        msg_list_empty = T("No Senders Whitelisted"),
        )
    s3db.configure(tablename, listadd = True)
    def prep(r):
        # Allow field defaults to be passed in via request vars
        if r.method == "create":
            table = r.table
            table.sender.default = request.vars["sender"]
            table.priority.default = request.vars["priority"]
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def keyword():
    """ RESTful CRUD controller for message keywords """
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
def parser():
    """
        RESTful CRUD controller for Parsers
        - appears in the administration menu

        Admin-only: connects message channels to parsing functions
        discovered dynamically from the deployment template's S3Parser.
    """
    if not auth.s3_has_role("ADMIN"):
        auth.permission.fail()
    def prep(r):
        if r.interactive:
            # CRUD Strings
            s3.crud_strings["msg_parser"] = Storage(
                title_display = T("Parser Connection Details"),
                title_list = T("Parser Connections"),
                label_create = T("Connect Parser"),
                title_update = T("Edit Parser Connection"),
                label_list_button = T("View Parser Connections"),
                msg_record_created = T("Parser connected"),
                msg_record_deleted = T("Parser connection removed"),
                msg_record_modified = T("Parser connection updated"),
                msg_list_empty = T("No Parsers currently connected"),
                )
            import inspect
            import sys
            from s3 import IS_ONE_OF, S3Represent
            # Import the template-specific parser module
            template = settings.get_msg_parser()
            module_name = "applications.%s.modules.templates.%s.parser" % \
                (appname, template)
            __import__(module_name)
            mymodule = sys.modules[module_name]
            S3Parser = mymodule.S3Parser()
            # Dynamic lookup of the parsing functions in S3Parser class.
            # NOTE(review): inspect.isfunction does not match bound methods
            # in Python 3 - presumably the parser entry points are plain
            # functions/staticmethods; confirm parse_opts is non-empty
            parsers = inspect.getmembers(S3Parser, \
                                         predicate=inspect.isfunction)
            parse_opts = []
            pappend = parse_opts.append
            for p in parsers:
                p = p[0]
                # Filter out helper functions
                if not p.startswith("_"):
                    pappend(p)
            table = r.table
            table.channel_id.requires = IS_ONE_OF(db, "msg_channel.channel_id",
                                                  S3Represent(lookup = "msg_channel"),
                                                  sort = True,
                                                  )
            table.function_name.requires = IS_IN_SET(parse_opts,
                                                     zero = None)
        return True
    s3.prep = prep
    def postp(r, output):
        # Add per-record Enable/Disable (and Parse) action buttons
        if r.interactive:
            # Normal Action Buttons
            s3_action_buttons(r)
            # Custom Action Buttons for Enable/Disable
            table = r.table
            query = (table.deleted == False)
            rows = db(query).select(table.id,
                                    table.enabled,
                                    )
            restrict_e = [str(row.id) for row in rows if not row.enabled]
            restrict_d = [str(row.id) for row in rows if row.enabled]
            s3.actions += [{"label": s3_str(T("Enable")),
                            "restrict": restrict_e,
                            "url": URL(args=["[id]", "enable"]),
                            "_class": "action-btn",
                            },
                           {"label": s3_str(T("Disable")),
                            "restrict": restrict_d,
                            "url": URL(args = ["[id]", "disable"]),
                            "_class": "action-btn",
                            },
                           ]
            if not s3task._is_alive():
                # No Scheduler Running: offer manual parsing
                s3.actions += [{"label": s3_str(T("Parse")),
                                "restrict": restrict_d,
                                "url": URL(args = ["[id]", "parse"]),
                                "_class": "action-btn",
                                },
                               ]
        return output
    s3.postp = postp
    return s3_rest_controller()
# =============================================================================
# The following functions hook into the pr functions:
#
def group():
    """
        RESTful CRUD controller for (non-system) Groups
        - requires authentication (HTTP Basic accepted)
    """
    authenticated = auth.is_logged_in() or auth.basic()
    if not authenticated:
        redirect(URL(c="default", f="user",
                     args = "login",
                     vars = {"_next": URL(c="msg", f="group")},
                     ))
    table = s3db.pr_group
    # Hide fields not needed in the messaging context
    field = table.description
    field.readable = field.writable = False
    # Exclude system groups from the list
    s3.filter = (table.system == False)
    return s3_rest_controller("pr", "group", rheader = s3db.pr_rheader)
# -----------------------------------------------------------------------------
def group_membership():
    """
        RESTful CRUD controller for Group Memberships
        - requires authentication (HTTP Basic accepted)
    """
    if not (auth.is_logged_in() or auth.basic()):
        redirect(URL(c="default", f="user",
                     args = "login",
                     vars = {"_next": URL(c="msg", f="group_membership")},
                     ))
    table = s3db.pr_group_membership
    # Hide fields not needed in the messaging context
    for field in (table.comments, table.group_head):
        field.readable = field.writable = False
    return s3_rest_controller("pr", f)
# -----------------------------------------------------------------------------
def contacts():
    """
        Allow the user to add, update and delete their contacts
        - seems to be unused (was called 'contact' & was broken)
    """
    table = s3db.pr_contact
    #ptable = s3db.pr_person
    # Restrict the list to the logged-in user's own contacts
    if auth.is_logged_in() or auth.basic():
        s3.filter = (table.pe_id == auth.user.pe_id)
    else:
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="contact")}))
    # These fields will be populated automatically
    table.name.writable = table.name.readable = False
    table.pe_id.writable = table.pe_id.readable = False
    table.person_name.writable = table.person_name.readable = False
    table.id.writable = False
    #table.id.readable = False
    def msg_contact_onvalidation(form):
        # Add the person id to the record
        if auth.user:
            form.vars.pe_id = auth.user.pe_id
    s3db.configure(table._tablename,
                   onvalidation = msg_contact_onvalidation)
    def prep(r):
        # Restrict update and delete access to contacts not owned by the user
        if r.id :
            pe_id = r.record.pe_id
            if auth.user and auth.user.pe_id == pe_id:
                return True
            else:
                session.error = T("Access denied")
                return {"bypass": True, "output": redirect(URL(r=request))}
        else:
            return True
    s3.prep = prep
    response.menu_options = []
    return s3_rest_controller("pr", "contact")
# -----------------------------------------------------------------------------
def search():
    """
        Search for Groups and/or Persons matching a term
        - used for auto-completion

        @return: a JSON string of matching items, or None when not
                 permitted / no search term was given
    """
    if not (auth.is_logged_in() or auth.basic()):
        # Not allowed
        return
    # JQuery UI Autocomplete uses 'term' instead of 'value'
    # (old JQuery Autocomplete uses 'q' instead of 'value')
    value = request.vars.term or request.vars.q
    if not value:
        return
    # Optionally restrict to a single entity type
    # (person_search treats a missing/empty type as "search everything",
    #  so a single call replaces the former if/else; local also renamed
    #  to avoid shadowing the builtin 'type')
    entity_type = get_vars.get("type", None)
    items = person_search(value, entity_type)
    # Encode in JSON
    response.headers["Content-Type"] = "application/json"
    return json.dumps(items)
# -----------------------------------------------------------------------------
def recipient_represent(id, default_label=""):
    """
        Simplified output as-compared to pr_pentity_represent

        @param id: the pe_id of the recipient
        @param default_label: unused here, kept for interface
                              compatibility with pr_pentity_represent
        @return: the recipient's name, or "" when unresolvable
    """
    output = ""
    table = s3db.pr_pentity
    pe = db(table.pe_id == id).select(table.instance_type,
                                      limitby = (0, 1),
                                      ).first()
    if not pe:
        return output
    instance_type = pe.instance_type
    # Resolve the instance table from its name; None when it doesn't exist
    table = db.get(instance_type, None)
    if not table:
        return output
    if instance_type == "pr_person":
        person = db(table.pe_id == id).select(table.first_name,
                                              table.middle_name,
                                              table.last_name,
                                              limitby = (0, 1),
                                              ).first()
        if person:
            from s3 import s3_fullname
            output = s3_fullname(person)
    elif instance_type == "pr_group":
        group = db(table.pe_id == id).select(table.name,
                                             limitby = (0, 1),
                                             ).first()
        if group:
            output = group.name
    return output
# -----------------------------------------------------------------------------
def person_search(value, type=None):
    """
        Search for People & Groups which match a search term

        @param value: the search term (matched case-insensitively as a
                      substring of group names / person name components)
        @param type: restrict to "pr_group" or "pr_person";
                     None/empty searches both
        @return: list of {"id": pe_id, "name": representation} dicts
                 (may contain duplicates when several name components match)
    """
    # Shortcuts
    groups = s3db.pr_group
    persons = s3db.pr_person
    items = []
    # We want to do case-insensitive searches
    # (default anyway on MySQL/SQLite, but not PostgreSQL)
    value = value.lower()
    if type:
        represent = recipient_represent
    else:
        represent = s3db.pr_pentity_represent
    if type == "pr_group" or not type:
        # Check Groups
        query = (groups["name"].lower().like("%" + value + "%")) & (groups.deleted == False)
        rows = db(query).select(groups.pe_id)
        for row in rows:
            items.append({"id": row.pe_id,
                          "name": represent(row.pe_id, default_label = "")})
    if type == "pr_person" or not type:
        # Check Persons: match any name component
        # (one loop replaces three copy/pasted query blocks; field order
        #  preserves the original first/middle/last result ordering)
        deleted = (persons.deleted == False)
        for fieldname in ("first_name", "middle_name", "last_name"):
            query = (persons[fieldname].lower().like("%" + value + "%")) & deleted
            rows = db(query).select(persons.pe_id, cache=s3db.cache)
            for row in rows:
                items.append({"id": row.pe_id,
                              "name": represent(row.pe_id, default_label = "")})
    return items
# -----------------------------------------------------------------------------
def subscription():
    """ RESTful CRUD controller for message subscriptions """
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
# Send Outbound Messages (was for being called via cron, now useful for debugging)
# -----------------------------------------------------------------------------
def process_email_outbox():
    """
        Send Pending Email Messages
        (was for being called via cron, now useful for debugging)
    """
    msg.process_outbox(contact_method = "EMAIL")
# -----------------------------------------------------------------------------
def process_sms_outbox():
    """
        Send Pending SMS Messages
        (was for being called via cron, now useful for debugging)
    """
    msg.process_outbox(contact_method = "SMS")
# -----------------------------------------------------------------------------
def process_twitter_outbox():
    """
        Send Pending Twitter Messages
        (was for being called via cron, now useful for debugging)
    """
    msg.process_outbox(contact_method = "TWITTER")
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def facebook_post():
    """
        Post to Facebook

        Testing-only, admin-restricted: builds a simple form (channel
        selector + message) & posts the message via the chosen channel.
    """
    title = T("Post to Facebook")
    # Test the formstyle
    formstyle = s3.crud.formstyle
    row = formstyle("test", "test", "test", "test")
    if isinstance(row, tuple):
        # Formstyle with separate row for label (e.g. default Eden formstyle)
        tuple_rows = True
    else:
        # Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
        tuple_rows = False
    form_rows = []
    comment = ""
    # Channel selector: enabled, non-deleted Facebook channels only
    _id = "channel_id"
    label = LABEL("%s:" % T("Channel"))
    table = s3db.msg_facebook_channel
    query = (table.deleted == False) & \
            (table.enabled == True)
    rows = db(query).select(table.channel_id, table.name)
    options = [OPTION(row.name, _value=row.channel_id) for row in rows]
    channel_select = SELECT(_name = "channel_id",
                            _id = _id,
                            *options
                            )
    widget = channel_select
    row = formstyle("%s__row" % _id, label, widget, comment)
    if tuple_rows:
        form_rows.append(row[0])
        form_rows.append(row[1])
    else:
        form_rows.append(row)
    # Message contents
    _id = "post"
    label = LABEL("%s:" % T("Contents"))
    widget = TEXTAREA(_name = "post",
                      )
    row = formstyle("%s__row" % _id, label, widget, comment)
    if tuple_rows:
        form_rows.append(row[0])
        form_rows.append(row[1])
    else:
        form_rows.append(row)
    # Submit button
    _id = "submit"
    label = ""
    widget = INPUT(_type="submit", _value=T("Post"))
    row = formstyle("%s__row" % _id, label, widget, comment)
    if tuple_rows:
        form_rows.append(row[0])
        form_rows.append(row[1])
    else:
        form_rows.append(row)
    if tuple_rows:
        # Assume TRs
        form = FORM(TABLE(*form_rows))
    else:
        form = FORM(*form_rows)
    if form.accepts(request.vars, session):
        form_vars = form.vars
        channel_id = form_vars.get("channel_id")
        post = form_vars.get("post")
        if channel_id and post:
            msg.post_to_facebook(post, channel_id)
    output = {"form": form,
              "title": title,
              }
    return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def twitter_post():
    """ Post to Twitter """

    title = T("Post to Twitter")

    # Probe the active formstyle: legacy styles return a tuple (separate
    # label row + widget row), single-row styles return one element
    formstyle = s3.crud.formstyle
    probe = formstyle("test", "test", "test", "test")
    tuple_rows = isinstance(probe, tuple)

    form_rows = []

    def add_row(row_id, label, widget):
        # Render one form row through the formstyle and collect it
        styled = formstyle("%s__row" % row_id, label, widget, "")
        if tuple_rows:
            form_rows.append(styled[0])
            form_rows.append(styled[1])
        else:
            form_rows.append(styled)

    # Channel selector: all enabled, undeleted Twitter channels
    table = s3db.msg_twitter_channel
    query = (table.deleted == False) & \
            (table.enabled == True)
    channels = db(query).select(table.channel_id, table.name)
    options = [OPTION(channel.name, _value=channel.channel_id)
               for channel in channels]
    add_row("channel_id",
            LABEL("%s:" % T("Channel")),
            SELECT(_name = "channel_id",
                   _id = "channel_id",
                   *options
                   ))

    # Message contents
    add_row("post",
            LABEL("%s:" % T("Contents")),
            TEXTAREA(_name = "post",
                     ))

    # Submit button
    add_row("submit",
            "",
            INPUT(_type="submit", _value=T("Post")))

    if tuple_rows:
        # Assume TRs
        form = FORM(TABLE(*form_rows))
    else:
        form = FORM(*form_rows)

    if form.accepts(request.vars, session):
        form_vars = form.vars
        channel_id = form_vars.get("channel_id")
        post = form_vars.get("post")
        # NOTE(review): channel_id gates sending but is not passed to
        # send_tweet — presumably the default channel is used; confirm.
        if channel_id and post:
            msg.send_tweet(post)

    return {"form": form,
            "title": title,
            }
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def tag():
    """ RESTful CRUD controller (admin-only, testing aid) """
    # Delegates to the generic REST controller for the resource matching
    # this controller/function name (presumably msg_tag — confirm).
    return s3_rest_controller()
# =============================================================================
# Enabled only for testing:
#
def readKeyGraph(queryID):
    """
    Read a KeyGraph result file "<queryID>.txt" and render the clustered
    graph to "<queryID>.svg" using igraph.

    Expected file format, repeated once per topic:
        KEYGRAPH_NODES:<n>   followed by n lines "<id>:<x>:<label>"
        KEYGRAPH_EDGES:<e>   followed by e lines "<id1> <id2>"
                             (each edge is listed in both directions,
                              hence only e // 2 lines are consumed)
        two trailing separator lines
    """

    nodelabel = {}   # node id -> display label
    nodetopic = {}   # node id -> topic index
    E = []           # undirected edge list as (id1, id2) tuples

    # Original left the file handle open; a context manager closes it.
    with open("%s.txt" % queryID, "r") as f:
        topics = int(next(f))
        for topic in range(topics):
            nodes = int(next(f).split("KEYGRAPH_NODES:")[1])
            for _ in range(nodes):
                parts = next(f).split(":")
                nodeid = str(parts[0])
                nodetopic[nodeid] = topic
                # Original called unicode() here, which raises NameError on
                # Python 3 and (via a bare except) silently dropped every
                # label; plain str handling is correct on Python 3.
                nodelabel[nodeid] = parts[2].strip()
            # Integer division: "edges / 2" produced a float on Python 3,
            # making the subsequent range() call fail.
            edges = int(next(f).split("KEYGRAPH_EDGES:")[1]) // 2
            for _ in range(edges):
                s = next(f)
                n1 = s.split(" ")[0].strip()
                n2 = s.split(" ")[1].strip()
                # Skip edges that reference unknown node ids
                if n1 in nodelabel and n2 in nodelabel:
                    E.append((str(n1), str(n2)))
            # Two separator lines between topics
            next(f)
            next(f)

    from igraph import Graph, write_svg
    g = Graph()
    g.add_vertices([str(s) for s in nodelabel.keys()])
    g.add_edges(E)
    g.vs["name"] = list(nodelabel.values())
    g.vs["label"] = g.vs["name"]
    g.vs["doc_id"] = list(nodelabel.keys())

    layout = g.layout_lgl()
    visual_style = {"vertex_size": 20,
                    "vertex_label": g.vs["name"],
                    "layout": layout,
                    "bbox": (2000, 2000),
                    "margin": 20,
                    }
    # Original computed this filename but never passed it to write_svg;
    # supply it as the output target.
    filename = "%s.svg" % queryID
    write_svg(g.community_fastgreedy().as_clustering().graph,
              filename, layout=layout, **visual_style)
# END ================================================================================
|
en
| 0.494841
|
# -*- coding: utf-8 -*- Messaging Module - Controllers # ----------------------------------------------------------------------------- Module's Home Page # ----------------------------------------------------------------------------- RESTful CRUD controller for Base Stations # Pre-processor # Function to call for all Site Instance Types # ============================================================================= Compose a Message which can be sent to a pentity via a number of different communications channels # ============================================================================= RESTful CRUD controller for the master message log # CRUD Strings # Normal Action Buttons # Custom Action Buttons # ----------------------------------------------------------------------------- RESTful CRUD controller for the Contact Form # ============================================================================= Assign priority to the given sender # @ToDo: Replace 2 queries with Join # ============================================================================= View the contents of the Outbox # CRUD Strings # Permissions-based #deletable = False, # ----------------------------------------------------------------------------- RESTful CRUD controller for the Email Outbox - all Outbound Email Messages are visible here # CRUD Strings # Permissions-based #deletable = False, # ----------------------------------------------------------------------------- RESTful CRUD controller for the Facebook Outbox - all Outbound Facebook Messages are visible here # CRUD Strings #def postp(r, output): # if isinstance(output, dict): # add_btn = A(T("Compose"), # _class="action-btn", # _href=URL(f="compose") # ) # output["rheader"] = add_btn # return output #s3.postp = postp # Permissions-based #deletable = False, #"to_address", # ----------------------------------------------------------------------------- RESTful CRUD controller for the SMS Outbox - all sent SMS are visible here # CRUD Strings 
# Permissions-based #deletable = False, # ----------------------------------------------------------------------------- RESTful CRUD controller for the Twitter Outbox - all sent Tweets are visible here # CRUD Strings # Permissions-based #deletable = False, # ============================================================================= RESTful CRUD controller for the Inbox - all Inbound Messages are visible here # CRUD Strings # Permissions-based #deletable = False, # ----------------------------------------------------------------------------- RESTful CRUD controller for the Email Inbox - all Inbound Email Messages are visible here # CRUD Strings # Permissions-based #deletable = False, # ============================================================================= RESTful CRUD controller for RSS feed posts # To represent the description suitably # If it is an image display an image #table.description.represent = lambda description: HTML(description) # CRUD Strings # Permissions-based #deletable = False, # ----------------------------------------------------------------------------- RESTful CRUD controller for the SMS Inbox - all Inbound SMS Messages go here # CRUD Strings # Permissions-based #deletable = False, # ----------------------------------------------------------------------------- Twitter RESTful Controller @ToDo: Action Button to update async # ----------------------------------------------------------------------------- RESTful CRUD controller for the Twitter Inbox - all Inbound Tweets (Directed Messages) are visible here # CRUD Strings # ============================================================================= Receive a JSON POST from the Tropo WebAPI @see: https://www.tropo.com/docs/webapi/newhowitworks.htm # Stored in modules/tropo.py # This is their service contacting us, so parse their request # This is an Outbound message which we've requested Tropo to send for us # Send the message 
#t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network) # Update status to sent in Outbox # @ToDo: Set message log to actioned #log = s3db.msg_log #db(log.id == row.message_id).update(actioned=True) # Clear the Scratchpad # This is an Inbound message # This is an SMS/IM # Place it in the InBox # SyntaxError: s.from => invalid syntax (why!?) # @ToDo: Update to new model #s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress, # recipient=recipient, message=message, # inbound=True) # Send the message to the parser # This is a Voice call # - we can't handle these yet # GET request or some random POST # ============================================================================= SMS Outbound Gateway selection for the messaging framework # CRUD Strings # ----------------------------------------------------------------------------- RESTful CRUD controller for Channels - unused # ----------------------------------------------------------------------------- RESTful CRUD controller for Inbound Email channels - appears in the administration menu # CRUD Strings # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running # ----------------------------------------------------------------------------- RESTful CRUD controller for Facebook channels - appears in the administration menu # CRUD Strings # Normal Action Buttons # Custom Action Buttons for Enable/Disable #if not s3task._is_alive(): # # No Scheduler Running # s3.actions += [{"label": s3_str(T("Poll")), # "restrict": restrict_d), # "url": URL(args = ["[id]", "poll"]), # "_class": "action-btn", # } # ] # ----------------------------------------------------------------------------- RESTful CRUD controller for Mobile Commons SMS Channels - appears in the administration menu # CRUD Strings # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running # ----------------------------------------------------------------------------- RESTful 
CRUD controller for Google Cloud Messaging Channels - appears in the administration menu # CRUD Strings # Normal Action Buttons # Custom Action Buttons for Enable/Disable #if not s3task._is_alive(): # No Scheduler Running # s3.actions += [{"label": s3_str(T("Poll")), # "restrict": restrict_d, # "url": URL(args = ["[id]", "poll"]), # "_class": "action-btn", # }, # ] # ----------------------------------------------------------------------------- RESTful CRUD controller for RSS channels - appears in the administration menu # CRUD Strings # Text # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running # ----------------------------------------------------------------------------- RESTful CRUD controller for Twilio SMS channels - appears in the administration menu # CRUD Strings # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running # ----------------------------------------------------------------------------- RESTful CRUD controller for modem channels - appears in the administration menu Multiple Modems can be configured to receive Inbound Messages # CRUD Strings #------------------------------------------------------------------------------ RESTful CRUD controller for SMTP to SMS Outbound channels - appears in the administration menu # CRUD Strings #------------------------------------------------------------------------------ RESTful CRUD controller for Web API channels - appears in the administration menu # CRUD Strings # ----------------------------------------------------------------------------- RESTful CRUD controller for Tropo channels - appears in the administration menu #table.token_voice.label = T("Tropo Voice Token") #table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>")) # CRUD Strings # 
----------------------------------------------------------------------------- RESTful CRUD controller for Twitter channels - appears in the administration menu Only 1 of these normally in existence @ToDo: Don't enforce #try: # import tweepy #except: # session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!") # redirect(URL(c="admin", f="index")) # CRUD Strings # We're showing the form #table.pin.readable = True #table.pin.label = T("PIN number from Twitter (leave empty to detach account)") #table.pin.value = "" # Not showing form, no need for pin #table.pin.readable = False #table.pin.label = T("PIN") # won't be seen #table.pin.value = "" # but let's be on the safe side #s3.prep = prep # Post-process # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running #if isinstance(output, dict): # if r.http == "GET" and r.method in ("create", "update"): # rheader = A(T("Collect PIN from Twitter"), # _href = session.s3.twitter_oauth_url, # _target = "_blank") # output["rheader"] = rheader # ----------------------------------------------------------------------------- Inject a Search After Save checkbox in the Twitter Search Query Form # Unsupported # ----------------------------------------------------------------------------- Schedules Twitter query search immediately after save depending on flag # ----------------------------------------------------------------------------- RESTful CRUD controller to add keywords for Twitter Search # Tweak languages to those supported by Twitter # List according to Twitter 1.1 API https://dev.twitter.com/docs/api/1.1/get/help/languages # CRUD Strings # Normal Action Buttons # Custom Action Buttons # @ToDo: Make these S3Methods rather than additional controllers # ----------------------------------------------------------------------------- RESTful CRUD controller for Twitter Search Results. 
# CRUD Strings # Display the Tweet as an Embedded tweet # Tweet link # Themeable Throbber # Display throbber while Tweet loads # Configure Tweet display # Insert tweet # ----------------------------------------------------------------------------- RESTful CRUD controller for whitelisting senders. User can assign priority to senders. # CRUD Strings # ----------------------------------------------------------------------------- REST Controller # ----------------------------------------------------------------------------- RESTful CRUD controller for Parsers - appears in the administration menu # CRUD Strings # Dynamic lookup of the parsing functions in S3Parser class. # Filter out helper functions # Normal Action Buttons # Custom Action Buttons for Enable/Disable # No Scheduler Running # ============================================================================= # The following functions hook into the pr functions: # RESTful CRUD controller # Hide unnecessary fields # Do not show system groups # ----------------------------------------------------------------------------- RESTful CRUD controller # Hide unnecessary fields # ----------------------------------------------------------------------------- Allow the user to add, update and delete their contacts - seems to be unused (was called 'contact' & was broken) #ptable = s3db.pr_person # These fields will be populated automatically #table.id.readable = False # Add the person id to the record # Restrict update and delete access to contacts not owned by the user # ----------------------------------------------------------------------------- Do a search of groups which match a type - used for auto-completion # Not allowed # JQuery UI Autocomplete uses 'term' instead of 'value' # (old JQuery Autocomplete uses 'q' instead of 'value') # Call the search function # Encode in JSON # ----------------------------------------------------------------------------- Simplified output as-compared to pr_pentity_represent # 
----------------------------------------------------------------------------- Search for People & Groups which match a search term # Shortcuts # We want to do case-insensitive searches # (default anyway on MySQL/SQLite, but not PostgreSQL) # Check Groups # Check Persons # First name # Middle name # Last name # ----------------------------------------------------------------------------- RESTful CRUD controller # ----------------------------------------------------------------------------- # Send Outbound Messages (was for being called via cron, now useful for debugging) # ----------------------------------------------------------------------------- Send Pending Email Messages # ----------------------------------------------------------------------------- Send Pending SMS Messages # ----------------------------------------------------------------------------- Send Pending Twitter Messages # ============================================================================= # Enabled only for testing: # Post to Facebook # Test the formstyle # Formstyle with separate row for label (e.g. default Eden formstyle) # Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP) # Assume TRs # ============================================================================= # Enabled only for testing: # Post to Twitter # Test the formstyle # Formstyle with separate row for label (e.g. default Eden formstyle) # Formstyle with just a single row (e.g. 
Bootstrap, Foundation or DRRPP) # Assume TRs # ============================================================================= # Enabled only for testing: # RESTful CRUD controller # ============================================================================= # Enabled only for testing: # for x in range(0,len(E)): lx = list(E[x]) lx.append((nodetopic[E[x][0]] - nodetopic[E[x][1]] + 3)*100) E[x] = tuple(lx) #import networkx as nx #g = nx.Graph() #g.add_nodes_from(nodelabel) #layout = g.layout_kamada_kawai() #visual_style["vertex_color"] = [color_dict[gender] for gender in g.vs["gender"]] #visual_style["edge_width"] = [1 + 2 * int(len(is_formal)) for is_formal in g.vs["label"]] #plot(g, **visual_style) #c = g.clusters().subgraphs() #plot(g.community_fastgreedy().as_clustering(), layout=layout) #plot(g) #g.add_weighted_edges_from(E) #nx.relabel_nodes(g, nodelabel, copy=False) #nx.draw(g, node_size=100, font_size=8, edge_size=10000) #labels = nx.draw_networkx_labels(g,pos=nx.spring_layout(g),labels=nodelabel) #import matplotlib.pyplot as plt #plt.savefig('kg3.png', facecolor='w', edgecolor='w',orientation='portrait', papertype=None, format=None,transparent=False, bbox_inches=None, pad_inches=0.1) #plt.show() # END ================================================================================
| 2.257166
| 2
|
pywal/colors.py
|
ekkkkkknoes/pywal
| 1
|
6626429
|
"""
Generate a palette using various backends.
"""
import logging
import os
import random
import re
import sys
from . import theme
from . import util
from .settings import CACHE_DIR, MODULE_DIR, __cache_version__
def list_backends():
    """List color backends."""
    backends = []
    for entry in os.scandir(os.path.join(MODULE_DIR, "backends")):
        # Skip dunder entries such as __init__.py / __pycache__
        if "__" not in entry.name:
            backends.append(entry.name.replace(".py", ""))
    return backends
def colors_to_dict(colors, img):
    """Convert list of colors to pywal format."""
    # color0 is the background, color15 the foreground/cursor; the
    # "colors" section enumerates the full 16-color palette.
    return {
        "wallpaper": img,
        "alpha": util.Color.alpha_num,
        "special": {
            "background": colors[0],
            "foreground": colors[15],
            "cursor": colors[15]
        },
        "colors": {"color%s" % index: color
                   for index, color in enumerate(colors[:16])}
    }
def generic_adjust(colors, light):
    """Generic color adjustment for themers.

    Mutates and returns the 16-color palette: in light mode, desaturate
    and darken the accent colors and derive a light background; in dark
    mode, derive a dark background with light foreground.
    """
    if light:
        # Fix: the original "for color in colors" loop rebound the loop
        # variable only, leaving the list untouched; write the adjusted
        # values back by index so the saturate/darken pass takes effect.
        for index, color in enumerate(colors):
            color = util.saturate_color(color, 0.60)
            colors[index] = util.darken_color(color, 0.5)

        colors[0] = util.lighten_color(colors[0], 0.95)
        colors[7] = util.darken_color(colors[0], 0.75)
        colors[8] = util.darken_color(colors[0], 0.25)
        colors[15] = colors[7]

    else:
        colors[0] = util.darken_color(colors[0], 0.80)
        colors[7] = util.lighten_color(colors[0], 0.75)
        colors[8] = util.lighten_color(colors[0], 0.25)
        colors[15] = colors[7]

    return colors
def saturate_colors(colors, amount):
    """Saturate all colors."""
    # Guard clause: no amount given, or out of the accepted 0..1 range
    if not amount or float(amount) > 1.0:
        return colors

    # Background/foreground slots keep their original saturation
    skip = (0, 7, 8, 15)
    for index in range(len(colors)):
        if index not in skip:
            colors[index] = util.saturate_color(colors[index], float(amount))

    return colors
def cache_fname(img, backend, light, cache_dir, sat=""):
    """Create the cache file name."""
    color_type = "light" if light else "dark"
    # Flatten the image path into a single token usable as a file name
    safe_name = re.sub("[/|\\|.]", "_", img)
    cache_file = "_".join((safe_name, color_type, backend,
                           sat, __cache_version__)) + ".json"
    return [cache_dir, "schemes", cache_file]
def get_backend(backend):
    """Figure out which backend to use."""
    if backend != "random":
        return backend

    # Pick an arbitrary backend from whatever is installed
    backends = list_backends()
    random.shuffle(backends)
    return backends[0]
def palette():
    """Generate a palette from the colors."""
    for index in range(16):
        # New row every 8 swatches
        if index % 8 == 0:
            print()

        # Colors 8-15 need the extended 256-color escape form
        code = "8;5;%s" % index if index > 7 else index
        print("\033[4%sm%s\033[0m" % (code, " " * (80 // 20)), end="")

    print("\n")
def get(img, light=False, backend="wal", cache_dir=CACHE_DIR, sat=""):
    """Generate a palette."""
    # Cache key, e.g. home_dylan_img_jpg_backend_1.2.2.json
    cache_file = os.path.join(*cache_fname(img, backend, light,
                                           cache_dir, sat))

    if os.path.isfile(cache_file):
        colors = theme.file(cache_file)
        colors["alpha"] = util.Color.alpha_num
        logging.info("Found cached colorscheme.")
        return colors

    logging.info("Generating a colorscheme.")
    backend = get_backend(backend)

    # Import the chosen backend dynamically so its dependencies stay
    # optional; fall back to the built-in "wal" backend on failure.
    try:
        __import__("pywal.backends.%s" % backend)
    except ImportError:
        __import__("pywal.backends.wal")
        backend = "wal"

    logging.info("Using %s backend.", backend)
    backend = sys.modules["pywal.backends.%s" % backend]

    colors = getattr(backend, "get")(img, light)
    colors = colors_to_dict(saturate_colors(colors, sat), img)

    util.save_file_json(colors, cache_file)
    logging.info("Generation complete.")

    return colors
def file(input_file):
    """Deprecated: symbolic link to --> theme.file

    Retained for backwards compatibility; new code should call
    theme.file() directly.
    """
    return theme.file(input_file)
|
"""
Generate a palette using various backends.
"""
import logging
import os
import random
import re
import sys
from . import theme
from . import util
from .settings import CACHE_DIR, MODULE_DIR, __cache_version__
def list_backends():
    """List color backends."""
    backends = []
    for entry in os.scandir(os.path.join(MODULE_DIR, "backends")):
        # Skip dunder entries such as __init__.py / __pycache__
        if "__" not in entry.name:
            backends.append(entry.name.replace(".py", ""))
    return backends
def colors_to_dict(colors, img):
    """Convert list of colors to pywal format."""
    # color0 is the background, color15 the foreground/cursor; the
    # "colors" section enumerates the full 16-color palette.
    return {
        "wallpaper": img,
        "alpha": util.Color.alpha_num,
        "special": {
            "background": colors[0],
            "foreground": colors[15],
            "cursor": colors[15]
        },
        "colors": {"color%s" % index: color
                   for index, color in enumerate(colors[:16])}
    }
def generic_adjust(colors, light):
    """Generic color adjustment for themers.

    Mutates and returns the 16-color palette: in light mode, desaturate
    and darken the accent colors and derive a light background; in dark
    mode, derive a dark background with light foreground.
    """
    if light:
        # Fix: the original "for color in colors" loop rebound the loop
        # variable only, leaving the list untouched; write the adjusted
        # values back by index so the saturate/darken pass takes effect.
        for index, color in enumerate(colors):
            color = util.saturate_color(color, 0.60)
            colors[index] = util.darken_color(color, 0.5)

        colors[0] = util.lighten_color(colors[0], 0.95)
        colors[7] = util.darken_color(colors[0], 0.75)
        colors[8] = util.darken_color(colors[0], 0.25)
        colors[15] = colors[7]

    else:
        colors[0] = util.darken_color(colors[0], 0.80)
        colors[7] = util.lighten_color(colors[0], 0.75)
        colors[8] = util.lighten_color(colors[0], 0.25)
        colors[15] = colors[7]

    return colors
def saturate_colors(colors, amount):
    """Saturate all colors."""
    # Guard clause: no amount given, or out of the accepted 0..1 range
    if not amount or float(amount) > 1.0:
        return colors

    # Background/foreground slots keep their original saturation
    skip = (0, 7, 8, 15)
    for index in range(len(colors)):
        if index not in skip:
            colors[index] = util.saturate_color(colors[index], float(amount))

    return colors
def cache_fname(img, backend, light, cache_dir, sat=""):
    """Create the cache file name."""
    color_type = "light" if light else "dark"
    # Flatten the image path into a single token usable as a file name
    safe_name = re.sub("[/|\\|.]", "_", img)
    cache_file = "_".join((safe_name, color_type, backend,
                           sat, __cache_version__)) + ".json"
    return [cache_dir, "schemes", cache_file]
def get_backend(backend):
    """Figure out which backend to use."""
    if backend != "random":
        return backend

    # Pick an arbitrary backend from whatever is installed
    backends = list_backends()
    random.shuffle(backends)
    return backends[0]
def palette():
    """Generate a palette from the colors."""
    for index in range(16):
        # New row every 8 swatches
        if index % 8 == 0:
            print()

        # Colors 8-15 need the extended 256-color escape form
        code = "8;5;%s" % index if index > 7 else index
        print("\033[4%sm%s\033[0m" % (code, " " * (80 // 20)), end="")

    print("\n")
def get(img, light=False, backend="wal", cache_dir=CACHE_DIR, sat=""):
    """Generate a palette."""
    # Cache key, e.g. home_dylan_img_jpg_backend_1.2.2.json
    cache_file = os.path.join(*cache_fname(img, backend, light,
                                           cache_dir, sat))

    if os.path.isfile(cache_file):
        colors = theme.file(cache_file)
        colors["alpha"] = util.Color.alpha_num
        logging.info("Found cached colorscheme.")
        return colors

    logging.info("Generating a colorscheme.")
    backend = get_backend(backend)

    # Import the chosen backend dynamically so its dependencies stay
    # optional; fall back to the built-in "wal" backend on failure.
    try:
        __import__("pywal.backends.%s" % backend)
    except ImportError:
        __import__("pywal.backends.wal")
        backend = "wal"

    logging.info("Using %s backend.", backend)
    backend = sys.modules["pywal.backends.%s" % backend]

    colors = getattr(backend, "get")(img, light)
    colors = colors_to_dict(saturate_colors(colors, sat), img)

    util.save_file_json(colors, cache_file)
    logging.info("Generation complete.")

    return colors
def file(input_file):
    """Deprecated: symbolic link to --> theme.file

    Retained for backwards compatibility; new code should call
    theme.file() directly.
    """
    return theme.file(input_file)
|
en
| 0.709948
|
Generate a palette using various backends. List color backends. Convert list of colors to pywal format. Generic color adjustment for themers. Saturate all colors. Create the cache file name. Figure out which backend to use. Generate a palette from the colors. Generate a palette. # home_dylan_img_jpg_backend_1.2.2.json # Dynamically import the backend we want to use. # This keeps the dependencies "optional". Deprecated: symbolic link to --> theme.file
| 2.485282
| 2
|
setup.py
|
dario-alv-bubbling/bubbling_firebase_authentication
| 0
|
6626430
|
from setuptools import setup
# Runtime dependencies; only install_requires is declared here
# (remaining package metadata presumably lives in setup.cfg or
# pyproject.toml -- confirm against the package layout).
install_requires = [
    "firebase-admin>=4.4.0"
]
setup(
    install_requires=install_requires,
)
|
from setuptools import setup
# Runtime dependencies; only install_requires is declared here
# (remaining package metadata presumably lives in setup.cfg or
# pyproject.toml -- confirm against the package layout).
install_requires = [
    "firebase-admin>=4.4.0"
]
setup(
    install_requires=install_requires,
)
|
none
| 1
| 1.097252
| 1
|
|
deploy/pptracking/python/visualize.py
|
leakyH/PaddleDetection
| 3
|
6626431
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from collections import deque
def visualize_box_mask(im, results, labels, threshold=0.5):
    """
    Args:
        im (str/np.ndarray): path of image/np.ndarray read by cv2
        results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
                        matrix element:[class, score, x_min, y_min, x_max, y_max]
        labels (list): labels:['class1', ..., 'classn']
        threshold (float): Threshold of score.
    Returns:
        im (PIL.Image.Image): visualized image
    """
    # Normalise the input to a PIL image
    if isinstance(im, str):
        image = Image.open(im).convert('RGB')
    else:
        image = Image.fromarray(im)

    boxes = results['boxes'] if 'boxes' in results else []
    if len(boxes) > 0:
        image = draw_box(image, boxes, labels, threshold=threshold)

    return image
def get_color_map_list(num_classes):
    """
    Args:
        num_classes (int): number of class
    Returns:
        color_map (list): RGB color list, one [r, g, b] triple per class
    """
    # Spread the bits of each class index across the three channels so
    # every class id maps to a distinct color.
    flat_map = [0] * (num_classes * 3)
    for class_id in range(num_classes):
        lab, shift = class_id, 0
        while lab:
            flat_map[class_id * 3] |= ((lab >> 0) & 1) << (7 - shift)
            flat_map[class_id * 3 + 1] |= ((lab >> 1) & 1) << (7 - shift)
            flat_map[class_id * 3 + 2] |= ((lab >> 2) & 1) << (7 - shift)
            shift += 1
            lab >>= 3
    return [flat_map[start:start + 3]
            for start in range(0, len(flat_map), 3)]
def draw_box(im, np_boxes, labels, threshold=0.5):
    """
    Draw detection boxes and class/score labels onto a PIL image.

    Args:
        im (PIL.Image.Image): PIL image (drawn in place)
        np_boxes (np.ndarray): shape:[N,6], N: number of box,
            matrix element:[class, score, x_min, y_min, x_max, y_max]
            (or [class, score, x1..y4] for 8-point quadrilaterals)
        labels (list): labels:['class1', ..., 'classn']
        threshold (float): threshold of box
    Returns:
        im (PIL.Image.Image): visualized image
    """
    # Line width scales with image size
    draw_thickness = min(im.size) // 320
    draw = ImageDraw.Draw(im)
    # Cache of class id -> RGB color
    clsid2color = {}
    color_list = get_color_map_list(len(labels))
    # Keep only boxes above the score threshold with a valid class id
    expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
    np_boxes = np_boxes[expect_boxes, :]
    for dt in np_boxes:
        clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
        if clsid not in clsid2color:
            clsid2color[clsid] = color_list[clsid]
        color = tuple(clsid2color[clsid])
        if len(bbox) == 4:
            # Axis-aligned box: draw its outline as a closed polyline
            xmin, ymin, xmax, ymax = bbox
            print('class_id:{:d}, confidence:{:.4f}, left_top:[{:.2f},{:.2f}],'
                  'right_bottom:[{:.2f},{:.2f}]'.format(
                      int(clsid), score, xmin, ymin, xmax, ymax))
            # draw bbox
            draw.line(
                [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
                 (xmin, ymin)],
                width=draw_thickness,
                fill=color)
        elif len(bbox) == 8:
            # Quadrilateral (e.g. rotated box): draw the four corner points
            x1, y1, x2, y2, x3, y3, x4, y4 = bbox
            draw.line(
                [(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
                width=2,
                fill=color)
            # Anchor the label at the top-left of the quadrilateral
            xmin = min(x1, x2, x3, x4)
            ymin = min(y1, y2, y3, y4)
        # draw label on a filled background just above the box
        # NOTE(review): ImageDraw.textsize is removed in Pillow >= 10;
        # confirm the pinned Pillow version before upgrading.
        text = "{} {:.4f}".format(labels[clsid], score)
        tw, th = draw.textsize(text)
        draw.rectangle(
            [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
        draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
    return im
def get_color(idx):
    """Derive a deterministic RGB color from an object/track id."""
    seed = idx * 3
    return ((37 * seed) % 255, (17 * seed) % 255, (29 * seed) % 255)
def plot_tracking(image,
                  tlwhs,
                  obj_ids,
                  scores=None,
                  frame_id=0,
                  fps=0.,
                  ids2names=[],
                  do_entrance_counting=False,
                  entrance=None):
    """
    Draw single-class MOT tracking results on a copy of the frame.

    Args:
        image (np.ndarray): BGR frame to annotate (not modified).
        tlwhs (list): boxes as (top-left x, top-left y, width, height).
        obj_ids (list): track id per box.
        scores (list, optional): confidence per box, drawn below the id.
        frame_id (int): frame index for the header text.
        fps (float): frames per second for the header text.
        ids2names (list): optional single-element class-name list used to
            prefix the id text.
        do_entrance_counting (bool): draw the entrance region if True.
        entrance (sequence): entrance box (x1, y1, x2, y2); required when
            do_entrance_counting is True.
    Returns:
        np.ndarray: annotated copy of the frame.
    """
    # Work on a contiguous copy so the caller's frame is untouched
    im = np.ascontiguousarray(np.copy(image))
    im_h, im_w = im.shape[:2]
    # Text/line sizes scale with frame width
    text_scale = max(1, image.shape[1] / 1600.)
    text_thickness = 2
    line_thickness = max(1, int(image.shape[1] / 500.))
    # Header: frame index, fps and number of tracked objects
    cv2.putText(
        im,
        'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
        (0, int(15 * text_scale)),
        cv2.FONT_HERSHEY_PLAIN,
        text_scale, (0, 0, 255),
        thickness=2)
    for i, tlwh in enumerate(tlwhs):
        x1, y1, w, h = tlwh
        # Convert tlwh to integer corner coordinates (x1, y1, x2, y2)
        intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
        obj_id = int(obj_ids[i])
        id_text = '{}'.format(int(obj_id))
        if ids2names != []:
            assert len(
                ids2names) == 1, "plot_tracking only supports single classes."
            id_text = '{}_'.format(ids2names[0]) + id_text
        # NOTE(review): _line_thickness is computed but never used below;
        # the rectangle always uses line_thickness.
        _line_thickness = 1 if obj_id <= 0 else line_thickness
        # Color is a deterministic function of the track id
        color = get_color(abs(obj_id))
        cv2.rectangle(
            im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
        cv2.putText(
            im,
            id_text, (intbox[0], intbox[1] - 10),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=text_thickness)
        if scores is not None:
            # Confidence score drawn just below the box's top edge
            text = '{:.2f}'.format(float(scores[i]))
            cv2.putText(
                im,
                text, (intbox[0], intbox[1] + 10),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 255, 255),
                thickness=text_thickness)
    if do_entrance_counting:
        # Entrance region for in/out counting, drawn in yellow
        entrance_line = tuple(map(int, entrance))
        cv2.rectangle(
            im,
            entrance_line[0:2],
            entrance_line[2:4],
            color=(0, 255, 255),
            thickness=line_thickness)
    return im
def plot_tracking_dict(image,
                       num_classes,
                       tlwhs_dict,
                       obj_ids_dict,
                       scores_dict,
                       frame_id=0,
                       fps=0.,
                       ids2names=[],
                       do_entrance_counting=False,
                       entrance=None,
                       records=None,
                       center_traj=None):
    """Draw multi-class MOT results on a frame, with optional counting text,
    entrance region and per-track center trajectories.

    Args:
        image (np.ndarray): frame to draw on; a copy is made first.
        num_classes (int): number of tracked classes; keys 0..num_classes-1
            are looked up in the three ``*_dict`` arguments.
        tlwhs_dict (dict): class id -> list of (x, y, w, h) boxes.
        obj_ids_dict (dict): class id -> list of tracking ids.
        scores_dict (dict): class id -> list of scores (or None).
        frame_id (int): frame index shown in the header text.
        fps (float): frames-per-second shown in the header text.
        ids2names (list): optional class-id -> class-name labels.
            NOTE(review): mutable default argument; never mutated here.
        do_entrance_counting (bool): when True (single class only), draw
            the entrance box and the counting text taken from ``records``.
        entrance (sequence|None): entrance box (x1, y1, x2, y2).
        records (list|None): counting summary lines; the last entry is
            parsed for its 'Total'/'In' substrings.
        center_traj (list|None): per-class dicts mapping id -> deque of
            recent box centers; updated in place and drawn as red dots.

    Returns:
        np.ndarray: the annotated copy of ``image``.
    """
    im = np.ascontiguousarray(np.copy(image))
    im_h, im_w = im.shape[:2]
    # Scale overlay sizes with the frame width so drawings stay legible.
    text_scale = max(1, image.shape[1] / 1600.)
    text_thickness = 2
    line_thickness = max(1, int(image.shape[1] / 500.))
    if num_classes == 1:
        if records is not None:
            # records[-1] holds the latest counting summary; show its
            # 'Total ... ' portion as a second header line.
            start = records[-1].find('Total')
            end = records[-1].find('In')
            cv2.putText(
                im,
                records[-1][start:end], (0, int(40 * text_scale)),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 0, 255),
                thickness=2)
    if num_classes == 1 and do_entrance_counting:
        # Draw the entrance region used by the in/out counter.
        entrance_line = tuple(map(int, entrance))
        cv2.rectangle(
            im,
            entrance_line[0:2],
            entrance_line[2:4],
            color=(0, 255, 255),
            thickness=line_thickness)
        # find start location for entrance counting data
        # NOTE(review): records[-1] is accessed without a None check here;
        # do_entrance_counting=True requires records to be supplied.
        start = records[-1].find('In')
        cv2.putText(
            im,
            records[-1][start:-1], (0, int(60 * text_scale)),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=2)
    for cls_id in range(num_classes):
        tlwhs = tlwhs_dict[cls_id]
        obj_ids = obj_ids_dict[cls_id]
        scores = scores_dict[cls_id]
        # Header: frame index, fps and object count (redrawn per class).
        cv2.putText(
            im,
            'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
            (0, int(15 * text_scale)),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=2)
        record_id = set()
        for i, tlwh in enumerate(tlwhs):
            x1, y1, w, h = tlwh
            # Integer corner box and center point for drawing.
            intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
            center = tuple(map(int, (x1 + w / 2., y1 + h / 2.)))
            obj_id = int(obj_ids[i])
            if center_traj is not None:
                # Remember up to 30 recent centers per track so its
                # trajectory can be drawn below.
                record_id.add(obj_id)
                if obj_id not in center_traj[cls_id]:
                    center_traj[cls_id][obj_id] = deque(maxlen=30)
                center_traj[cls_id][obj_id].append(center)
            id_text = '{}'.format(int(obj_id))
            if ids2names != []:
                id_text = '{}_{}'.format(ids2names[cls_id], id_text)
            else:
                id_text = 'class{}_{}'.format(cls_id, id_text)
            _line_thickness = 1 if obj_id <= 0 else line_thickness  # NOTE(review): unused
            # Deterministic per-id color keeps a track's color stable.
            color = get_color(abs(obj_id))
            cv2.rectangle(
                im,
                intbox[0:2],
                intbox[2:4],
                color=color,
                thickness=line_thickness)
            cv2.putText(
                im,
                id_text, (intbox[0], intbox[1] - 10),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 0, 255),
                thickness=text_thickness)
            if scores is not None:
                text = '{:.2f}'.format(float(scores[i]))
                cv2.putText(
                    im,
                    text, (intbox[0], intbox[1] + 10),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 255, 255),
                    thickness=text_thickness)
        if center_traj is not None:
            # Draw stored trajectories, but only for ids seen this frame.
            for traj in center_traj:
                for i in traj.keys():
                    if i not in record_id:
                        continue
                    for point in traj[i]:
                        cv2.circle(im, point, 3, (0, 0, 255), -1)
    return im
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from collections import deque
def visualize_box_mask(im, results, labels, threshold=0.5):
    """Visualize detection boxes on top of an image.

    Args:
        im (str/np.ndarray): image path, or an image already read by cv2.
        results (dict): may contain 'boxes', an np.ndarray of shape [N, 6]
            where each row is [class, score, x_min, y_min, x_max, y_max].
        labels (list): class names, e.g. ['class1', ..., 'classn'].
        threshold (float): score threshold for drawing a box.

    Returns:
        PIL.Image.Image: the visualized image.
    """
    # Normalize the input to a PIL RGB image.
    if isinstance(im, str):
        image = Image.open(im).convert('RGB')
    else:
        image = Image.fromarray(im)
    # Overlay boxes only when detections are present.
    if 'boxes' in results and len(results['boxes']) > 0:
        image = draw_box(image, results['boxes'], labels, threshold=threshold)
    return image
def get_color_map_list(num_classes):
    """Build the VOC-style color palette for ``num_classes`` classes.

    Each class id is expanded, three bits at a time, into an RGB triple.

    Args:
        num_classes (int): number of classes.

    Returns:
        list: one ``[r, g, b]`` list per class.
    """
    color_map = []
    for class_id in range(num_classes):
        r = g = b = 0
        lab, shift = class_id, 7
        while lab:
            # Spread successive bit triplets of the id over the channels.
            r |= ((lab >> 0) & 1) << shift
            g |= ((lab >> 1) & 1) << shift
            b |= ((lab >> 2) & 1) << shift
            shift -= 1
            lab >>= 3
        color_map.append([r, g, b])
    return color_map
def draw_box(im, np_boxes, labels, threshold=0.5):
    """
    Args:
        im (PIL.Image.Image): PIL image
        np_boxes (np.ndarray): shape:[N,6], N: number of box,
                               matrix element:[class, score, x_min, y_min, x_max, y_max]
        labels (list): labels:['class1', ..., 'classn']
        threshold (float): threshold of box
    Returns:
        im (PIL.Image.Image): visualized image
    """
    # Line width scales with the smaller image dimension.
    draw_thickness = min(im.size) // 320
    draw = ImageDraw.Draw(im)
    clsid2color = {}
    color_list = get_color_map_list(len(labels))
    # Keep only boxes above the score threshold with a valid class id.
    expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
    np_boxes = np_boxes[expect_boxes, :]
    for dt in np_boxes:
        clsid, bbox, score = int(dt[0]), dt[2:], dt[1]
        if clsid not in clsid2color:
            clsid2color[clsid] = color_list[clsid]
        color = tuple(clsid2color[clsid])
        if len(bbox) == 4:
            # Axis-aligned box: [x_min, y_min, x_max, y_max].
            xmin, ymin, xmax, ymax = bbox
            print('class_id:{:d}, confidence:{:.4f}, left_top:[{:.2f},{:.2f}],'
                  'right_bottom:[{:.2f},{:.2f}]'.format(
                      int(clsid), score, xmin, ymin, xmax, ymax))
            # draw bbox
            draw.line(
                [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
                 (xmin, ymin)],
                width=draw_thickness,
                fill=color)
        elif len(bbox) == 8:
            # Rotated/quadrilateral box given as four corner points.
            x1, y1, x2, y2, x3, y3, x4, y4 = bbox
            draw.line(
                [(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
                width=2,
                fill=color)
            # Anchor the label at the top-left extreme of the quad.
            xmin = min(x1, x2, x3, x4)
            ymin = min(y1, y2, y3, y4)
        # draw label
        text = "{} {:.4f}".format(labels[clsid], score)
        # NOTE(review): ImageDraw.textsize was removed in Pillow >= 10;
        # newer Pillow requires textbbox/textlength instead.
        tw, th = draw.textsize(text)
        draw.rectangle(
            [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
        draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
    return im
def get_color(idx):
    """Map an integer track id to a deterministic pseudo-random BGR color."""
    seed = 3 * idx
    return tuple((factor * seed) % 255 for factor in (37, 17, 29))
def plot_tracking(image,
                  tlwhs,
                  obj_ids,
                  scores=None,
                  frame_id=0,
                  fps=0.,
                  ids2names=[],
                  do_entrance_counting=False,
                  entrance=None):
    """Draw single-class MOT results (boxes, ids, optional scores) on a frame.

    Args:
        image (np.ndarray): frame to draw on; a copy is made first.
        tlwhs (list): per-object boxes as (top-left x, top-left y, w, h).
        obj_ids (list): tracking id for each box in ``tlwhs``.
        scores (list|None): optional per-object confidence scores.
        frame_id (int): frame index shown in the header text.
        fps (float): frames-per-second shown in the header text.
        ids2names (list): if non-empty, must hold exactly one class name,
            which is prefixed to every id label.
            NOTE(review): mutable default argument; it is never mutated
            here, but ``None`` would be the safer default.
        do_entrance_counting (bool): when True, draw the entrance region.
        entrance (sequence|None): entrance box (x1, y1, x2, y2); required
            when ``do_entrance_counting`` is True.

    Returns:
        np.ndarray: the annotated copy of ``image``.
    """
    # Work on a contiguous copy so the caller's frame is left untouched.
    im = np.ascontiguousarray(np.copy(image))
    im_h, im_w = im.shape[:2]
    # Scale text/line sizes with the frame width so overlays stay legible.
    text_scale = max(1, image.shape[1] / 1600.)
    text_thickness = 2
    line_thickness = max(1, int(image.shape[1] / 500.))
    # Header: frame index, fps and number of tracked objects.
    cv2.putText(
        im,
        'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
        (0, int(15 * text_scale)),
        cv2.FONT_HERSHEY_PLAIN,
        text_scale, (0, 0, 255),
        thickness=2)
    for i, tlwh in enumerate(tlwhs):
        x1, y1, w, h = tlwh
        # Convert tlwh to an integer (x1, y1, x2, y2) box for drawing.
        intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
        obj_id = int(obj_ids[i])
        id_text = '{}'.format(int(obj_id))
        if ids2names != []:
            assert len(
                ids2names) == 1, "plot_tracking only supports single classes."
            id_text = '{}_'.format(ids2names[0]) + id_text
        _line_thickness = 1 if obj_id <= 0 else line_thickness  # NOTE(review): unused
        # Color is derived deterministically from the id so each track
        # keeps a stable color across frames.
        color = get_color(abs(obj_id))
        cv2.rectangle(
            im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
        cv2.putText(
            im,
            id_text, (intbox[0], intbox[1] - 10),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=text_thickness)
        if scores is not None:
            text = '{:.2f}'.format(float(scores[i]))
            cv2.putText(
                im,
                text, (intbox[0], intbox[1] + 10),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 255, 255),
                thickness=text_thickness)
    if do_entrance_counting:
        # Highlight the entrance region used by the flow counter.
        entrance_line = tuple(map(int, entrance))
        cv2.rectangle(
            im,
            entrance_line[0:2],
            entrance_line[2:4],
            color=(0, 255, 255),
            thickness=line_thickness)
    return im
def plot_tracking_dict(image,
                       num_classes,
                       tlwhs_dict,
                       obj_ids_dict,
                       scores_dict,
                       frame_id=0,
                       fps=0.,
                       ids2names=[],
                       do_entrance_counting=False,
                       entrance=None,
                       records=None,
                       center_traj=None):
    """Draw multi-class MOT results on a frame, with optional counting text,
    entrance region and per-track center trajectories.

    Args:
        image (np.ndarray): frame to draw on; a copy is made first.
        num_classes (int): number of tracked classes; keys 0..num_classes-1
            are looked up in the three ``*_dict`` arguments.
        tlwhs_dict (dict): class id -> list of (x, y, w, h) boxes.
        obj_ids_dict (dict): class id -> list of tracking ids.
        scores_dict (dict): class id -> list of scores (or None).
        frame_id (int): frame index shown in the header text.
        fps (float): frames-per-second shown in the header text.
        ids2names (list): optional class-id -> class-name labels.
            NOTE(review): mutable default argument; never mutated here.
        do_entrance_counting (bool): when True (single class only), draw
            the entrance box and the counting text taken from ``records``.
        entrance (sequence|None): entrance box (x1, y1, x2, y2).
        records (list|None): counting summary lines; the last entry is
            parsed for its 'Total'/'In' substrings.
        center_traj (list|None): per-class dicts mapping id -> deque of
            recent box centers; updated in place and drawn as red dots.

    Returns:
        np.ndarray: the annotated copy of ``image``.
    """
    im = np.ascontiguousarray(np.copy(image))
    im_h, im_w = im.shape[:2]
    # Scale overlay sizes with the frame width so drawings stay legible.
    text_scale = max(1, image.shape[1] / 1600.)
    text_thickness = 2
    line_thickness = max(1, int(image.shape[1] / 500.))
    if num_classes == 1:
        if records is not None:
            # records[-1] holds the latest counting summary; show its
            # 'Total ... ' portion as a second header line.
            start = records[-1].find('Total')
            end = records[-1].find('In')
            cv2.putText(
                im,
                records[-1][start:end], (0, int(40 * text_scale)),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 0, 255),
                thickness=2)
    if num_classes == 1 and do_entrance_counting:
        # Draw the entrance region used by the in/out counter.
        entrance_line = tuple(map(int, entrance))
        cv2.rectangle(
            im,
            entrance_line[0:2],
            entrance_line[2:4],
            color=(0, 255, 255),
            thickness=line_thickness)
        # find start location for entrance counting data
        # NOTE(review): records[-1] is accessed without a None check here;
        # do_entrance_counting=True requires records to be supplied.
        start = records[-1].find('In')
        cv2.putText(
            im,
            records[-1][start:-1], (0, int(60 * text_scale)),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=2)
    for cls_id in range(num_classes):
        tlwhs = tlwhs_dict[cls_id]
        obj_ids = obj_ids_dict[cls_id]
        scores = scores_dict[cls_id]
        # Header: frame index, fps and object count (redrawn per class).
        cv2.putText(
            im,
            'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
            (0, int(15 * text_scale)),
            cv2.FONT_HERSHEY_PLAIN,
            text_scale, (0, 0, 255),
            thickness=2)
        record_id = set()
        for i, tlwh in enumerate(tlwhs):
            x1, y1, w, h = tlwh
            # Integer corner box and center point for drawing.
            intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
            center = tuple(map(int, (x1 + w / 2., y1 + h / 2.)))
            obj_id = int(obj_ids[i])
            if center_traj is not None:
                # Remember up to 30 recent centers per track so its
                # trajectory can be drawn below.
                record_id.add(obj_id)
                if obj_id not in center_traj[cls_id]:
                    center_traj[cls_id][obj_id] = deque(maxlen=30)
                center_traj[cls_id][obj_id].append(center)
            id_text = '{}'.format(int(obj_id))
            if ids2names != []:
                id_text = '{}_{}'.format(ids2names[cls_id], id_text)
            else:
                id_text = 'class{}_{}'.format(cls_id, id_text)
            _line_thickness = 1 if obj_id <= 0 else line_thickness  # NOTE(review): unused
            # Deterministic per-id color keeps a track's color stable.
            color = get_color(abs(obj_id))
            cv2.rectangle(
                im,
                intbox[0:2],
                intbox[2:4],
                color=color,
                thickness=line_thickness)
            cv2.putText(
                im,
                id_text, (intbox[0], intbox[1] - 10),
                cv2.FONT_HERSHEY_PLAIN,
                text_scale, (0, 0, 255),
                thickness=text_thickness)
            if scores is not None:
                text = '{:.2f}'.format(float(scores[i]))
                cv2.putText(
                    im,
                    text, (intbox[0], intbox[1] + 10),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 255, 255),
                    thickness=text_thickness)
        if center_traj is not None:
            # Draw stored trajectories, but only for ids seen this frame.
            for traj in center_traj:
                for i in traj.keys():
                    if i not in record_id:
                        continue
                    for point in traj[i]:
                        cv2.circle(im, point, 3, (0, 0, 255), -1)
    return im
|
en
| 0.680171
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Args: im (str/np.ndarray): path of image/np.ndarray read by cv2 results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box, matix element:[class, score, x_min, y_min, x_max, y_max] labels (list): labels:['class1', ..., 'classn'] threshold (float): Threshold of score. Returns: im (PIL.Image.Image): visualized image Args: num_classes (int): number of class Returns: color_map (list): RGB color list Args: im (PIL.Image.Image): PIL image np_boxes (np.ndarray): shape:[N,6], N: number of box, matix element:[class, score, x_min, y_min, x_max, y_max] labels (list): labels:['class1', ..., 'classn'] threshold (float): threshold of box Returns: im (PIL.Image.Image): visualized image # draw bbox # draw label # find start location for entrance counting data
| 2.409857
| 2
|
docs/examples/opentelemetry-example-app/src/opentelemetry_example_app/grpc/hello_world_server.py
|
gky360/opentelemetry-python
| 0
|
6626432
|
#!/usr/bin/env python
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=import-error
"""The Python implementation of the GRPC helloworld.Greeter server.
Note that you need ``opentelemetry-ext-grpc`` and ``protobuf`` to be installed
to run these examples. To run this script in the context of the example app,
install ``opentelemetry-example-app``::
pip install -e ext/opentelemetry-ext-grpc/
pip install -e docs/examples/opentelemetry-example-app
Then run the server in one shell::
python -m opentelemetry_example_app.grpc.hello_world_client
and the client in another::
python -m opentelemetry_example_app.grpc.hello_world_server
See also:
https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server.py
"""
import logging
from concurrent import futures
import grpc
from opentelemetry import trace
from opentelemetry.ext.grpc import server_interceptor
from opentelemetry.ext.grpc.grpcext import intercept_server
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
ConsoleSpanExporter,
SimpleExportSpanProcessor,
)
try:
# Relative imports should work in the context of the package, e.g.:
# `python -m opentelemetry_example_app.grpc.hello_world_server`.
from .gen import helloworld_pb2, helloworld_pb2_grpc
except ImportError:
# This will fail when running the file as a script, e.g.:
# `./hello_world_server.py`
# fall back to importing from the same directory in this case.
from gen import helloworld_pb2, helloworld_pb2_grpc
# Install the SDK tracer provider and print every finished span to the
# console so the example's traces are visible without a collector.
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    SimpleExportSpanProcessor(ConsoleSpanExporter())
)
tracer = trace.get_tracer(__name__)  # module tracer used by the interceptor
class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Implementation of the helloworld.Greeter gRPC service."""

    def SayHello(self, request, context):
        """Return a HelloReply greeting the name carried by *request*."""
        return helloworld_pb2.HelloReply(message="Hello, %s!" % request.name)
def serve():
    """Start a traced gRPC Greeter server on port 50051 and block forever."""
    server = grpc.server(futures.ThreadPoolExecutor())
    # Wrap the server so every incoming RPC is traced by OpenTelemetry.
    server = intercept_server(server, server_interceptor(tracer))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port("[::]:50051")
    server.start()
    server.wait_for_termination()
if __name__ == "__main__":
logging.basicConfig()
serve()
|
#!/usr/bin/env python
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=import-error
"""The Python implementation of the GRPC helloworld.Greeter server.
Note that you need ``opentelemetry-ext-grpc`` and ``protobuf`` to be installed
to run these examples. To run this script in the context of the example app,
install ``opentelemetry-example-app``::
pip install -e ext/opentelemetry-ext-grpc/
pip install -e docs/examples/opentelemetry-example-app
Then run the server in one shell::
python -m opentelemetry_example_app.grpc.hello_world_client
and the client in another::
python -m opentelemetry_example_app.grpc.hello_world_server
See also:
https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server.py
"""
import logging
from concurrent import futures
import grpc
from opentelemetry import trace
from opentelemetry.ext.grpc import server_interceptor
from opentelemetry.ext.grpc.grpcext import intercept_server
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
ConsoleSpanExporter,
SimpleExportSpanProcessor,
)
try:
# Relative imports should work in the context of the package, e.g.:
# `python -m opentelemetry_example_app.grpc.hello_world_server`.
from .gen import helloworld_pb2, helloworld_pb2_grpc
except ImportError:
# This will fail when running the file as a script, e.g.:
# `./hello_world_server.py`
# fall back to importing from the same directory in this case.
from gen import helloworld_pb2, helloworld_pb2_grpc
# Install the SDK tracer provider and print every finished span to the
# console so the example's traces are visible without a collector.
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    SimpleExportSpanProcessor(ConsoleSpanExporter())
)
tracer = trace.get_tracer(__name__)  # module tracer used by the interceptor
class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Implementation of the helloworld.Greeter gRPC service."""

    def SayHello(self, request, context):
        """Return a HelloReply greeting the name carried by *request*."""
        return helloworld_pb2.HelloReply(message="Hello, %s!" % request.name)
def serve():
    """Start a traced gRPC Greeter server on port 50051 and block forever."""
    server = grpc.server(futures.ThreadPoolExecutor())
    # Wrap the server so every incoming RPC is traced by OpenTelemetry.
    server = intercept_server(server, server_interceptor(tracer))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port("[::]:50051")
    server.start()
    server.wait_for_termination()
if __name__ == "__main__":
logging.basicConfig()
serve()
|
en
| 0.667285
|
#!/usr/bin/env python # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=import-error The Python implementation of the GRPC helloworld.Greeter server. Note that you need ``opentelemetry-ext-grpc`` and ``protobuf`` to be installed to run these examples. To run this script in the context of the example app, install ``opentelemetry-example-app``:: pip install -e ext/opentelemetry-ext-grpc/ pip install -e docs/examples/opentelemetry-example-app Then run the server in one shell:: python -m opentelemetry_example_app.grpc.hello_world_client and the client in another:: python -m opentelemetry_example_app.grpc.hello_world_server See also: https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server.py # Relative imports should work in the context of the package, e.g.: # `python -m opentelemetry_example_app.grpc.hello_world_server`. # This will fail when running the file as a script, e.g.: # `./hello_world_server.py` # fall back to importing from the same directory in this case.
| 1.785465
| 2
|
general/views.py
|
VladaDidko/skill-
| 0
|
6626433
|
<gh_stars>0
from django.shortcuts import render
from blog.models import Category, Post
from django.http import HttpResponse
from users.models import Profile, Follower
from django.contrib.auth.models import User
def home(request):
    """Render the home page: all categories, posts newest first, and the
    Follower rows for accounts the current user is following."""
    current_user = request.user
    # NOTE(review): follower__in=User.objects.filter(id=...) is equivalent
    # to the simpler filter(follower=current_user).
    followers = Follower.objects.all().filter(follower__in=User.objects.filter(id=current_user.id))
    context = {
        'categories': Category.objects.all(),
        'posts': Post.objects.all().order_by('-published_date'),
        'followers': followers
    }
    return render(request, 'general/home.html', context)
def users(request):
    """Render the people page listing every user, profile and follow link."""
    return render(
        request,
        'general/people.html',
        {
            'users': User.objects.all(),
            'profiles': Profile.objects.all(),
            'followers': Follower.objects.all(),
        },
    )
def followers(request):
    """Render the list of accounts that follow the current user."""
    current_user = request.user
    # Follower rows where the current user is the one being followed.
    followers = Follower.objects.all().filter(following__in=User.objects.filter(id=current_user.id))
    context = {
        'followers': followers
    }
    return render(request, 'general/followers.html', context)
def following(request):
    """Render the list of accounts the current user is following."""
    current_user = request.user
    # Follower rows where the current user is the follower.
    followers = Follower.objects.all().filter(follower__in=User.objects.filter(id=current_user.id))
    context = {
        'followers': followers
    }
    return render(request, 'general/following.html', context)
def update(request, pk):
    """Make the current user follow the user with primary key *pk*, then
    re-render the people page.

    Args:
        request: incoming HTTP request; ``request.user`` is the follower.
        pk: primary key of the user to follow.

    Returns:
        HttpResponse: the rendered 'general/people.html' page.

    Raises:
        User.DoesNotExist: if no user with id ``pk`` exists.
    """
    following = User.objects.get(id=pk)
    # get_or_create instead of an unconditional save(): repeated requests
    # for the same (follower, following) pair no longer insert duplicates.
    Follower.objects.get_or_create(follower=request.user, following=following)
    context = {
        'users': User.objects.all(),
        'profiles': Profile.objects.all(),
        'followers': Follower.objects.all(),
    }
    return render(request, 'general/people.html', context)
|
from django.shortcuts import render
from blog.models import Category, Post
from django.http import HttpResponse
from users.models import Profile, Follower
from django.contrib.auth.models import User
def home(request):
    """Render the home page: all categories, posts newest first, and the
    Follower rows for accounts the current user is following."""
    current_user = request.user
    # NOTE(review): follower__in=User.objects.filter(id=...) is equivalent
    # to the simpler filter(follower=current_user).
    followers = Follower.objects.all().filter(follower__in=User.objects.filter(id=current_user.id))
    context = {
        'categories': Category.objects.all(),
        'posts': Post.objects.all().order_by('-published_date'),
        'followers': followers
    }
    return render(request, 'general/home.html', context)
def users(request):
    """Render the people page listing every user, profile and follow link."""
    context = {
        'users': User.objects.all(),
        'profiles': Profile.objects.all(),
        'followers': Follower.objects.all(),
    }
    return render(request, 'general/people.html', context)
def followers(request):
    """Render the list of accounts that follow the current user."""
    current_user = request.user
    # Follower rows where the current user is the one being followed.
    followers = Follower.objects.all().filter(following__in=User.objects.filter(id=current_user.id))
    context = {
        'followers': followers
    }
    return render(request, 'general/followers.html', context)
def following(request):
    """Render the list of accounts the current user is following."""
    current_user = request.user
    # Follower rows where the current user is the follower.
    followers = Follower.objects.all().filter(follower__in=User.objects.filter(id=current_user.id))
    context = {
        'followers': followers
    }
    return render(request, 'general/following.html', context)
def update(request, pk):
    """Make the current user follow the user with primary key *pk*, then
    re-render the people page.

    Args:
        request: incoming HTTP request; ``request.user`` is the follower.
        pk: primary key of the user to follow.

    Returns:
        HttpResponse: the rendered 'general/people.html' page.

    Raises:
        User.DoesNotExist: if no user with id ``pk`` exists.
    """
    following = User.objects.get(id=pk)
    # get_or_create instead of an unconditional save(): repeated requests
    # for the same (follower, following) pair no longer insert duplicates.
    Follower.objects.get_or_create(follower=request.user, following=following)
    context = {
        'users': User.objects.all(),
        'profiles': Profile.objects.all(),
        'followers': Follower.objects.all(),
    }
    return render(request, 'general/people.html', context)
|
none
| 1
| 2.113439
| 2
|
|
oneview_redfish_toolkit/api/processor.py
|
AgneshKumar/oneview-redfish-toolkit
| 19
|
6626434
|
<filename>oneview_redfish_toolkit/api/processor.py
# -*- coding: utf-8 -*-
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oneview_redfish_toolkit.api.redfish_json_validator \
import RedfishJsonValidator
from oneview_redfish_toolkit.api.resource_block_collection \
import ResourceBlockCollection
import oneview_redfish_toolkit.api.status_mapping as status_mapping
class Processor(RedfishJsonValidator):
    """Creates a Processor Redfish dict

    Populates self.redfish with some hardcoded Processor
    values and data retrieved from Oneview.
    """

    SCHEMA_NAME = 'Processor'

    def __init__(self, server_hardware, processor_id):
        """Processor constructor

        Populates self.redfish with the some common contents
        and data from OneView server hardware.

        Args:
            server_hardware: server hardware dict from OneView
            processor_id: processor identifier
        """
        super().__init__(self.SCHEMA_NAME)
        self.redfish["@odata.type"] = self.get_odata_type()
        self.redfish["Id"] = processor_id
        self.redfish["Name"] = "Processor " + processor_id
        self.redfish["Status"] = dict()
        # Map the OneView hardware status onto the Redfish State/Health pair.
        state, health = status_mapping.\
            get_redfish_server_hardware_status_struct(server_hardware)
        self.redfish["Status"]["State"] = state
        self.redfish["Status"]["Health"] = health
        self.redfish["ProcessorType"] = "CPU"
        self.redfish["Model"] = server_hardware["processorType"]
        self.redfish["MaxSpeedMHz"] = server_hardware["processorSpeedMhz"]
        self.redfish["TotalCores"] = server_hardware["processorCoreCount"]
        self._fill_links(server_hardware)
        self.redfish["@odata.context"] = \
            "/redfish/v1/$metadata#Processor.Processor"
        # Processors are exposed under the server's resource block URI.
        self.redfish["@odata.id"] = \
            ResourceBlockCollection.BASE_URI + "/" \
            + server_hardware["uuid"] + "/Systems/1/Processors/" + processor_id
        # Validate the assembled payload against the Processor schema.
        self._validate()

    def _fill_links(self, server_hardware):
        # Link back to the Chassis resource representing this hardware.
        self.redfish["Links"] = dict()
        self.redfish["Links"]["Chassis"] = dict()
        self.redfish["Links"]["Chassis"]["@odata.id"] = \
            "/redfish/v1/Chassis/" + server_hardware["uuid"]
|
<filename>oneview_redfish_toolkit/api/processor.py
# -*- coding: utf-8 -*-
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oneview_redfish_toolkit.api.redfish_json_validator \
import RedfishJsonValidator
from oneview_redfish_toolkit.api.resource_block_collection \
import ResourceBlockCollection
import oneview_redfish_toolkit.api.status_mapping as status_mapping
class Processor(RedfishJsonValidator):
    """Creates a Processor Redfish dict

    Populates self.redfish with some hardcoded Processor
    values and data retrieved from OneView.
    """

    SCHEMA_NAME = 'Processor'

    def __init__(self, server_hardware, processor_id):
        """Processor constructor

        Populates self.redfish with the some common contents
        and data from OneView server hardware.

        Args:
            server_hardware: server hardware dict from OneView
            processor_id: processor identifier
        """
        super().__init__(self.SCHEMA_NAME)

        # Map the OneView hardware status onto Redfish State/Health first.
        state, health = status_mapping.\
            get_redfish_server_hardware_status_struct(server_hardware)

        self.redfish["@odata.type"] = self.get_odata_type()
        self.redfish["Id"] = processor_id
        self.redfish["Name"] = "Processor " + processor_id
        self.redfish["Status"] = {"State": state, "Health": health}
        self.redfish["ProcessorType"] = "CPU"
        self.redfish["Model"] = server_hardware["processorType"]
        self.redfish["MaxSpeedMHz"] = server_hardware["processorSpeedMhz"]
        self.redfish["TotalCores"] = server_hardware["processorCoreCount"]
        self._fill_links(server_hardware)
        self.redfish["@odata.context"] = \
            "/redfish/v1/$metadata#Processor.Processor"
        self.redfish["@odata.id"] = \
            ResourceBlockCollection.BASE_URI + "/" \
            + server_hardware["uuid"] + "/Systems/1/Processors/" + processor_id
        # Validate the assembled payload against the Processor schema.
        self._validate()

    def _fill_links(self, server_hardware):
        """Attach the Links section pointing back to the parent Chassis."""
        chassis_uri = "/redfish/v1/Chassis/" + server_hardware["uuid"]
        self.redfish["Links"] = {"Chassis": {"@odata.id": chassis_uri}}
|
en
| 0.804989
|
# -*- coding: utf-8 -*- # Copyright (2018) Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Creates a Processor Redfish dict Populates self.redfish with some hardcoded Processor values and data retrieved from Oneview. Processor constructor Populates self.redfish with the some common contents and data from OneView server hardware. Args: server_hardware: server hardware dict from OneView processor_id: processor identifier #Processor.Processor"
| 2.282249
| 2
|
app/backend/wells/migrations/0125_refactor_inline_sql_export_wells_water_1545.py
|
bcgov/gwells
| 37
|
6626435
|
<filename>app/backend/wells/migrations/0125_refactor_inline_sql_export_wells_water_1545.py<gh_stars>10-100
# Generated by Django 2.2.18 on 2021-03-22 18:37
from django.db import migrations
"""
Well V1
note on extra joins:
well_licences: any well having at least 1 licence entry will be marked as Licensed.
"""
CREATE_EXPORT_WELL_VIEW_SQL_V1 = """
create view export_well_v1_view as
select
well.well_tag_number as well_tag_number,
identification_plate_number as identification_plate_number,
well_identification_plate_attached as well_identification_plate_attached,
well_status_code as well_status_code,
well.well_class_code as well_class_code,
wsc.well_subclass_code as well_subclass,
CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code,
intended_water_use_code as intended_water_use_code,
observation_well_number as observation_well_number,
obs_well_status_code as obs_well_status_code,
water_supply_system_name as water_supply_system_name,
water_supply_system_well_name as water_supply_system_well_name,
well.street_address as street_address,
well.city as city,
legal_lot as legal_lot,
legal_plan as legal_plan,
legal_district_lot as legal_district_lot,
legal_block as legal_block,
legal_section as legal_section,
legal_township as legal_township,
legal_range as legal_range,
land_district_code as land_district_code,
legal_pid as legal_pid,
well_location_description as well_location_description,
st_y(geom) as latitude,
st_x(geom) as longitude,
utm_zone_code as utm_zone_code,
utm_northing as utm_northing,
utm_easting as utm_easting,
coordinate_acquisition_code as coordinate_acquisition_code,
bcgs_id as bcgs_id,
construction_start_date as construction_start_date,
construction_end_date as construction_end_date,
alteration_start_date as alteration_start_date,
alteration_end_date as alteration_end_date,
decommission_start_date as decommission_start_date,
decommission_end_date as decommission_end_date,
driller_name as driller_name,
consultant_name as consultant_name,
consultant_company as consultant_company,
diameter as diameter,
total_depth_drilled as total_depth_drilled,
finished_well_depth as finished_well_depth,
final_casing_stick_up as final_casing_stick_up,
bedrock_depth as bedrock_depth,
ground_elevation as ground_elevation,
ground_elevation_method_code as ground_elevation_method_code,
static_water_level as static_water_level,
well_yield as well_yield,
well_yield_unit_code as well_yield_unit_code,
artesian_flow as artesian_flow,
artesian_pressure as artesian_pressure,
well_cap_type as well_cap_type,
well_disinfected_code as well_disinfected_code,
well_orientation_code as well_orientation_code,
alternative_specs_submitted as alternative_specs_submitted,
surface_seal_material_code as surface_seal_material_code,
surface_seal_method_code as surface_seal_method_code,
surface_seal_length as surface_seal_length,
surface_seal_depth as surface_seal_depth,
backfill_type as backfill_type,
backfill_depth as backfill_depth,
liner_material_code as liner_material_code,
liner_diameter as liner_diameter,
liner_thickness as liner_thickness,
surface_seal_thickness as surface_seal_thickness,
liner_from as liner_from,
liner_to as liner_to,
screen_intake_method_code as screen_intake_method_code,
screen_type_code as screen_type_code,
screen_material_code as screen_material_code,
other_screen_material as other_screen_material,
screen_information as screen_information,
screen_opening_code as screen_opening_code,
screen_bottom_code as screen_bottom_code,
other_screen_bottom as other_screen_bottom,
filter_pack_from as filter_pack_from,
filter_pack_to as filter_pack_to,
filter_pack_material_code as filter_pack_material_code,
filter_pack_thickness as filter_pack_thickness,
filter_pack_material_size_code as filter_pack_material_size_code,
development_hours as development_hours,
development_notes as development_notes,
water_quality_colour as water_quality_colour,
water_quality_odour as water_quality_odour,
yield_estimation_method_code as yield_estimation_method_code,
yield_estimation_rate as yield_estimation_rate,
yield_estimation_duration as yield_estimation_duration,
static_level_before_test as static_level_before_test,
drawdown as drawdown,
hydro_fracturing_performed as hydro_fracturing_performed,
hydro_fracturing_yield_increase as hydro_fracturing_yield_increase,
decommission_reason as decommission_reason,
decommission_method_code as decommission_method_code,
decommission_details as decommission_details,
decommission_sealant_material as decommission_sealant_material,
decommission_backfill_material as decommission_backfill_material,
comments as comments,
ems as ems,
registries_person.surname as person_responsible,
registries_organization.name as company_of_person_responsible,
aquifer_id as aquifer_id,
aquifer_vulnerability_index as avi,
storativity as storativity,
transmissivity as transmissivity,
hydraulic_conductivity as hydraulic_conductivity,
specific_storage as specific_storage,
specific_yield as specific_yield,
testing_method as testing_method,
testing_duration as testing_duration,
analytic_solution_type as analytic_solution_type,
boundary_effect_code as boundary_effect_code,
aquifer_lithology_code as aquifer_lithology_code,
artesian_pressure_head as artesian_pressure_head,
artesian_conditions as artesian_conditions
from well
left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid
left join registries_person on
registries_person.person_guid = well.person_responsible_guid
left join registries_organization on
registries_organization.org_guid = well.org_of_person_responsible_guid
left join (select well_tag_number, count(*) as cur_licences from well
join well_licences on
well.well_tag_number = well_licences.well_id
group by well_tag_number) as licence_q
on well.well_tag_number = licence_q.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Well V2
"""
CREATE_EXPORT_WELL_VIEW_SQL_V2 = """
create view export_well_v2_view as
select
well.well_tag_number as well_tag_number,
identification_plate_number as identification_plate_number,
well_identification_plate_attached as well_identification_plate_attached,
well_status_code as well_status_code,
well.well_class_code as well_class_code,
wsc.well_subclass_code as well_subclass,
CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code,
intended_water_use_code as intended_water_use_code,
observation_well_number as observation_well_number,
obs_well_status_code as obs_well_status_code,
water_supply_system_name as water_supply_system_name,
water_supply_system_well_name as water_supply_system_well_name,
well.street_address as street_address,
well.city as city,
legal_lot as legal_lot,
legal_plan as legal_plan,
legal_district_lot as legal_district_lot,
legal_block as legal_block,
legal_section as legal_section,
legal_township as legal_township,
legal_range as legal_range,
land_district_code as land_district_code,
legal_pid as legal_pid,
well_location_description as well_location_description,
st_y(geom) as "latitude_Decdeg",
st_x(geom) as "longitude_Decdeg",
utm_zone_code as utm_zone_code,
utm_northing as utm_northing,
utm_easting as utm_easting,
coordinate_acquisition_code as coordinate_acquisition_code,
bcgs_id as bcgs_id,
construction_start_date as construction_start_date,
construction_end_date as construction_end_date,
alteration_start_date as alteration_start_date,
alteration_end_date as alteration_end_date,
decommission_start_date as decommission_start_date,
decommission_end_date as decommission_end_date,
driller_name as driller_name,
consultant_name as consultant_name,
consultant_company as consultant_company,
diameter as "diameter_inches",
total_depth_drilled as "total_depth_drilled_ft-bgl",
finished_well_depth as "finished_well_depth_ft-bgl",
final_casing_stick_up as "final_casing_stick_up_inches",
bedrock_depth as "bedrock_depth_ft-bgl",
ground_elevation as "ground_elevation_ft-asl",
ground_elevation_method_code as ground_elevation_method_code,
static_water_level as "static_water_level_ft-btoc",
well_yield as "well_yield_Usgpm",
well_yield_unit_code as well_yield_unit_code,
artesian_flow as "artesian_flow_Usgpm",
artesian_pressure as "artesian_pressure_ft",
well_cap_type as well_cap_type,
well_disinfected_code as well_disinfected_code,
well_orientation_code as well_orientation_code,
alternative_specs_submitted as alternative_specs_submitted,
surface_seal_material_code as surface_seal_material_code,
surface_seal_method_code as surface_seal_method_code,
surface_seal_length as "surface_seal_length_feet",
surface_seal_depth as "surface_seal_depth_ft",
backfill_type as backfill_type,
backfill_depth as "backfill_depth_ft",
liner_material_code as liner_material_code,
liner_diameter as "liner_diameter_inches",
liner_thickness as "liner_thickness_inches",
surface_seal_thickness as "surface_seal_thickness_inches",
liner_from as "liner_from_ft-bgl",
liner_to as "liner_to_ft-bgl",
screen_intake_method_code as screen_intake_method_code,
screen_type_code as screen_type_code,
screen_material_code as screen_material_code,
other_screen_material as other_screen_material,
screen_information as screen_information,
screen_opening_code as screen_opening_code,
screen_bottom_code as screen_bottom_code,
other_screen_bottom as other_screen_bottom,
filter_pack_from as "filter_pack_from_ft",
filter_pack_to as "filter_pack_to_ft",
filter_pack_material_code as filter_pack_material_code,
filter_pack_thickness as filter_pack_thickness,
filter_pack_material_size_code as filter_pack_material_size_code,
development_hours as development_hours,
development_notes as development_notes,
water_quality_colour as water_quality_colour,
water_quality_odour as water_quality_odour,
yield_estimation_method_code as yield_estimation_method_code,
yield_estimation_rate as "yield_estimation_rate_USgpm",
yield_estimation_duration as yield_estimation_duration_hours,
static_level_before_test as "static_level_before_test_ft-btoc",
drawdown as "drawdown_ft-btoc",
hydro_fracturing_performed as hydro_fracturing_performed,
hydro_fracturing_yield_increase as hydro_fracturing_yield_increase,
decommission_reason as decommission_reason,
decommission_method_code as decommission_method_code,
decommission_details as decommission_details,
decommission_sealant_material as decommission_sealant_material,
decommission_backfill_material as decommission_backfill_material,
comments as comments,
ems as ems,
registries_person.surname as person_responsible,
registries_organization.name as company_of_person_responsible,
aquifer_id as aquifer_id,
aquifer_vulnerability_index as "avi_years",
storativity as storativity,
transmissivity as transmissivity,
hydraulic_conductivity as "hydraulic_conductivity_m/s",
specific_storage as "specific_storage_1/m",
specific_yield as specific_yield,
testing_method as testing_method,
testing_duration as testing_duration_hours,
analytic_solution_type as analytic_solution_type,
boundary_effect_code as boundary_effect_code,
aquifer_lithology_code as aquifer_lithology_code,
artesian_pressure_head as artesian_pressure_head,
artesian_conditions as artesian_conditions
from well
left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid
left join registries_person on
registries_person.person_guid = well.person_responsible_guid
left join registries_organization on
registries_organization.org_guid = well.org_of_person_responsible_guid
left join (select well_tag_number, count(*) as cur_licences from well
join well_licences on
well.well_tag_number = well_licences.well_id
group by well_tag_number) as licence_q
on well.well_tag_number = licence_q.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Lithology V1
"""
CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V1 = """
create view export_well_lithology_v1_view as
select
lithology_description.well_tag_number as well_tag_number,
lithology_from as lithology_from,
lithology_to as lithology_to,
lithology_raw_data as lithology_raw_data,
ldc.description as lithology_description_code,
lmc.description as lithology_material_code,
lhc.description as lithology_hardness_code,
lcc.description as lithology_colour_code,
water_bearing_estimated_flow as water_bearing_estimated_flow,
lithology_description.well_yield_unit_code as well_yield_unit_code,
lithology_observation as lithology_observation
from lithology_description
left join lithology_description_code as ldc on
ldc.lithology_description_code = lithology_description.lithology_description_code
left join lithology_material_code as lmc on
lmc.lithology_material_code = lithology_description.lithology_material_code
left join lithology_hardness_code as lhc on
lhc.lithology_hardness_code = lithology_description.lithology_hardness_code
left join lithology_colour_code as lcc on
lcc.lithology_colour_code = lithology_description.lithology_colour_code
inner join well on well.well_tag_number = lithology_description.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by lithology_description.well_tag_number;"""
"""
Lithology V2
"""
CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V2 = """
create view export_well_lithology_v2_view as
select
lithology_description.well_tag_number as well_tag_number,
lithology_from as "lithology_from_ft-bgl",
lithology_to as "lithology_to_ft-bgl",
lithology_raw_data as lithology_raw_data,
ldc.description as lithology_description_code,
lmc.description as lithology_material_code,
lhc.description as lithology_hardness_code,
lcc.description as lithology_colour_code,
water_bearing_estimated_flow as "water_bearing_estimated_flow_Usgpm",
lithology_description.well_yield_unit_code as well_yield_unit_code,
lithology_observation as lithology_observation
from lithology_description
left join lithology_description_code as ldc on
ldc.lithology_description_code = lithology_description.lithology_description_code
left join lithology_material_code as lmc on
lmc.lithology_material_code = lithology_description.lithology_material_code
left join lithology_hardness_code as lhc on
lhc.lithology_hardness_code = lithology_description.lithology_hardness_code
left join lithology_colour_code as lcc on
lcc.lithology_colour_code = lithology_description.lithology_colour_code
inner join well on well.well_tag_number = lithology_description.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by lithology_description.well_tag_number;"""
"""
Casing V1
"""
CREATE_EXPORT_CASING_VIEW_SQL_V1 = """
create view export_well_casing_v1_view as
select
casing.well_tag_number as well_tag_number,
casing_from as casing_from,
casing_to as casing_to,
casing.diameter as diameter,
casing_code as casing_code,
casing_material_code as casing_material_code,
wall_thickness as wall_thickness,
drive_shoe_code as drive_shoe_code
from casing
inner join well on well.well_tag_number = casing.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by casing.well_tag_number;"""
"""
Casing V2
"""
CREATE_EXPORT_CASING_VIEW_SQL_V2 = """
create view export_well_casing_v2_view as
select
casing.well_tag_number as well_tag_number,
casing_from as "casing_from_ft-bgl",
casing_to as "casing_to_ft-bgl",
casing.diameter as "diameter_inches",
casing_code as casing_code,
casing_material_code as casing_material_code,
wall_thickness as "wall_thickness_inches",
drive_shoe_code as drive_shoe_code
from casing
inner join well on well.well_tag_number = casing.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by casing.well_tag_number;"""
"""
Screen v1
"""
CREATE_EXPORT_SCREEN_VIEW_SQL_V1 = """
create view export_well_screen_v1_view as
select
screen.well_tag_number as well_tag_number,
screen_from as screen_from,
screen_to as screen_to,
screen_diameter as screen_diameter,
screen_assembly_type_code as screen_assembly_type_code,
slot_size as slot_size
from screen
inner join well on well.well_tag_number = screen.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by screen.well_tag_number;"""
"""
Screen v2
"""
CREATE_EXPORT_SCREEN_VIEW_SQL_V2 = """
create view export_well_screen_v2_view as
select
screen.well_tag_number as well_tag_number,
screen_from as "screen_from_ft-bgl",
screen_to as "screen_to_ft-bgl",
screen_diameter as "screen_diameter_inches",
screen_assembly_type_code as screen_assembly_type_code,
slot_size as slot_size
from screen
inner join well on well.well_tag_number = screen.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by screen.well_tag_number;"""
"""
Perforation v1
"""
CREATE_EXPORT_PERFORATION_VIEW_SQL_V1 = """
create view export_well_perforation_v1_view as
select
p.well_tag_number as well_tag_number,
p.liner_perforation_from as liner_perforation_from,
p.liner_perforation_to as liner_perforation_to
from liner_perforation as p
inner join well on well.well_tag_number = p.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by p.well_tag_number;"""
"""
Perforation v2
"""
CREATE_EXPORT_PERFORATION_VIEW_SQL_V2 = """
create view export_well_perforation_v2_view as
select
p.well_tag_number as well_tag_number,
p.liner_perforation_from as "liner_perforation_from_ft-bgl",
p.liner_perforation_to as "liner_perforation_to_ft-bgl"
from liner_perforation as p
inner join well on well.well_tag_number = p.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by p.well_tag_number;"""
"""
Drilling Method v1
"""
CREATE_EXPORT_DRILLING_VIEW_SQL_V1 = """
create view export_well_drilling_v1_view as
select
well_id as well_tag_number,
drillingmethodcode_id as drilling_method_code
from well_drilling_methods
inner join well on well.well_tag_number = well_drilling_methods.well_id
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Development Method v1
"""
CREATE_EXPORT_DEVELOPMENT_VIEW_SQL_V1 = """
create view export_well_development_v1_view as
select
well_id as well_tag_number,
developmentmethodcode_id as development_method_code
from well_development_methods
inner join well on well.well_tag_number = well_development_methods.well_id
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
class Migration(migrations.Migration):
dependencies = [
('wells', '0124_create_artesian_conditions_col_update_data_water_639'),
]
operations = [
migrations.RunSQL(CREATE_EXPORT_WELL_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_WELL_VIEW_SQL_V2),
migrations.RunSQL(CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V2),
migrations.RunSQL(CREATE_EXPORT_CASING_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_CASING_VIEW_SQL_V2),
migrations.RunSQL(CREATE_EXPORT_SCREEN_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_SCREEN_VIEW_SQL_V2),
migrations.RunSQL(CREATE_EXPORT_PERFORATION_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_PERFORATION_VIEW_SQL_V2),
migrations.RunSQL(CREATE_EXPORT_DRILLING_VIEW_SQL_V1),
migrations.RunSQL(CREATE_EXPORT_DEVELOPMENT_VIEW_SQL_V1)
]
|
<filename>app/backend/wells/migrations/0125_refactor_inline_sql_export_wells_water_1545.py<gh_stars>10-100
# Generated by Django 2.2.18 on 2021-03-22 18:37
from django.db import migrations
"""
Well V1
note on extra joins:
well_licences: any well having at least 1 licence entry will be marked as Licensed.
"""
CREATE_EXPORT_WELL_VIEW_SQL_V1 = """
create view export_well_v1_view as
select
well.well_tag_number as well_tag_number,
identification_plate_number as identification_plate_number,
well_identification_plate_attached as well_identification_plate_attached,
well_status_code as well_status_code,
well.well_class_code as well_class_code,
wsc.well_subclass_code as well_subclass,
CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code,
intended_water_use_code as intended_water_use_code,
observation_well_number as observation_well_number,
obs_well_status_code as obs_well_status_code,
water_supply_system_name as water_supply_system_name,
water_supply_system_well_name as water_supply_system_well_name,
well.street_address as street_address,
well.city as city,
legal_lot as legal_lot,
legal_plan as legal_plan,
legal_district_lot as legal_district_lot,
legal_block as legal_block,
legal_section as legal_section,
legal_township as legal_township,
legal_range as legal_range,
land_district_code as land_district_code,
legal_pid as legal_pid,
well_location_description as well_location_description,
st_y(geom) as latitude,
st_x(geom) as longitude,
utm_zone_code as utm_zone_code,
utm_northing as utm_northing,
utm_easting as utm_easting,
coordinate_acquisition_code as coordinate_acquisition_code,
bcgs_id as bcgs_id,
construction_start_date as construction_start_date,
construction_end_date as construction_end_date,
alteration_start_date as alteration_start_date,
alteration_end_date as alteration_end_date,
decommission_start_date as decommission_start_date,
decommission_end_date as decommission_end_date,
driller_name as driller_name,
consultant_name as consultant_name,
consultant_company as consultant_company,
diameter as diameter,
total_depth_drilled as total_depth_drilled,
finished_well_depth as finished_well_depth,
final_casing_stick_up as final_casing_stick_up,
bedrock_depth as bedrock_depth,
ground_elevation as ground_elevation,
ground_elevation_method_code as ground_elevation_method_code,
static_water_level as static_water_level,
well_yield as well_yield,
well_yield_unit_code as well_yield_unit_code,
artesian_flow as artesian_flow,
artesian_pressure as artesian_pressure,
well_cap_type as well_cap_type,
well_disinfected_code as well_disinfected_code,
well_orientation_code as well_orientation_code,
alternative_specs_submitted as alternative_specs_submitted,
surface_seal_material_code as surface_seal_material_code,
surface_seal_method_code as surface_seal_method_code,
surface_seal_length as surface_seal_length,
surface_seal_depth as surface_seal_depth,
backfill_type as backfill_type,
backfill_depth as backfill_depth,
liner_material_code as liner_material_code,
liner_diameter as liner_diameter,
liner_thickness as liner_thickness,
surface_seal_thickness as surface_seal_thickness,
liner_from as liner_from,
liner_to as liner_to,
screen_intake_method_code as screen_intake_method_code,
screen_type_code as screen_type_code,
screen_material_code as screen_material_code,
other_screen_material as other_screen_material,
screen_information as screen_information,
screen_opening_code as screen_opening_code,
screen_bottom_code as screen_bottom_code,
other_screen_bottom as other_screen_bottom,
filter_pack_from as filter_pack_from,
filter_pack_to as filter_pack_to,
filter_pack_material_code as filter_pack_material_code,
filter_pack_thickness as filter_pack_thickness,
filter_pack_material_size_code as filter_pack_material_size_code,
development_hours as development_hours,
development_notes as development_notes,
water_quality_colour as water_quality_colour,
water_quality_odour as water_quality_odour,
yield_estimation_method_code as yield_estimation_method_code,
yield_estimation_rate as yield_estimation_rate,
yield_estimation_duration as yield_estimation_duration,
static_level_before_test as static_level_before_test,
drawdown as drawdown,
hydro_fracturing_performed as hydro_fracturing_performed,
hydro_fracturing_yield_increase as hydro_fracturing_yield_increase,
decommission_reason as decommission_reason,
decommission_method_code as decommission_method_code,
decommission_details as decommission_details,
decommission_sealant_material as decommission_sealant_material,
decommission_backfill_material as decommission_backfill_material,
comments as comments,
ems as ems,
registries_person.surname as person_responsible,
registries_organization.name as company_of_person_responsible,
aquifer_id as aquifer_id,
aquifer_vulnerability_index as avi,
storativity as storativity,
transmissivity as transmissivity,
hydraulic_conductivity as hydraulic_conductivity,
specific_storage as specific_storage,
specific_yield as specific_yield,
testing_method as testing_method,
testing_duration as testing_duration,
analytic_solution_type as analytic_solution_type,
boundary_effect_code as boundary_effect_code,
aquifer_lithology_code as aquifer_lithology_code,
artesian_pressure_head as artesian_pressure_head,
artesian_conditions as artesian_conditions
from well
left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid
left join registries_person on
registries_person.person_guid = well.person_responsible_guid
left join registries_organization on
registries_organization.org_guid = well.org_of_person_responsible_guid
left join (select well_tag_number, count(*) as cur_licences from well
join well_licences on
well.well_tag_number = well_licences.well_id
group by well_tag_number) as licence_q
on well.well_tag_number = licence_q.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Well V2
"""
CREATE_EXPORT_WELL_VIEW_SQL_V2 = """
create view export_well_v2_view as
select
well.well_tag_number as well_tag_number,
identification_plate_number as identification_plate_number,
well_identification_plate_attached as well_identification_plate_attached,
well_status_code as well_status_code,
well.well_class_code as well_class_code,
wsc.well_subclass_code as well_subclass,
CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code,
intended_water_use_code as intended_water_use_code,
observation_well_number as observation_well_number,
obs_well_status_code as obs_well_status_code,
water_supply_system_name as water_supply_system_name,
water_supply_system_well_name as water_supply_system_well_name,
well.street_address as street_address,
well.city as city,
legal_lot as legal_lot,
legal_plan as legal_plan,
legal_district_lot as legal_district_lot,
legal_block as legal_block,
legal_section as legal_section,
legal_township as legal_township,
legal_range as legal_range,
land_district_code as land_district_code,
legal_pid as legal_pid,
well_location_description as well_location_description,
st_y(geom) as "latitude_Decdeg",
st_x(geom) as "longitude_Decdeg",
utm_zone_code as utm_zone_code,
utm_northing as utm_northing,
utm_easting as utm_easting,
coordinate_acquisition_code as coordinate_acquisition_code,
bcgs_id as bcgs_id,
construction_start_date as construction_start_date,
construction_end_date as construction_end_date,
alteration_start_date as alteration_start_date,
alteration_end_date as alteration_end_date,
decommission_start_date as decommission_start_date,
decommission_end_date as decommission_end_date,
driller_name as driller_name,
consultant_name as consultant_name,
consultant_company as consultant_company,
diameter as "diameter_inches",
total_depth_drilled as "total_depth_drilled_ft-bgl",
finished_well_depth as "finished_well_depth_ft-bgl",
final_casing_stick_up as "final_casing_stick_up_inches",
bedrock_depth as "bedrock_depth_ft-bgl",
ground_elevation as "ground_elevation_ft-asl",
ground_elevation_method_code as ground_elevation_method_code,
static_water_level as "static_water_level_ft-btoc",
well_yield as "well_yield_Usgpm",
well_yield_unit_code as well_yield_unit_code,
artesian_flow as "artesian_flow_Usgpm",
artesian_pressure as "artesian_pressure_ft",
well_cap_type as well_cap_type,
well_disinfected_code as well_disinfected_code,
well_orientation_code as well_orientation_code,
alternative_specs_submitted as alternative_specs_submitted,
surface_seal_material_code as surface_seal_material_code,
surface_seal_method_code as surface_seal_method_code,
surface_seal_length as "surface_seal_length_feet",
surface_seal_depth as "surface_seal_depth_ft",
backfill_type as backfill_type,
backfill_depth as "backfill_depth_ft",
liner_material_code as liner_material_code,
liner_diameter as "liner_diameter_inches",
liner_thickness as "liner_thickness_inches",
surface_seal_thickness as "surface_seal_thickness_inches",
liner_from as "liner_from_ft-bgl",
liner_to as "liner_to_ft-bgl",
screen_intake_method_code as screen_intake_method_code,
screen_type_code as screen_type_code,
screen_material_code as screen_material_code,
other_screen_material as other_screen_material,
screen_information as screen_information,
screen_opening_code as screen_opening_code,
screen_bottom_code as screen_bottom_code,
other_screen_bottom as other_screen_bottom,
filter_pack_from as "filter_pack_from_ft",
filter_pack_to as "filter_pack_to_ft",
filter_pack_material_code as filter_pack_material_code,
filter_pack_thickness as filter_pack_thickness,
filter_pack_material_size_code as filter_pack_material_size_code,
development_hours as development_hours,
development_notes as development_notes,
water_quality_colour as water_quality_colour,
water_quality_odour as water_quality_odour,
yield_estimation_method_code as yield_estimation_method_code,
yield_estimation_rate as "yield_estimation_rate_USgpm",
yield_estimation_duration as yield_estimation_duration_hours,
static_level_before_test as "static_level_before_test_ft-btoc",
drawdown as "drawdown_ft-btoc",
hydro_fracturing_performed as hydro_fracturing_performed,
hydro_fracturing_yield_increase as hydro_fracturing_yield_increase,
decommission_reason as decommission_reason,
decommission_method_code as decommission_method_code,
decommission_details as decommission_details,
decommission_sealant_material as decommission_sealant_material,
decommission_backfill_material as decommission_backfill_material,
comments as comments,
ems as ems,
registries_person.surname as person_responsible,
registries_organization.name as company_of_person_responsible,
aquifer_id as aquifer_id,
aquifer_vulnerability_index as "avi_years",
storativity as storativity,
transmissivity as transmissivity,
hydraulic_conductivity as "hydraulic_conductivity_m/s",
specific_storage as "specific_storage_1/m",
specific_yield as specific_yield,
testing_method as testing_method,
testing_duration as testing_duration_hours,
analytic_solution_type as analytic_solution_type,
boundary_effect_code as boundary_effect_code,
aquifer_lithology_code as aquifer_lithology_code,
artesian_pressure_head as artesian_pressure_head,
artesian_conditions as artesian_conditions
from well
left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid
left join registries_person on
registries_person.person_guid = well.person_responsible_guid
left join registries_organization on
registries_organization.org_guid = well.org_of_person_responsible_guid
left join (select well_tag_number, count(*) as cur_licences from well
join well_licences on
well.well_tag_number = well_licences.well_id
group by well_tag_number) as licence_q
on well.well_tag_number = licence_q.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Lithology V1
"""
CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V1 = """
create view export_well_lithology_v1_view as
select
lithology_description.well_tag_number as well_tag_number,
lithology_from as lithology_from,
lithology_to as lithology_to,
lithology_raw_data as lithology_raw_data,
ldc.description as lithology_description_code,
lmc.description as lithology_material_code,
lhc.description as lithology_hardness_code,
lcc.description as lithology_colour_code,
water_bearing_estimated_flow as water_bearing_estimated_flow,
lithology_description.well_yield_unit_code as well_yield_unit_code,
lithology_observation as lithology_observation
from lithology_description
left join lithology_description_code as ldc on
ldc.lithology_description_code = lithology_description.lithology_description_code
left join lithology_material_code as lmc on
lmc.lithology_material_code = lithology_description.lithology_material_code
left join lithology_hardness_code as lhc on
lhc.lithology_hardness_code = lithology_description.lithology_hardness_code
left join lithology_colour_code as lcc on
lcc.lithology_colour_code = lithology_description.lithology_colour_code
inner join well on well.well_tag_number = lithology_description.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by lithology_description.well_tag_number;"""
"""
Lithology V2
"""
CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V2 = """
create view export_well_lithology_v2_view as
select
lithology_description.well_tag_number as well_tag_number,
lithology_from as "lithology_from_ft-bgl",
lithology_to as "lithology_to_ft-bgl",
lithology_raw_data as lithology_raw_data,
ldc.description as lithology_description_code,
lmc.description as lithology_material_code,
lhc.description as lithology_hardness_code,
lcc.description as lithology_colour_code,
water_bearing_estimated_flow as "water_bearing_estimated_flow_Usgpm",
lithology_description.well_yield_unit_code as well_yield_unit_code,
lithology_observation as lithology_observation
from lithology_description
left join lithology_description_code as ldc on
ldc.lithology_description_code = lithology_description.lithology_description_code
left join lithology_material_code as lmc on
lmc.lithology_material_code = lithology_description.lithology_material_code
left join lithology_hardness_code as lhc on
lhc.lithology_hardness_code = lithology_description.lithology_hardness_code
left join lithology_colour_code as lcc on
lcc.lithology_colour_code = lithology_description.lithology_colour_code
inner join well on well.well_tag_number = lithology_description.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by lithology_description.well_tag_number;"""
"""
Casing V1
"""
CREATE_EXPORT_CASING_VIEW_SQL_V1 = """
create view export_well_casing_v1_view as
select
casing.well_tag_number as well_tag_number,
casing_from as casing_from,
casing_to as casing_to,
casing.diameter as diameter,
casing_code as casing_code,
casing_material_code as casing_material_code,
wall_thickness as wall_thickness,
drive_shoe_code as drive_shoe_code
from casing
inner join well on well.well_tag_number = casing.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by casing.well_tag_number;"""
"""
Casing V2
"""
CREATE_EXPORT_CASING_VIEW_SQL_V2 = """
create view export_well_casing_v2_view as
select
casing.well_tag_number as well_tag_number,
casing_from as "casing_from_ft-bgl",
casing_to as "casing_to_ft-bgl",
casing.diameter as "diameter_inches",
casing_code as casing_code,
casing_material_code as casing_material_code,
wall_thickness as "wall_thickness_inches",
drive_shoe_code as drive_shoe_code
from casing
inner join well on well.well_tag_number = casing.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by casing.well_tag_number;"""
"""
Screen v1
"""
CREATE_EXPORT_SCREEN_VIEW_SQL_V1 = """
create view export_well_screen_v1_view as
select
screen.well_tag_number as well_tag_number,
screen_from as screen_from,
screen_to as screen_to,
screen_diameter as screen_diameter,
screen_assembly_type_code as screen_assembly_type_code,
slot_size as slot_size
from screen
inner join well on well.well_tag_number = screen.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by screen.well_tag_number;"""
"""
Screen v2
"""
CREATE_EXPORT_SCREEN_VIEW_SQL_V2 = """
create view export_well_screen_v2_view as
select
screen.well_tag_number as well_tag_number,
screen_from as "screen_from_ft-bgl",
screen_to as "screen_to_ft-bgl",
screen_diameter as "screen_diameter_inches",
screen_assembly_type_code as screen_assembly_type_code,
slot_size as slot_size
from screen
inner join well on well.well_tag_number = screen.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by screen.well_tag_number;"""
"""
Perforation v1
"""
CREATE_EXPORT_PERFORATION_VIEW_SQL_V1 = """
create view export_well_perforation_v1_view as
select
p.well_tag_number as well_tag_number,
p.liner_perforation_from as liner_perforation_from,
p.liner_perforation_to as liner_perforation_to
from liner_perforation as p
inner join well on well.well_tag_number = p.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by p.well_tag_number;"""
"""
Perforation v2
"""
CREATE_EXPORT_PERFORATION_VIEW_SQL_V2 = """
create view export_well_perforation_v2_view as
select
p.well_tag_number as well_tag_number,
p.liner_perforation_from as "liner_perforation_from_ft-bgl",
p.liner_perforation_to as "liner_perforation_to_ft-bgl"
from liner_perforation as p
inner join well on well.well_tag_number = p.well_tag_number
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by p.well_tag_number;"""
"""
Drilling Method v1
"""
CREATE_EXPORT_DRILLING_VIEW_SQL_V1 = """
create view export_well_drilling_v1_view as
select
well_id as well_tag_number,
drillingmethodcode_id as drilling_method_code
from well_drilling_methods
inner join well on well.well_tag_number = well_drilling_methods.well_id
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
"""
Development Method v1
"""
CREATE_EXPORT_DEVELOPMENT_VIEW_SQL_V1 = """
create view export_well_development_v1_view as
select
well_id as well_tag_number,
developmentmethodcode_id as development_method_code
from well_development_methods
inner join well on well.well_tag_number = well_development_methods.well_id
where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null
order by well_tag_number;"""
class Migration(migrations.Migration):
    """Creates the v1/v2 well-export database views."""

    dependencies = [
        ('wells', '0124_create_artesian_conditions_col_update_data_water_639'),
    ]

    # One RunSQL operation per view, executed in this exact order.
    operations = [
        migrations.RunSQL(view_sql)
        for view_sql in (
            CREATE_EXPORT_WELL_VIEW_SQL_V1,
            CREATE_EXPORT_WELL_VIEW_SQL_V2,
            CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V1,
            CREATE_EXPORT_LITHOLOGY_VIEW_SQL_V2,
            CREATE_EXPORT_CASING_VIEW_SQL_V1,
            CREATE_EXPORT_CASING_VIEW_SQL_V2,
            CREATE_EXPORT_SCREEN_VIEW_SQL_V1,
            CREATE_EXPORT_SCREEN_VIEW_SQL_V2,
            CREATE_EXPORT_PERFORATION_VIEW_SQL_V1,
            CREATE_EXPORT_PERFORATION_VIEW_SQL_V2,
            CREATE_EXPORT_DRILLING_VIEW_SQL_V1,
            CREATE_EXPORT_DEVELOPMENT_VIEW_SQL_V1,
        )
    ]
|
en
| 0.866175
|
# Generated by Django 2.2.18 on 2021-03-22 18:37 Well V1 note on extra joins: well_licences: any well having at least 1 licence entry will be marked as Licensed. create view export_well_v1_view as select well.well_tag_number as well_tag_number, identification_plate_number as identification_plate_number, well_identification_plate_attached as well_identification_plate_attached, well_status_code as well_status_code, well.well_class_code as well_class_code, wsc.well_subclass_code as well_subclass, CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code, intended_water_use_code as intended_water_use_code, observation_well_number as observation_well_number, obs_well_status_code as obs_well_status_code, water_supply_system_name as water_supply_system_name, water_supply_system_well_name as water_supply_system_well_name, well.street_address as street_address, well.city as city, legal_lot as legal_lot, legal_plan as legal_plan, legal_district_lot as legal_district_lot, legal_block as legal_block, legal_section as legal_section, legal_township as legal_township, legal_range as legal_range, land_district_code as land_district_code, legal_pid as legal_pid, well_location_description as well_location_description, st_y(geom) as latitude, st_x(geom) as longitude, utm_zone_code as utm_zone_code, utm_northing as utm_northing, utm_easting as utm_easting, coordinate_acquisition_code as coordinate_acquisition_code, bcgs_id as bcgs_id, construction_start_date as construction_start_date, construction_end_date as construction_end_date, alteration_start_date as alteration_start_date, alteration_end_date as alteration_end_date, decommission_start_date as decommission_start_date, decommission_end_date as decommission_end_date, driller_name as driller_name, consultant_name as consultant_name, consultant_company as consultant_company, diameter as diameter, total_depth_drilled as total_depth_drilled, finished_well_depth as finished_well_depth, 
final_casing_stick_up as final_casing_stick_up, bedrock_depth as bedrock_depth, ground_elevation as ground_elevation, ground_elevation_method_code as ground_elevation_method_code, static_water_level as static_water_level, well_yield as well_yield, well_yield_unit_code as well_yield_unit_code, artesian_flow as artesian_flow, artesian_pressure as artesian_pressure, well_cap_type as well_cap_type, well_disinfected_code as well_disinfected_code, well_orientation_code as well_orientation_code, alternative_specs_submitted as alternative_specs_submitted, surface_seal_material_code as surface_seal_material_code, surface_seal_method_code as surface_seal_method_code, surface_seal_length as surface_seal_length, surface_seal_depth as surface_seal_depth, backfill_type as backfill_type, backfill_depth as backfill_depth, liner_material_code as liner_material_code, liner_diameter as liner_diameter, liner_thickness as liner_thickness, surface_seal_thickness as surface_seal_thickness, liner_from as liner_from, liner_to as liner_to, screen_intake_method_code as screen_intake_method_code, screen_type_code as screen_type_code, screen_material_code as screen_material_code, other_screen_material as other_screen_material, screen_information as screen_information, screen_opening_code as screen_opening_code, screen_bottom_code as screen_bottom_code, other_screen_bottom as other_screen_bottom, filter_pack_from as filter_pack_from, filter_pack_to as filter_pack_to, filter_pack_material_code as filter_pack_material_code, filter_pack_thickness as filter_pack_thickness, filter_pack_material_size_code as filter_pack_material_size_code, development_hours as development_hours, development_notes as development_notes, water_quality_colour as water_quality_colour, water_quality_odour as water_quality_odour, yield_estimation_method_code as yield_estimation_method_code, yield_estimation_rate as yield_estimation_rate, yield_estimation_duration as yield_estimation_duration, static_level_before_test as 
static_level_before_test, drawdown as drawdown, hydro_fracturing_performed as hydro_fracturing_performed, hydro_fracturing_yield_increase as hydro_fracturing_yield_increase, decommission_reason as decommission_reason, decommission_method_code as decommission_method_code, decommission_details as decommission_details, decommission_sealant_material as decommission_sealant_material, decommission_backfill_material as decommission_backfill_material, comments as comments, ems as ems, registries_person.surname as person_responsible, registries_organization.name as company_of_person_responsible, aquifer_id as aquifer_id, aquifer_vulnerability_index as avi, storativity as storativity, transmissivity as transmissivity, hydraulic_conductivity as hydraulic_conductivity, specific_storage as specific_storage, specific_yield as specific_yield, testing_method as testing_method, testing_duration as testing_duration, analytic_solution_type as analytic_solution_type, boundary_effect_code as boundary_effect_code, aquifer_lithology_code as aquifer_lithology_code, artesian_pressure_head as artesian_pressure_head, artesian_conditions as artesian_conditions from well left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid left join registries_person on registries_person.person_guid = well.person_responsible_guid left join registries_organization on registries_organization.org_guid = well.org_of_person_responsible_guid left join (select well_tag_number, count(*) as cur_licences from well join well_licences on well.well_tag_number = well_licences.well_id group by well_tag_number) as licence_q on well.well_tag_number = licence_q.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by well_tag_number; Well V2 create view export_well_v2_view as select well.well_tag_number as well_tag_number, identification_plate_number as identification_plate_number, well_identification_plate_attached as 
well_identification_plate_attached, well_status_code as well_status_code, well.well_class_code as well_class_code, wsc.well_subclass_code as well_subclass, CASE WHEN licence_q.cur_licences > 0 THEN 'LICENSED' ELSE 'UNLICENSED' END as licenced_status_code, intended_water_use_code as intended_water_use_code, observation_well_number as observation_well_number, obs_well_status_code as obs_well_status_code, water_supply_system_name as water_supply_system_name, water_supply_system_well_name as water_supply_system_well_name, well.street_address as street_address, well.city as city, legal_lot as legal_lot, legal_plan as legal_plan, legal_district_lot as legal_district_lot, legal_block as legal_block, legal_section as legal_section, legal_township as legal_township, legal_range as legal_range, land_district_code as land_district_code, legal_pid as legal_pid, well_location_description as well_location_description, st_y(geom) as "latitude_Decdeg", st_x(geom) as "longitude_Decdeg", utm_zone_code as utm_zone_code, utm_northing as utm_northing, utm_easting as utm_easting, coordinate_acquisition_code as coordinate_acquisition_code, bcgs_id as bcgs_id, construction_start_date as construction_start_date, construction_end_date as construction_end_date, alteration_start_date as alteration_start_date, alteration_end_date as alteration_end_date, decommission_start_date as decommission_start_date, decommission_end_date as decommission_end_date, driller_name as driller_name, consultant_name as consultant_name, consultant_company as consultant_company, diameter as "diameter_inches", total_depth_drilled as "total_depth_drilled_ft-bgl", finished_well_depth as "finished_well_depth_ft-bgl", final_casing_stick_up as "final_casing_stick_up_inches", bedrock_depth as "bedrock_depth_ft-bgl", ground_elevation as "ground_elevation_ft-asl", ground_elevation_method_code as ground_elevation_method_code, static_water_level as "static_water_level_ft-btoc", well_yield as "well_yield_Usgpm", 
well_yield_unit_code as well_yield_unit_code, artesian_flow as "artesian_flow_Usgpm", artesian_pressure as "artesian_pressure_ft", well_cap_type as well_cap_type, well_disinfected_code as well_disinfected_code, well_orientation_code as well_orientation_code, alternative_specs_submitted as alternative_specs_submitted, surface_seal_material_code as surface_seal_material_code, surface_seal_method_code as surface_seal_method_code, surface_seal_length as "surface_seal_length_feet", surface_seal_depth as "surface_seal_depth_ft", backfill_type as backfill_type, backfill_depth as "backfill_depth_ft", liner_material_code as liner_material_code, liner_diameter as "liner_diameter_inches", liner_thickness as "liner_thickness_inches", surface_seal_thickness as "surface_seal_thickness_inches", liner_from as "liner_from_ft-bgl", liner_to as "liner_to_ft-bgl", screen_intake_method_code as screen_intake_method_code, screen_type_code as screen_type_code, screen_material_code as screen_material_code, other_screen_material as other_screen_material, screen_information as screen_information, screen_opening_code as screen_opening_code, screen_bottom_code as screen_bottom_code, other_screen_bottom as other_screen_bottom, filter_pack_from as "filter_pack_from_ft", filter_pack_to as "filter_pack_to_ft", filter_pack_material_code as filter_pack_material_code, filter_pack_thickness as filter_pack_thickness, filter_pack_material_size_code as filter_pack_material_size_code, development_hours as development_hours, development_notes as development_notes, water_quality_colour as water_quality_colour, water_quality_odour as water_quality_odour, yield_estimation_method_code as yield_estimation_method_code, yield_estimation_rate as "yield_estimation_rate_USgpm", yield_estimation_duration as yield_estimation_duration_hours, static_level_before_test as "static_level_before_test_ft-btoc", drawdown as "drawdown_ft-btoc", hydro_fracturing_performed as hydro_fracturing_performed, 
hydro_fracturing_yield_increase as hydro_fracturing_yield_increase, decommission_reason as decommission_reason, decommission_method_code as decommission_method_code, decommission_details as decommission_details, decommission_sealant_material as decommission_sealant_material, decommission_backfill_material as decommission_backfill_material, comments as comments, ems as ems, registries_person.surname as person_responsible, registries_organization.name as company_of_person_responsible, aquifer_id as aquifer_id, aquifer_vulnerability_index as "avi_years", storativity as storativity, transmissivity as transmissivity, hydraulic_conductivity as "hydraulic_conductivity_m/s", specific_storage as "specific_storage_1/m", specific_yield as specific_yield, testing_method as testing_method, testing_duration as testing_duration_hours, analytic_solution_type as analytic_solution_type, boundary_effect_code as boundary_effect_code, aquifer_lithology_code as aquifer_lithology_code, artesian_pressure_head as artesian_pressure_head, artesian_conditions as artesian_conditions from well left join well_subclass_code as wsc on wsc.well_subclass_guid = well.well_subclass_guid left join registries_person on registries_person.person_guid = well.person_responsible_guid left join registries_organization on registries_organization.org_guid = well.org_of_person_responsible_guid left join (select well_tag_number, count(*) as cur_licences from well join well_licences on well.well_tag_number = well_licences.well_id group by well_tag_number) as licence_q on well.well_tag_number = licence_q.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by well_tag_number; Lithology V1 create view export_well_lithology_v1_view as select lithology_description.well_tag_number as well_tag_number, lithology_from as lithology_from, lithology_to as lithology_to, lithology_raw_data as lithology_raw_data, ldc.description as lithology_description_code, 
lmc.description as lithology_material_code, lhc.description as lithology_hardness_code, lcc.description as lithology_colour_code, water_bearing_estimated_flow as water_bearing_estimated_flow, lithology_description.well_yield_unit_code as well_yield_unit_code, lithology_observation as lithology_observation from lithology_description left join lithology_description_code as ldc on ldc.lithology_description_code = lithology_description.lithology_description_code left join lithology_material_code as lmc on lmc.lithology_material_code = lithology_description.lithology_material_code left join lithology_hardness_code as lhc on lhc.lithology_hardness_code = lithology_description.lithology_hardness_code left join lithology_colour_code as lcc on lcc.lithology_colour_code = lithology_description.lithology_colour_code inner join well on well.well_tag_number = lithology_description.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by lithology_description.well_tag_number; Lithology V2 create view export_well_lithology_v2_view as select lithology_description.well_tag_number as well_tag_number, lithology_from as "lithology_from_ft-bgl", lithology_to as "lithology_to_ft-bgl", lithology_raw_data as lithology_raw_data, ldc.description as lithology_description_code, lmc.description as lithology_material_code, lhc.description as lithology_hardness_code, lcc.description as lithology_colour_code, water_bearing_estimated_flow as "water_bearing_estimated_flow_Usgpm", lithology_description.well_yield_unit_code as well_yield_unit_code, lithology_observation as lithology_observation from lithology_description left join lithology_description_code as ldc on ldc.lithology_description_code = lithology_description.lithology_description_code left join lithology_material_code as lmc on lmc.lithology_material_code = lithology_description.lithology_material_code left join lithology_hardness_code as lhc on lhc.lithology_hardness_code 
= lithology_description.lithology_hardness_code left join lithology_colour_code as lcc on lcc.lithology_colour_code = lithology_description.lithology_colour_code inner join well on well.well_tag_number = lithology_description.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by lithology_description.well_tag_number; Casing V1 create view export_well_casing_v1_view as select casing.well_tag_number as well_tag_number, casing_from as casing_from, casing_to as casing_to, casing.diameter as diameter, casing_code as casing_code, casing_material_code as casing_material_code, wall_thickness as wall_thickness, drive_shoe_code as drive_shoe_code from casing inner join well on well.well_tag_number = casing.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by casing.well_tag_number; Casing V2 create view export_well_casing_v2_view as select casing.well_tag_number as well_tag_number, casing_from as "casing_from_ft-bgl", casing_to as "casing_to_ft-bgl", casing.diameter as "diameter_inches", casing_code as casing_code, casing_material_code as casing_material_code, wall_thickness as "wall_thickness_inches", drive_shoe_code as drive_shoe_code from casing inner join well on well.well_tag_number = casing.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by casing.well_tag_number; Screen v1 create view export_well_screen_v1_view as select screen.well_tag_number as well_tag_number, screen_from as screen_from, screen_to as screen_to, screen_diameter as screen_diameter, screen_assembly_type_code as screen_assembly_type_code, slot_size as slot_size from screen inner join well on well.well_tag_number = screen.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by screen.well_tag_number; Screen v2 create view 
export_well_screen_v2_view as select screen.well_tag_number as well_tag_number, screen_from as "screen_from_ft-bgl", screen_to as "screen_to_ft-bgl", screen_diameter as "screen_diameter_inches", screen_assembly_type_code as screen_assembly_type_code, slot_size as slot_size from screen inner join well on well.well_tag_number = screen.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by screen.well_tag_number; Perforation v1 create view export_well_perforation_v1_view as select p.well_tag_number as well_tag_number, p.liner_perforation_from as liner_perforation_from, p.liner_perforation_to as liner_perforation_to from liner_perforation as p inner join well on well.well_tag_number = p.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by p.well_tag_number; Perforation v2 create view export_well_perforation_v2_view as select p.well_tag_number as well_tag_number, p.liner_perforation_from as "liner_perforation_from_ft-bgl", p.liner_perforation_to as "liner_perforation_to_ft-bgl" from liner_perforation as p inner join well on well.well_tag_number = p.well_tag_number where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by p.well_tag_number; Drilling Method v1 create view export_well_drilling_v1_view as select well_id as well_tag_number, drillingmethodcode_id as drilling_method_code from well_drilling_methods inner join well on well.well_tag_number = well_drilling_methods.well_id where well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by well_tag_number; Development Method v1 create view export_well_development_v1_view as select well_id as well_tag_number, developmentmethodcode_id as development_method_code from well_development_methods inner join well on well.well_tag_number = well_development_methods.well_id where 
well.well_publication_status_code = 'Published' or well.well_publication_status_code = null order by well_tag_number;
| 1.956822
| 2
|
src/sst/elements/Samba/tests/streambench_mmu.py
|
Xiaoyang-Lu/sst-elements
| 2
|
6626436
|
<filename>src/sst/elements/Samba/tests/streambench_mmu.py
import sst

# ---- SST core options -------------------------------------------------------
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")

# Tell SST what statistics handling we want.
sst.setStatisticLoadLevel(4)

# ---- Simulation components --------------------------------------------------
# Miranda CPU driving the STREAM benchmark generator.
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
    "verbose" : 0,
    "generator" : "miranda.STREAMBenchGenerator",
    "clock" : "2.4GHz",
    "generatorParams.verbose" : 0,
    "generatorParams.n" : 10000,
    "generatorParams.operandwidth" : 16,
    "printStats" : 1,
})
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# 32KB, 4-way L1 cache with a stride prefetcher.
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
    "access_latency_cycles" : "2",
    "cache_frequency" : "2.4 GHz",
    "replacement_policy" : "lru",
    "coherence_protocol" : "MESI",
    "associativity" : "4",
    "cache_line_size" : "64",
    "prefetcher" : "cassini.StridePrefetcher",
    "debug" : "1",
    "L1" : "1",
    "cache_size" : "32KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# 4 GiB memory controller.
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
    "coherence_protocol" : "MESI",
    "backend.access_time" : "100 ns",
    "backend.mem_size" : "4096MiB",
    "clock" : "1GHz"
})

# Samba MMU: single core, two TLB levels.
mmu = sst.Component("mmu0", "Samba")
mmu.addParams({
    "corecount": 1,
    "page_size_L1": 4,
    "assoc_L1": 32,
    "size_L1": 32,
    "clock": "2.4GHz",
    "levels": 2,
    "max_width_L1": 2,
    "max_outstanding_L1": 2,
    "latency_L1": 1,
    "parallel_mode_L1": 0,
    "page_size_L2": 4,
    "assoc_L2": 8,
    "size_L2": 128,
    "max_outstanding_L2": 2,
    "max_width_L2": 2,
    "latency_L2": 10,
    "upper_link_L1": 1, # we assume same link latency of up and down traffic of the link
    "upper_link_L2": 1,
    "parallel_mode_L2": 0
})  # review: dropped a stray trailing ';' present in the original
mmu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# ---- Simulation links -------------------------------------------------------
# CPU -> MMU -> L1 -> memory. (A multi-core variant would create one
# cpu_to_mmu<n>/mmu_to_cache<n> link pair per core; the commented-out
# per-core wiring example from the original was removed as dead code.)
link_cpu_mmu_link = sst.Link("link_cpu_mmu_link")
link_mmu_cache_link = sst.Link("link_mmu_cache_link")

link_cpu_mmu_link.connect( (comp_cpu, "cache_link", "50ps"), (mmu, "cpu_to_mmu0", "50ps") )
link_cpu_mmu_link.setNoCut()
link_mmu_cache_link.connect( (mmu, "mmu_to_cache0", "50ps"), (comp_l1cache, "high_network_0", "50ps") )
link_mmu_cache_link.setNoCut()

link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
<filename>src/sst/elements/Samba/tests/streambench_mmu.py
import sst

# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")

# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)

# Define the simulation components
# Miranda CPU driving the STREAM benchmark generator.
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
    "verbose" : 0,
    "generator" : "miranda.STREAMBenchGenerator",
    "clock" : "2.4GHz",
    "generatorParams.verbose" : 0,
    "generatorParams.n" : 10000,
    "generatorParams.operandwidth" : 16,
    "printStats" : 1,
})
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# 32KB, 4-way L1 cache with a stride prefetcher.
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
    "access_latency_cycles" : "2",
    "cache_frequency" : "2.4 GHz",
    "replacement_policy" : "lru",
    "coherence_protocol" : "MESI",
    "associativity" : "4",
    "cache_line_size" : "64",
    "prefetcher" : "cassini.StridePrefetcher",
    "debug" : "1",
    "L1" : "1",
    "cache_size" : "32KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# 4 GiB memory controller.
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
    "coherence_protocol" : "MESI",
    "backend.access_time" : "100 ns",
    "backend.mem_size" : "4096MiB",
    "clock" : "1GHz"
})

# Samba MMU: single core, two TLB levels.
mmu = sst.Component("mmu0", "Samba")
mmu.addParams({
    "corecount": 1,
    "page_size_L1": 4,
    "assoc_L1": 32,
    "size_L1": 32,
    "clock": "2.4GHz",
    "levels": 2,
    "max_width_L1": 2,
    "max_outstanding_L1": 2,
    "latency_L1": 1,
    "parallel_mode_L1": 0,
    "page_size_L2": 4,
    "assoc_L2": 8,
    "size_L2": 128,
    "max_outstanding_L2": 2,
    "max_width_L2": 2,
    "latency_L2": 10,
    "upper_link_L1": 1, # we assume same link latency of up and down traffic of the link
    "upper_link_L2": 1,
    "parallel_mode_L2": 0
});
mmu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# Define the simulation links
# Wiring: CPU -> MMU -> L1 -> memory.
link_cpu_mmu_link = sst.Link("link_cpu_mmu_link")
link_mmu_cache_link = sst.Link("link_mmu_cache_link")
# NOTE(review): the triple-quoted string below is inert dead code kept from
# the original — a per-core wiring example for a multi-core setup.
'''
arielMMULink = sst.Link("cpu_mmu_link_" + str(next_core_id))
MMUCacheLink = sst.Link("mmu_cache_link_" + str(next_core_id))
arielMMULink.connect((ariel, "cache_link_%d"%next_core_id, ring_latency), (mmu, "cpu_to_mmu%d"%next_core_id, ring_latency))
MMUCacheLink.connect((mmu, "mmu_to_cache%d"%next_core_id, ring_latency), (l1, "high_network_0", ring_latency))
arielMMULink.setNoCut()
MMUCacheLink.setNoCut()
'''
link_cpu_mmu_link.connect( (comp_cpu, "cache_link", "50ps"), (mmu, "cpu_to_mmu0", "50ps") )
link_cpu_mmu_link.setNoCut()
link_mmu_cache_link.connect( (mmu, "mmu_to_cache0", "50ps"), (comp_l1cache, "high_network_0", "50ps") )
link_mmu_cache_link.setNoCut()

link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
|
en
| 0.628216
|
# Define SST core options # Tell SST what statistics handling we want # Define the simulation components # Enable statistics outputs # Enable statistics outputs # we assume same link latency of up and down traffic of the link # Define the simulation links arielMMULink = sst.Link("cpu_mmu_link_" + str(next_core_id)) MMUCacheLink = sst.Link("mmu_cache_link_" + str(next_core_id)) arielMMULink.connect((ariel, "cache_link_%d"%next_core_id, ring_latency), (mmu, "cpu_to_mmu%d"%next_core_id, ring_latency)) MMUCacheLink.connect((mmu, "mmu_to_cache%d"%next_core_id, ring_latency), (l1, "high_network_0", ring_latency)) arielMMULink.setNoCut() MMUCacheLink.setNoCut()
| 1.943359
| 2
|
main.py
|
cleysonsilvame/desafio-cromai-api
| 0
|
6626437
|
from flask import Flask
from flask_restful import Api, Resource, reqparse
from math import hypot
from flask_cors import CORS

app = Flask(__name__)
api = Api(app)
cors = CORS(app)

# Review fix: register the expected arguments once at startup instead of
# re-registering them on the shared module-level parser on every request.
# type=float parses the values directly; required=True makes a missing
# argument a 400 error rather than an unhandled TypeError.
parser = reqparse.RequestParser()
parser.add_argument('opposite_side', type=float, required=True)
parser.add_argument('adjacent_side', type=float, required=True)


class Pitagoras(Resource):
    """Computes the hypotenuse of a right triangle from its two legs."""

    def post(self):
        """Return {"hypotenuse": value} rounded to 2 decimal places."""
        args = parser.parse_args()
        hypotenuse = hypot(args['opposite_side'], args['adjacent_side'])
        formatted_hypotenuse = float("{:.2f}".format(hypotenuse))
        return {"hypotenuse": formatted_hypotenuse}


api.add_resource(Pitagoras, "/calculate")

if __name__ == "__main__":
    app.run(debug=True)
|
from flask import Flask
from flask_restful import Api, Resource, reqparse
from math import hypot
from flask_cors import CORS

app = Flask(__name__)
api = Api(app)
cors = CORS(app)
parser = reqparse.RequestParser()


class Pitagoras(Resource):
    """REST resource computing a right triangle's hypotenuse via POST."""

    def post(self):
        # Register the two expected request arguments, then parse them.
        for side_name in ('opposite_side', 'adjacent_side'):
            parser.add_argument(side_name)
        parsed = parser.parse_args()

        leg_a = float(parsed['opposite_side'])
        leg_b = float(parsed['adjacent_side'])

        # Round the hypotenuse to two decimal places before returning.
        result = float("{:.2f}".format(hypot(leg_a, leg_b)))
        return {"hypotenuse": result}


api.add_resource(Pitagoras, "/calculate")

if __name__ == "__main__":
    app.run(debug=True)
|
none
| 1
| 2.902121
| 3
|
|
mosaic.py
|
surpass19/mosaic
| 430
|
6626438
|
import sys
import os
from PIL import Image, ImageOps
from multiprocessing import Process, Queue, cpu_count
# Change these 3 config parameters to suit your needs...
TILE_SIZE = 50  # height/width of mosaic tiles in pixels
TILE_MATCH_RES = 5  # tile matching resolution (higher values give better fit but require more processing)
ENLARGEMENT = 8  # the mosaic image will be this many times wider and taller than the original

# Derived settings.
# Downsampling factor for the small comparison tiles (clamped so it is >= 1).
TILE_BLOCK_SIZE = TILE_SIZE / max(min(TILE_MATCH_RES, TILE_SIZE), 1)
# Tile-fitting worker processes: all cores but one, minimum 1.
WORKER_COUNT = max(cpu_count() - 1, 1)
# Output path for the finished mosaic.
OUT_FILE = 'mosaic.jpeg'
# Sentinel value pushed through the work/result queues to signal end-of-queue.
EOQ_VALUE = None
class TileProcessor:
    """Loads tile images from a directory and prepares them for matching.

    Each tile is produced in two sizes: a large version used to render the
    final mosaic and a small downsampled version used for the colour search.
    """

    def __init__(self, tiles_directory):
        self.tiles_directory = tiles_directory

    def __process_tile(self, tile_path):
        """Return (large_tile, small_tile) RGB images, or (None, None) on failure."""
        try:
            img = Image.open(tile_path)
            # Apply any EXIF orientation so photos are not rotated/mirrored.
            img = ImageOps.exif_transpose(img)

            # tiles must be square, so get the largest square that fits
            # inside the image (centre crop)
            w = img.size[0]
            h = img.size[1]
            min_dimension = min(w, h)
            w_crop = (w - min_dimension) / 2
            h_crop = (h - min_dimension) / 2
            img = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))

            # Review fix: Image.ANTIALIAS was removed in Pillow 10;
            # Image.LANCZOS is the same filter under its canonical name.
            large_tile_img = img.resize((TILE_SIZE, TILE_SIZE), Image.LANCZOS)
            small_tile_img = img.resize((int(TILE_SIZE/TILE_BLOCK_SIZE), int(TILE_SIZE/TILE_BLOCK_SIZE)), Image.LANCZOS)

            return (large_tile_img.convert('RGB'), small_tile_img.convert('RGB'))
        except Exception:
            # Deliberately best-effort: skip anything that is not a readable
            # image. Review fix: was a bare "except:", which would also
            # swallow KeyboardInterrupt/SystemExit.
            return (None, None)

    def get_tiles(self):
        """Walk the tiles directory and return (large_tiles, small_tiles) lists."""
        large_tiles = []
        small_tiles = []

        print('Reading tiles from {}...'.format(self.tiles_directory))

        # search the tiles directory recursively
        for root, subFolders, files in os.walk(self.tiles_directory):
            for tile_name in files:
                print('Reading {:40.40}'.format(tile_name), flush=True, end='\r')
                tile_path = os.path.join(root, tile_name)
                large_tile, small_tile = self.__process_tile(tile_path)
                if large_tile:
                    large_tiles.append(large_tile)
                    small_tiles.append(small_tile)

        print('Processed {} tiles.'.format(len(large_tiles)))
        return (large_tiles, small_tiles)
class TargetImage:
    """Loads and prepares the image the mosaic will reproduce."""

    def __init__(self, image_path):
        self.image_path = image_path

    def get_data(self):
        """Return (large_img, small_img): enlarged RGB target and its downsampled copy."""
        print('Processing main image...')
        img = Image.open(self.image_path)
        w = img.size[0] * ENLARGEMENT
        h = img.size[1] * ENLARGEMENT
        # Review fix: Image.ANTIALIAS was removed in Pillow 10;
        # Image.LANCZOS is the same filter under its canonical name.
        large_img = img.resize((w, h), Image.LANCZOS)

        w_diff = (w % TILE_SIZE)/2
        h_diff = (h % TILE_SIZE)/2

        # if necessary, crop the image slightly so we use a whole number of
        # tiles horizontally and vertically
        if w_diff or h_diff:
            large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))

        small_img = large_img.resize((int(w/TILE_BLOCK_SIZE), int(h/TILE_BLOCK_SIZE)), Image.LANCZOS)

        image_data = (large_img.convert('RGB'), small_img.convert('RGB'))

        print('Main image processed.')
        return image_data
class TileFitter:
    """Finds, for a region of the target image, the closest-matching tile."""

    def __init__(self, tiles_data):
        # tiles_data: list of tiles, each a flat list of (r, g, b) tuples.
        self.tiles_data = tiles_data

    def __get_tile_diff(self, t1, t2, bail_out_value):
        """Sum of squared per-channel differences between two pixel lists.

        Returns early once the running total exceeds bail_out_value, since
        the caller only cares about diffs below the current minimum.
        Assumes t1 and t2 have the same length.
        """
        diff = 0
        for p1, p2 in zip(t1, t2):
            diff += ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2 + (p1[2] - p2[2])**2)
            if diff > bail_out_value:
                # we know already that this isn't going to be the best fit,
                # so no point continuing with this tile
                return diff
        return diff

    def get_best_fit_tile(self, img_data):
        """Return the index of the tile closest to img_data (None if no tiles)."""
        best_fit_tile_index = None
        min_diff = sys.maxsize

        # go through each tile in turn looking for the best match for the
        # part of the image represented by 'img_data'
        for tile_index, tile_data in enumerate(self.tiles_data):
            diff = self.__get_tile_diff(img_data, tile_data, min_diff)
            if diff < min_diff:
                min_diff = diff
                best_fit_tile_index = tile_index

        return best_fit_tile_index
def fit_tiles(work_queue, result_queue, tiles_data):
    # this function gets run by the worker processes, one on each CPU core
    #
    # Consumes (pixel_data, coords) items from work_queue, finds the best
    # tile for each, and puts (coords, tile_index) on result_queue. Stops
    # when the EOQ_VALUE sentinel arrives.
    tile_fitter = TileFitter(tiles_data)

    while True:
        try:
            img_data, img_coords = work_queue.get(True)  # blocking get
            if img_data == EOQ_VALUE:
                break
            tile_index = tile_fitter.get_best_fit_tile(img_data)
            result_queue.put((img_coords, tile_index))
        except KeyboardInterrupt:
            # Ctrl-C is ignored here so the worker keeps looping until the
            # sentinel arrives — NOTE(review): presumably shutdown is driven
            # by the main process; confirm against the caller.
            pass

    # let the result handler know that this worker has finished everything
    result_queue.put((EOQ_VALUE, EOQ_VALUE))
class ProgressCounter:
    """Prints a single-line percentage progress indicator to stdout."""

    def __init__(self, total):
        # Total number of work items expected.
        self.total = total
        self.counter = 0

    def update(self):
        """Record one completed item and redraw the progress line."""
        self.counter += 1
        percent_done = 100 * self.counter / self.total
        print("Progress: {:04.1f}%".format(percent_done), flush=True, end='\r')
class MosaicImage:
    """Output canvas: collects best-fit tiles into the final mosaic."""
    def __init__(self, original_img):
        # blank image matching the (enlarged, cropped) target's mode and size
        self.image = Image.new(original_img.mode, original_img.size)
        # how many whole tiles fit horizontally / vertically
        self.x_tile_count = int(original_img.size[0] / TILE_SIZE)
        self.y_tile_count = int(original_img.size[1] / TILE_SIZE)
        self.total_tiles = self.x_tile_count * self.y_tile_count
    def add_tile(self, tile_data, coords):
        """Paste one tile (a flat list of RGB pixels) at pixel box *coords*."""
        img = Image.new('RGB', (TILE_SIZE, TILE_SIZE))
        img.putdata(tile_data)
        self.image.paste(img, coords)
    def save(self, path):
        """Write the mosaic image to *path*."""
        self.image.save(path)
def build_mosaic(result_queue, all_tile_data_large, original_img_large):
    """Builder-process loop: paste fitted tiles into the output image.

    Runs until every worker has sent its EOQ sentinel, then saves the
    (possibly partial) mosaic to OUT_FILE.
    """
    mosaic = MosaicImage(original_img_large)
    active_workers = WORKER_COUNT
    while True:
        try:
            img_coords, best_fit_tile_index = result_queue.get()
            if img_coords == EOQ_VALUE:
                # one worker finished; stop once all of them have
                active_workers -= 1
                if not active_workers:
                    break
            else:
                tile_data = all_tile_data_large[best_fit_tile_index]
                mosaic.add_tile(tile_data, img_coords)
        except KeyboardInterrupt:
            # Ctrl-C handling is done in the main process
            pass
    mosaic.save(OUT_FILE)
    print('\nFinished, output is in', OUT_FILE)
def compose(original_img, tiles):
    """Drive the mosaic build: fan tile-matching work out to worker
    processes and stream their results to the builder process.

    original_img: (large, small) images from TargetImage.get_data().
    tiles: ([large], [small]) tile lists from TileProcessor.get_tiles().
    """
    print('Building mosaic, press Ctrl-C to abort...')
    original_img_large, original_img_small = original_img
    tiles_large, tiles_small = tiles
    # Used here only for its tile-count arithmetic; the builder process
    # creates its own MosaicImage to paste into.
    mosaic = MosaicImage(original_img_large)
    all_tile_data_large = [list(tile.getdata()) for tile in tiles_large]
    all_tile_data_small = [list(tile.getdata()) for tile in tiles_small]
    # Bounded work queue applies back-pressure on the producer loop below.
    work_queue = Queue(WORKER_COUNT)
    result_queue = Queue()
    try:
        # start the worker processes that will build the mosaic image
        Process(target=build_mosaic, args=(result_queue, all_tile_data_large, original_img_large)).start()
        # start the worker processes that will perform the tile fitting
        for n in range(WORKER_COUNT):
            Process(target=fit_tiles, args=(work_queue, result_queue, all_tile_data_small)).start()
        progress = ProgressCounter(mosaic.x_tile_count * mosaic.y_tile_count)
        for x in range(mosaic.x_tile_count):
            for y in range(mosaic.y_tile_count):
                # pixel box in the enlarged image where this tile is pasted
                large_box = (x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE)
                # matching region in the downscaled image
                # NOTE(review): these coordinates are floats because
                # TILE_BLOCK_SIZE comes from true division - confirm PIL's
                # crop handles that as intended before changing.
                small_box = (x * TILE_SIZE/TILE_BLOCK_SIZE, y * TILE_SIZE/TILE_BLOCK_SIZE, (x + 1) * TILE_SIZE/TILE_BLOCK_SIZE, (y + 1) * TILE_SIZE/TILE_BLOCK_SIZE)
                work_queue.put((list(original_img_small.crop(small_box).getdata()), large_box))
                progress.update()
    except KeyboardInterrupt:
        print('\nHalting, saving partial image please wait...')
    finally:
        # put these special values onto the queue to let the workers know they can terminate
        for n in range(WORKER_COUNT):
            work_queue.put((EOQ_VALUE, EOQ_VALUE))
def mosaic(img_path, tiles_path):
    """Build a photomosaic of *img_path* from the images under *tiles_path*."""
    image_data = TargetImage(img_path).get_data()
    tiles_data = TileProcessor(tiles_path).get_tiles()
    compose(image_data, tiles_data)
if __name__ == '__main__':
    # Command-line entry point: <script> <image> <tiles directory>
    if len(sys.argv) < 3:
        print('Usage: {} <image> <tiles directory>\r'.format(sys.argv[0]))
    else:
        mosaic(sys.argv[1], sys.argv[2])
|
import sys
import os
from PIL import Image, ImageOps
from multiprocessing import Process, Queue, cpu_count
# Change these 3 config parameters to suit your needs...
TILE_SIZE = 50 # height/width of mosaic tiles in pixels
TILE_MATCH_RES = 5 # tile matching resolution (higher values give better fit but require more processing)
ENLARGEMENT = 8 # the mosaic image will be this many times wider and taller than the original
# Derived: side length of the downscaled blocks used for colour matching.
# NOTE(review): true division makes this a float; callers wrap it in int().
TILE_BLOCK_SIZE = TILE_SIZE / max(min(TILE_MATCH_RES, TILE_SIZE), 1)
WORKER_COUNT = max(cpu_count() - 1, 1)  # leave one core for the builder process
OUT_FILE = 'mosaic.jpeg'  # output filename
EOQ_VALUE = None  # end-of-queue sentinel passed between processes
class TileProcessor:
    """Loads every image under a directory and prepares it as a mosaic tile."""

    def __init__(self, tiles_directory):
        self.tiles_directory = tiles_directory

    def __process_tile(self, tile_path):
        """Return (large, small) RGB tile images, or (None, None) on failure.

        The large tile is TILE_SIZE square; the small one is the downscaled
        version used for colour matching.
        """
        try:
            img = Image.open(tile_path)
            img = ImageOps.exif_transpose(img)  # honour EXIF orientation
            # tiles must be square, so get the largest square that fits inside the image
            w = img.size[0]
            h = img.size[1]
            min_dimension = min(w, h)
            w_crop = (w - min_dimension) / 2
            h_crop = (h - min_dimension) / 2
            img = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))
            large_tile_img = img.resize((TILE_SIZE, TILE_SIZE), Image.ANTIALIAS)
            small_tile_img = img.resize((int(TILE_SIZE/TILE_BLOCK_SIZE), int(TILE_SIZE/TILE_BLOCK_SIZE)), Image.ANTIALIAS)
            return (large_tile_img.convert('RGB'), small_tile_img.convert('RGB'))
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit; non-image files are still skipped gracefully.
            return (None, None)

    def get_tiles(self):
        """Walk the tiles directory and return ([large_tiles], [small_tiles])."""
        large_tiles = []
        small_tiles = []
        print('Reading tiles from {}...'.format(self.tiles_directory))
        # search the tiles directory recursively
        for root, subFolders, files in os.walk(self.tiles_directory):
            for tile_name in files:
                print('Reading {:40.40}'.format(tile_name), flush=True, end='\r')
                tile_path = os.path.join(root, tile_name)
                large_tile, small_tile = self.__process_tile(tile_path)
                if large_tile:
                    large_tiles.append(large_tile)
                    small_tiles.append(small_tile)
        print('Processed {} tiles.'.format(len(large_tiles)))
        return (large_tiles, small_tiles)
class TargetImage:
    """Loads the main picture and prepares the large/small working copies."""

    def __init__(self, image_path):
        self.image_path = image_path

    def get_data(self):
        """Return (large_img, small_img) as RGB images.

        The large copy is the enlarged target the mosaic is built against;
        the small copy is downscaled for tile matching.
        """
        print('Processing main image...')
        img = Image.open(self.image_path)
        w = img.size[0] * ENLARGEMENT
        h = img.size[1] * ENLARGEMENT
        large_img = img.resize((w, h), Image.ANTIALIAS)
        w_diff = (w % TILE_SIZE) / 2
        h_diff = (h % TILE_SIZE) / 2
        # if necessary, crop slightly so a whole number of tiles fits each way
        if w_diff or h_diff:
            large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))
        small_size = (int(w / TILE_BLOCK_SIZE), int(h / TILE_BLOCK_SIZE))
        small_img = large_img.resize(small_size, Image.ANTIALIAS)
        image_data = (large_img.convert('RGB'), small_img.convert('RGB'))
        print('Main image processed.')
        return image_data
class TileFitter:
    """Selects the closest-matching tile for an image region, scored by the
    sum of squared per-channel RGB differences."""

    def __init__(self, tiles_data):
        self.tiles_data = tiles_data

    def __get_tile_diff(self, t1, t2, bail_out_value):
        """Accumulate squared channel differences; abort early once the
        running total exceeds *bail_out_value* (the current best score)."""
        score = 0
        for pixel_a, pixel_b in zip(t1, t2):
            score += (
                (pixel_a[0] - pixel_b[0]) ** 2
                + (pixel_a[1] - pixel_b[1]) ** 2
                + (pixel_a[2] - pixel_b[2]) ** 2
            )
            if score > bail_out_value:
                return score
        return score

    def get_best_fit_tile(self, img_data):
        """Return the index of the lowest-difference tile, or None when the
        tile list is empty."""
        winner = None
        lowest = sys.maxsize
        for idx, tile_data in enumerate(self.tiles_data):
            candidate = self.__get_tile_diff(img_data, tile_data, lowest)
            if candidate < lowest:
                lowest = candidate
                winner = idx
        return winner
def fit_tiles(work_queue, result_queue, tiles_data):
    """Worker-process loop: pull image regions off *work_queue*, find the
    best-matching tile for each, and push ``(coords, tile_index)`` onto
    *result_queue* until the EOQ sentinel is received.
    """
    # this function gets run by the worker processes, one on each CPU core
    tile_fitter = TileFitter(tiles_data)
    while True:
        try:
            # blocking get: waits until the producer enqueues more work
            img_data, img_coords = work_queue.get(True)
            if img_data == EOQ_VALUE:
                # sentinel from the producer: no more work is coming
                break
            tile_index = tile_fitter.get_best_fit_tile(img_data)
            result_queue.put((img_coords, tile_index))
        except KeyboardInterrupt:
            # Ctrl-C is handled by the main process; workers keep draining
            pass
    # let the result handler know that this worker has finished everything
    result_queue.put((EOQ_VALUE, EOQ_VALUE))
class ProgressCounter:
    """Tracks completed work items and prints an in-place percentage."""

    def __init__(self, total):
        self.total = total
        self.counter = 0

    def update(self):
        """Count one more finished item and refresh the console readout."""
        self.counter += 1
        done = 100 * self.counter / self.total
        print("Progress: {:04.1f}%".format(done), flush=True, end='\r')
class MosaicImage:
    """Output canvas: collects best-fit tiles into the final mosaic."""
    def __init__(self, original_img):
        # blank image matching the (enlarged, cropped) target's mode and size
        self.image = Image.new(original_img.mode, original_img.size)
        # how many whole tiles fit horizontally / vertically
        self.x_tile_count = int(original_img.size[0] / TILE_SIZE)
        self.y_tile_count = int(original_img.size[1] / TILE_SIZE)
        self.total_tiles = self.x_tile_count * self.y_tile_count
    def add_tile(self, tile_data, coords):
        """Paste one tile (a flat list of RGB pixels) at pixel box *coords*."""
        img = Image.new('RGB', (TILE_SIZE, TILE_SIZE))
        img.putdata(tile_data)
        self.image.paste(img, coords)
    def save(self, path):
        """Write the mosaic image to *path*."""
        self.image.save(path)
def build_mosaic(result_queue, all_tile_data_large, original_img_large):
    """Builder-process loop: paste fitted tiles into the output image.

    Runs until every worker has sent its EOQ sentinel, then saves the
    (possibly partial) mosaic to OUT_FILE.
    """
    mosaic = MosaicImage(original_img_large)
    active_workers = WORKER_COUNT
    while True:
        try:
            img_coords, best_fit_tile_index = result_queue.get()
            if img_coords == EOQ_VALUE:
                # one worker finished; stop once all of them have
                active_workers -= 1
                if not active_workers:
                    break
            else:
                tile_data = all_tile_data_large[best_fit_tile_index]
                mosaic.add_tile(tile_data, img_coords)
        except KeyboardInterrupt:
            # Ctrl-C handling is done in the main process
            pass
    mosaic.save(OUT_FILE)
    print('\nFinished, output is in', OUT_FILE)
def compose(original_img, tiles):
    """Drive the mosaic build: fan tile-matching work out to worker
    processes and stream their results to the builder process.

    original_img: (large, small) images from TargetImage.get_data().
    tiles: ([large], [small]) tile lists from TileProcessor.get_tiles().
    """
    print('Building mosaic, press Ctrl-C to abort...')
    original_img_large, original_img_small = original_img
    tiles_large, tiles_small = tiles
    # Used here only for its tile-count arithmetic; the builder process
    # creates its own MosaicImage to paste into.
    mosaic = MosaicImage(original_img_large)
    all_tile_data_large = [list(tile.getdata()) for tile in tiles_large]
    all_tile_data_small = [list(tile.getdata()) for tile in tiles_small]
    # Bounded work queue applies back-pressure on the producer loop below.
    work_queue = Queue(WORKER_COUNT)
    result_queue = Queue()
    try:
        # start the worker processes that will build the mosaic image
        Process(target=build_mosaic, args=(result_queue, all_tile_data_large, original_img_large)).start()
        # start the worker processes that will perform the tile fitting
        for n in range(WORKER_COUNT):
            Process(target=fit_tiles, args=(work_queue, result_queue, all_tile_data_small)).start()
        progress = ProgressCounter(mosaic.x_tile_count * mosaic.y_tile_count)
        for x in range(mosaic.x_tile_count):
            for y in range(mosaic.y_tile_count):
                # pixel box in the enlarged image where this tile is pasted
                large_box = (x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE)
                # matching region in the downscaled image
                # NOTE(review): these coordinates are floats because
                # TILE_BLOCK_SIZE comes from true division - confirm PIL's
                # crop handles that as intended before changing.
                small_box = (x * TILE_SIZE/TILE_BLOCK_SIZE, y * TILE_SIZE/TILE_BLOCK_SIZE, (x + 1) * TILE_SIZE/TILE_BLOCK_SIZE, (y + 1) * TILE_SIZE/TILE_BLOCK_SIZE)
                work_queue.put((list(original_img_small.crop(small_box).getdata()), large_box))
                progress.update()
    except KeyboardInterrupt:
        print('\nHalting, saving partial image please wait...')
    finally:
        # put these special values onto the queue to let the workers know they can terminate
        for n in range(WORKER_COUNT):
            work_queue.put((EOQ_VALUE, EOQ_VALUE))
def mosaic(img_path, tiles_path):
    """Build a photomosaic of *img_path* from the images under *tiles_path*."""
    image_data = TargetImage(img_path).get_data()
    tiles_data = TileProcessor(tiles_path).get_tiles()
    compose(image_data, tiles_data)
if __name__ == '__main__':
    # Command-line entry point: <script> <image> <tiles directory>
    if len(sys.argv) < 3:
        print('Usage: {} <image> <tiles directory>\r'.format(sys.argv[0]))
    else:
        mosaic(sys.argv[1], sys.argv[2])
|
en
| 0.870116
|
# Change these 3 config parameters to suit your needs... # height/width of mosaic tiles in pixels # tile matching resolution (higher values give better fit but require more processing) # the mosaic image will be this many times wider and taller than the original # tiles must be square, so get the largest square that fits inside the image # search the tiles directory recursively # if necessary, crop the image slightly so we use a whole number of tiles horizontally and vertically #diff += (abs(t1[i][0] - t2[i][0]) + abs(t1[i][1] - t2[i][1]) + abs(t1[i][2] - t2[i][2])) # we know already that this isn't going to be the best fit, so no point continuing with this tile # go through each tile in turn looking for the best match for the part of the image represented by 'img_data' # this function gets run by the worker processes, one on each CPU core # let the result handler know that this worker has finished everything # start the worker processes that will build the mosaic image # start the worker processes that will perform the tile fitting # put these special values onto the queue to let the workers know they can terminate
| 2.824352
| 3
|
homeassistant/components/ozw/migration.py
|
tbarbette/core
| 11
|
6626439
|
<filename>homeassistant/components/ozw/migration.py
"""Provide tools for migrating from the zwave integration."""
from homeassistant.helpers.device_registry import (
async_get_registry as async_get_device_registry,
)
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_entity_registry,
)
from .const import DOMAIN, MIGRATED, NODES_VALUES
from .entity import create_device_id, create_value_id
# The following dicts map labels between OpenZWave 1.4 and 1.6.
# Meter command class (id 50): legacy zwave label -> ozw label.
METER_CC_LABELS = {
    "Energy": "Electric - kWh",
    "Power": "Electric - W",
    "Count": "Electric - Pulses",
    "Voltage": "Electric - V",
    "Current": "Electric - A",
    "Power Factor": "Electric - PF",
}
# Notification command class (id 113): legacy zwave label -> ozw label.
NOTIFICATION_CC_LABELS = {
    "General": "Start",
    "Smoke": "Smoke Alarm",
    "Carbon Monoxide": "Carbon Monoxide",
    "Carbon Dioxide": "Carbon Dioxide",
    "Heat": "Heat",
    "Flood": "Water",
    "Access Control": "Access Control",
    "Burglar": "Home Security",
    "Power Management": "Power Management",
    "System": "System",
    "Emergency": "Emergency",
    "Clock": "Clock",
    "Appliance": "Appliance",
    "HomeHealth": "Home Health",
}
# Command class id -> label translation table for that class.
CC_ID_LABELS = {
    50: METER_CC_LABELS,
    113: NOTIFICATION_CC_LABELS,
}
async def async_get_migration_data(hass):
    """Return dict with ozw side migration info.

    Maps each ozw entity unique_id to the node/value details plus the
    registry entries needed to pair it with its legacy zwave counterpart.
    """
    data = {}
    nodes_values = hass.data[DOMAIN][NODES_VALUES]
    ozw_config_entries = hass.config_entries.async_entries(DOMAIN)
    config_entry = ozw_config_entries[0]  # ozw only has a single config entry
    ent_reg = await async_get_entity_registry(hass)
    entity_entries = async_entries_for_config_entry(ent_reg, config_entry.entry_id)
    unique_entries = {entry.unique_id: entry for entry in entity_entries}
    dev_reg = await async_get_device_registry(hass)
    for node_id, node_values in nodes_values.items():
        for entity_values in node_values:
            unique_id = create_value_id(entity_values.primary)
            if unique_id not in unique_entries:
                # value has no registered entity; nothing to migrate
                continue
            node = entity_values.primary.node
            device_identifier = (
                DOMAIN,
                create_device_id(node, entity_values.primary.instance),
            )
            device_entry = dev_reg.async_get_device({device_identifier}, set())
            data[unique_id] = {
                "node_id": node_id,
                "node_instance": entity_values.primary.instance,
                "device_id": device_entry.id,
                "command_class": entity_values.primary.command_class.value,
                "command_class_label": entity_values.primary.label,
                "value_index": entity_values.primary.index,
                "unique_id": unique_id,
                "entity_entry": unique_entries[unique_id],
            }
    return data
def map_node_values(zwave_data, ozw_data):
    """Map zwave node values onto ozw node values.

    Returns {"device_entries": {ozw_device_id: zwave_device_id},
             "entity_entries": {ozw_entity_id: zwave_migration_entry}}.
    """
    migration_map = {"device_entries": {}, "entity_entries": {}}
    for zwave_entry in zwave_data.values():
        node_id = zwave_entry["node_id"]
        node_instance = zwave_entry["node_instance"]
        cc_id = zwave_entry["command_class"]
        zwave_cc_label = zwave_entry["command_class_label"]
        if cc_id in CC_ID_LABELS:
            # Labels changed between OpenZWave 1.4 and 1.6 for these command
            # classes, so translate the zwave label before matching on it.
            labels = CC_ID_LABELS[cc_id]
            ozw_cc_label = labels.get(zwave_cc_label, zwave_cc_label)
            ozw_entry = next(
                (
                    entry
                    for entry in ozw_data.values()
                    if entry["node_id"] == node_id
                    and entry["node_instance"] == node_instance
                    and entry["command_class"] == cc_id
                    and entry["command_class_label"] == ozw_cc_label
                ),
                None,
            )
        else:
            # Other command classes are matched on their value index instead.
            value_index = zwave_entry["value_index"]
            ozw_entry = next(
                (
                    entry
                    for entry in ozw_data.values()
                    if entry["node_id"] == node_id
                    and entry["node_instance"] == node_instance
                    and entry["command_class"] == cc_id
                    and entry["value_index"] == value_index
                ),
                None,
            )
        if ozw_entry is None:
            continue
        # Save the zwave_entry under the ozw entity_id to create the map.
        # Check that the mapped entities have the same domain.
        if zwave_entry["entity_entry"].domain == ozw_entry["entity_entry"].domain:
            migration_map["entity_entries"][
                ozw_entry["entity_entry"].entity_id
            ] = zwave_entry
            migration_map["device_entries"][ozw_entry["device_id"]] = zwave_entry[
                "device_id"
            ]
    return migration_map
async def async_migrate(hass, migration_map):
    """Perform zwave to ozw migration.

    Copies device customizations (area, user-given name) and entity ids/
    names from the legacy zwave entries onto the ozw ones, then removes
    the zwave config entry and marks the ozw entry as migrated.
    """
    dev_reg = await async_get_device_registry(hass)
    for ozw_device_id, zwave_device_id in migration_map["device_entries"].items():
        zwave_device_entry = dev_reg.async_get(zwave_device_id)
        dev_reg.async_update_device(
            ozw_device_id,
            area_id=zwave_device_entry.area_id,
            name_by_user=zwave_device_entry.name_by_user,
        )
    ent_reg = await async_get_entity_registry(hass)
    # Remove the legacy entities first so their entity_ids become free...
    for zwave_entry in migration_map["entity_entries"].values():
        zwave_entity_id = zwave_entry["entity_entry"].entity_id
        ent_reg.async_remove(zwave_entity_id)
    # ...then rename the ozw entities to take over those ids.
    for ozw_entity_id, zwave_entry in migration_map["entity_entries"].items():
        entity_entry = zwave_entry["entity_entry"]
        ent_reg.async_update_entity(
            ozw_entity_id,
            new_entity_id=entity_entry.entity_id,
            name=entity_entry.name,
            icon=entity_entry.icon,
        )
    zwave_config_entry = hass.config_entries.async_entries("zwave")[0]
    await hass.config_entries.async_remove(zwave_config_entry.entry_id)
    ozw_config_entry = hass.config_entries.async_entries("ozw")[0]
    # Flag the ozw entry so setup knows the migration already happened.
    updates = {
        **ozw_config_entry.data,
        MIGRATED: True,
    }
    hass.config_entries.async_update_entry(ozw_config_entry, data=updates)
|
<filename>homeassistant/components/ozw/migration.py
"""Provide tools for migrating from the zwave integration."""
from homeassistant.helpers.device_registry import (
async_get_registry as async_get_device_registry,
)
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_entity_registry,
)
from .const import DOMAIN, MIGRATED, NODES_VALUES
from .entity import create_device_id, create_value_id
# The following dicts map labels between OpenZWave 1.4 and 1.6.
# Meter command class (id 50): legacy zwave label -> ozw label.
METER_CC_LABELS = {
    "Energy": "Electric - kWh",
    "Power": "Electric - W",
    "Count": "Electric - Pulses",
    "Voltage": "Electric - V",
    "Current": "Electric - A",
    "Power Factor": "Electric - PF",
}
# Notification command class (id 113): legacy zwave label -> ozw label.
NOTIFICATION_CC_LABELS = {
    "General": "Start",
    "Smoke": "Smoke Alarm",
    "Carbon Monoxide": "Carbon Monoxide",
    "Carbon Dioxide": "Carbon Dioxide",
    "Heat": "Heat",
    "Flood": "Water",
    "Access Control": "Access Control",
    "Burglar": "Home Security",
    "Power Management": "Power Management",
    "System": "System",
    "Emergency": "Emergency",
    "Clock": "Clock",
    "Appliance": "Appliance",
    "HomeHealth": "Home Health",
}
# Command class id -> label translation table for that class.
CC_ID_LABELS = {
    50: METER_CC_LABELS,
    113: NOTIFICATION_CC_LABELS,
}
async def async_get_migration_data(hass):
    """Return dict with ozw side migration info.

    Maps each ozw entity unique_id to the node/value details plus the
    registry entries needed to pair it with its legacy zwave counterpart.
    """
    data = {}
    nodes_values = hass.data[DOMAIN][NODES_VALUES]
    ozw_config_entries = hass.config_entries.async_entries(DOMAIN)
    config_entry = ozw_config_entries[0]  # ozw only has a single config entry
    ent_reg = await async_get_entity_registry(hass)
    entity_entries = async_entries_for_config_entry(ent_reg, config_entry.entry_id)
    unique_entries = {entry.unique_id: entry for entry in entity_entries}
    dev_reg = await async_get_device_registry(hass)
    for node_id, node_values in nodes_values.items():
        for entity_values in node_values:
            unique_id = create_value_id(entity_values.primary)
            if unique_id not in unique_entries:
                # value has no registered entity; nothing to migrate
                continue
            node = entity_values.primary.node
            device_identifier = (
                DOMAIN,
                create_device_id(node, entity_values.primary.instance),
            )
            device_entry = dev_reg.async_get_device({device_identifier}, set())
            data[unique_id] = {
                "node_id": node_id,
                "node_instance": entity_values.primary.instance,
                "device_id": device_entry.id,
                "command_class": entity_values.primary.command_class.value,
                "command_class_label": entity_values.primary.label,
                "value_index": entity_values.primary.index,
                "unique_id": unique_id,
                "entity_entry": unique_entries[unique_id],
            }
    return data
def map_node_values(zwave_data, ozw_data):
    """Map zwave node values onto ozw node values.

    Returns {"device_entries": {ozw_device_id: zwave_device_id},
             "entity_entries": {ozw_entity_id: zwave_migration_entry}}.
    """
    migration_map = {"device_entries": {}, "entity_entries": {}}

    def _first_match(predicate):
        """Return the first ozw entry satisfying *predicate*, else None."""
        return next((entry for entry in ozw_data.values() if predicate(entry)), None)

    for zwave_entry in zwave_data.values():
        node_id = zwave_entry["node_id"]
        instance = zwave_entry["node_instance"]
        cc_id = zwave_entry["command_class"]
        if cc_id in CC_ID_LABELS:
            # Labels changed between OpenZWave 1.4 and 1.6 for these command
            # classes, so translate the zwave label before matching on it.
            legacy_label = zwave_entry["command_class_label"]
            ozw_label = CC_ID_LABELS[cc_id].get(legacy_label, legacy_label)
            ozw_entry = _first_match(
                lambda entry: entry["node_id"] == node_id
                and entry["node_instance"] == instance
                and entry["command_class"] == cc_id
                and entry["command_class_label"] == ozw_label
            )
        else:
            # Other command classes are matched on their value index instead.
            index = zwave_entry["value_index"]
            ozw_entry = _first_match(
                lambda entry: entry["node_id"] == node_id
                and entry["node_instance"] == instance
                and entry["command_class"] == cc_id
                and entry["value_index"] == index
            )
        if ozw_entry is None:
            continue
        # Save the zwave_entry under the ozw entity_id to create the map,
        # but only when the mapped entities share the same domain.
        if zwave_entry["entity_entry"].domain == ozw_entry["entity_entry"].domain:
            ozw_entity_id = ozw_entry["entity_entry"].entity_id
            migration_map["entity_entries"][ozw_entity_id] = zwave_entry
            migration_map["device_entries"][ozw_entry["device_id"]] = zwave_entry[
                "device_id"
            ]
    return migration_map
async def async_migrate(hass, migration_map):
    """Perform zwave to ozw migration.

    Copies device customizations (area, user-given name) and entity ids/
    names from the legacy zwave entries onto the ozw ones, then removes
    the zwave config entry and marks the ozw entry as migrated.
    """
    dev_reg = await async_get_device_registry(hass)
    for ozw_device_id, zwave_device_id in migration_map["device_entries"].items():
        zwave_device_entry = dev_reg.async_get(zwave_device_id)
        dev_reg.async_update_device(
            ozw_device_id,
            area_id=zwave_device_entry.area_id,
            name_by_user=zwave_device_entry.name_by_user,
        )
    ent_reg = await async_get_entity_registry(hass)
    # Remove the legacy entities first so their entity_ids become free...
    for zwave_entry in migration_map["entity_entries"].values():
        zwave_entity_id = zwave_entry["entity_entry"].entity_id
        ent_reg.async_remove(zwave_entity_id)
    # ...then rename the ozw entities to take over those ids.
    for ozw_entity_id, zwave_entry in migration_map["entity_entries"].items():
        entity_entry = zwave_entry["entity_entry"]
        ent_reg.async_update_entity(
            ozw_entity_id,
            new_entity_id=entity_entry.entity_id,
            name=entity_entry.name,
            icon=entity_entry.icon,
        )
    zwave_config_entry = hass.config_entries.async_entries("zwave")[0]
    await hass.config_entries.async_remove(zwave_config_entry.entry_id)
    ozw_config_entry = hass.config_entries.async_entries("ozw")[0]
    # Flag the ozw entry so setup knows the migration already happened.
    updates = {
        **ozw_config_entry.data,
        MIGRATED: True,
    }
    hass.config_entries.async_update_entry(ozw_config_entry, data=updates)
|
en
| 0.750447
|
Provide tools for migrating from the zwave integration. # The following dicts map labels between OpenZWave 1.4 and 1.6. Return dict with ozw side migration info. # ozw only has a single config entry Map zwave node values onto ozw node values. # Save the zwave_entry under the ozw entity_id to create the map. # Check that the mapped entities have the same domain. Perform zwave to ozw migration.
| 1.767409
| 2
|
test/test-acl.py
|
umeshpacholi/iudx-auth-server
| 0
|
6626440
|
<gh_stars>0
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from init import provider
# Access-control rules to install; each must be accepted by the policy API.
rules = [
    '<EMAIL> can access rs1.com/x/y/z/t/a/b/c for 2 days',
    '<EMAIL> can access rs1.com/_x/y/z/t/a/b/c for 2 days if country = "IN" AND api = "/latest"',
    '<EMAIL> can access rs1.com/x-t/y/z/t/a/b/c for 2 days if country = "IN" OR api = "/latest"',
    '<EMAIL> and <EMAIL> can access rs1.com/x for 5 hours @ 5 INR',
    'a,<EMAIL>, and c can access x/y/z.a.b.c/t for 2 seconds @ 10.5 INR; all can access anything; x can access y',
    '* can access local_server/*/test if ip = "192.168.3.11" OR ip = "::ffff:ada0:d182"',
    '* can access test-server/test-resource/rs1 if body.operation = "select" AND body.on = "everything"',
    '* can access test-server/test-resource/rs2 if api = "/latest" AND method = "GET"',
    '<EMAIL> can access test/test/* if cert.class = 2 AND cert.issuer.cn = "ca.iudx.org.in"',
    # The opening quotes of the next two rules were missing, which made the
    # list literal a syntax error; restored so the module parses again.
    '<EMAIL> can access data/server1/* if cert.class = 3 AND ' +
    'cert.o = "Indian Institute of Science \\(IISc\\)" AND cert.issuer.cn = "IUDX-sub-CA at iisc.ac.in"',
    '<EMAIL> can access confidential/data/* if cert.title = "Member of Technical Staff" AND ' +
    'cert.ou = "Robert Bosch Centre for Cyber-Physical Systems \\(RBCCPS\\)"',
    'person@* can access local/test/1 if tokens_per_day = 300 AND cert.st = "Karnataka"'
]
for rule in rules:
    r = provider.set_policy(rule)
    assert r['success'] is True
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from init import provider
# Access-control rules to install; each must be accepted by the policy API.
rules = [
    '<EMAIL> can access rs1.com/x/y/z/t/a/b/c for 2 days',
    '<EMAIL> can access rs1.com/_x/y/z/t/a/b/c for 2 days if country = "IN" AND api = "/latest"',
    '<EMAIL> can access rs1.com/x-t/y/z/t/a/b/c for 2 days if country = "IN" OR api = "/latest"',
    '<EMAIL> and <EMAIL> can access rs1.com/x for 5 hours @ 5 INR',
    'a,<EMAIL>, and c can access x/y/z.a.b.c/t for 2 seconds @ 10.5 INR; all can access anything; x can access y',
    '* can access local_server/*/test if ip = "192.168.3.11" OR ip = "::ffff:ada0:d182"',
    '* can access test-server/test-resource/rs1 if body.operation = "select" AND body.on = "everything"',
    '* can access test-server/test-resource/rs2 if api = "/latest" AND method = "GET"',
    '<EMAIL> can access test/test/* if cert.class = 2 AND cert.issuer.cn = "ca.iudx.org.in"',
    # The opening quotes of the next two rules were missing, which made the
    # list literal a syntax error; restored so the module parses again.
    '<EMAIL> can access data/server1/* if cert.class = 3 AND ' +
    'cert.o = "Indian Institute of Science \\(IISc\\)" AND cert.issuer.cn = "IUDX-sub-CA at iisc.ac.in"',
    '<EMAIL> can access confidential/data/* if cert.title = "Member of Technical Staff" AND ' +
    'cert.ou = "Robert Bosch Centre for Cyber-Physical Systems \\(RBCCPS\\)"',
    'person@* can access local/test/1 if tokens_per_day = 300 AND cert.st = "Karnataka"'
]
for rule in rules:
    r = provider.set_policy(rule)
    assert r['success'] is True
|
de
| 0.26704
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 1.798536
| 2
|
ee/api/ee_property_definition.py
|
dorucioclea/posthog
| 0
|
6626441
|
<filename>ee/api/ee_property_definition.py
from rest_framework import serializers
from ee.models.property_definition import EnterprisePropertyDefinition
from posthog.api.shared import UserBasicSerializer
class EnterprisePropertyDefinitionSerializer(serializers.ModelSerializer):
    """DRF serializer for EnterprisePropertyDefinition.

    Exposes the enterprise-only metadata (description, tags, updated info)
    alongside the read-only core property definition fields.
    """

    # Nested read-only summary of the user who last updated the definition.
    updated_by = UserBasicSerializer(read_only=True)

    class Meta:
        model = EnterprisePropertyDefinition
        fields = (
            "id",
            "name",
            "description",
            "tags",
            "is_numerical",
            "updated_at",
            "updated_by",
            "query_usage_30_day",
            "is_event_property",
            "property_type",
        )
        read_only_fields = ["id", "name", "is_numerical", "query_usage_30_day", "is_event_property"]

    def update(self, event_definition: EnterprisePropertyDefinition, validated_data):
        """Record the requesting user as updated_by before delegating the save."""
        validated_data["updated_by"] = self.context["request"].user
        return super().update(event_definition, validated_data)
|
<filename>ee/api/ee_property_definition.py
from rest_framework import serializers
from ee.models.property_definition import EnterprisePropertyDefinition
from posthog.api.shared import UserBasicSerializer
class EnterprisePropertyDefinitionSerializer(serializers.ModelSerializer):
    """DRF serializer for EnterprisePropertyDefinition.

    Exposes the enterprise-only metadata (description, tags, updated info)
    alongside the read-only core property definition fields.
    """

    # Nested read-only summary of the user who last updated the definition.
    updated_by = UserBasicSerializer(read_only=True)

    class Meta:
        model = EnterprisePropertyDefinition
        fields = (
            "id",
            "name",
            "description",
            "tags",
            "is_numerical",
            "updated_at",
            "updated_by",
            "query_usage_30_day",
            "is_event_property",
            "property_type",
        )
        read_only_fields = ["id", "name", "is_numerical", "query_usage_30_day", "is_event_property"]

    def update(self, event_definition: EnterprisePropertyDefinition, validated_data):
        """Record the requesting user as updated_by before delegating the save."""
        validated_data["updated_by"] = self.context["request"].user
        return super().update(event_definition, validated_data)
|
none
| 1
| 2.138018
| 2
|
|
software/drift_cali/drift_cali.py
|
BaharsGit/Rovable
| 0
|
6626442
|
#!/usr/bin/env python
# work with MPU6050_kalman.ino
from PyQt5 import QtCore, QtWidgets, uic, QtGui
from pyqtgraph import PlotWidget
from PyQt5.QtWidgets import QApplication, QVBoxLayout
import pyqtgraph as pg
import numpy as np
import datetime
import serial
import sys
import os
import time
from time import sleep
from colorama import Fore, Back, Style
import csv
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import random
import struct
# Single-byte serial commands; presumably must match the firmware in
# MPU6050_kalman.ino (see header comment) - confirm against the sketch.
start_cmd = 0x11
interval_cmd = 0x22
sleep_cmd = 0x33
aq_cmd = 0x44
nbins = 20  # histogram bin count (not referenced in this script)
data_len = 500  # samples retained in each rolling gyro plot buffer
class bcolors:
    """ANSI terminal escape sequences for coloured/styled console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to default styling
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def read_current_time():
    """Return the current UTC time as a 'ZONE:DOY/HH:MM:SS' string."""
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    return utc_now.strftime("%Z:%j/%H:%M:%S")
class MainWindow(QtWidgets.QMainWindow):
    """Qt main window: streams gyro data from a serial port, plots the three
    axes live, and reports per-axis mean/std for drift calibration."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setFixedSize(851, 630)
        # Load the UI Page; the widgets referenced below (gyrox/gyroy/gyroz
        # plots, buttons, combo boxes) are defined in drift.ui.
        uic.loadUi('drift.ui', self)
        self.gyrox.setBackground('w')
        self.gyroy.setBackground('w')
        self.gyroz.setBackground('w')
        self.serial_ports_list = []
        self.serial_speed = [1000000]
        # Poll the serial port every 5 ms instead of a blocking read loop.
        # Ref: https://stackoverflow.com/questions/59898215/break-an-infinit-loop-when-button-is-pressed
        self.timer = QtCore.QTimer(self, interval=5, timeout=self.read_port)
        self.ser=serial.Serial()
        self.scan_btn.clicked.connect(self.scan)
        self.open_btn.clicked.connect(self.open_port)
        self.close_btn.clicked.connect(self.close)
        self.start_btn.clicked.connect(self.start_read_port)
        self.stop_btn.clicked.connect(self.stop_read_port)
        self.calc_btn.clicked.connect(self.calc_results)
        # Rolling sample buffers, one per gyro axis.
        self.gyrox_data = [0] * data_len
        self.gyroy_data = [0] * data_len
        self.gyroz_data = [0] * data_len
        self.time_index=list(range(1, data_len+1))
        for x in self.serial_speed:
            self.speed_comboBox.addItem(str(x))

    def scan(self):
        """Enumerate available serial ports and fill the port combo box."""
        if os.name == 'nt': # sys.platform == 'win32':
            from serial.tools.list_ports_windows import comports
        elif os.name == 'posix':
            from serial.tools.list_ports_posix import comports
        for info in comports(False):
            port, desc, hwid = info
        iterator = sorted(comports(False))
        self.serial_ports_list = [] # clear the list first
        for n, (port, desc, hwid) in enumerate(iterator, 1):
            self.serial_ports_list.append("{:20}\n".format(port))
        ports_num = len(self.serial_ports_list)
        self.serial_comboBox.clear() # clear the list first
        for x in self.serial_ports_list:
            self.serial_comboBox.addItem(x)
        self.start_id = 0
        self.interval_id = 0
        self.sleep_id = 0

    def open_port(self):
        """Open the port selected in the combo box at the selected baud rate."""
        index = self.serial_comboBox.currentIndex()
        serial_ports_port = self.serial_ports_list[index][:-1] # delete the \n at the end
        index = self.speed_comboBox.currentIndex()
        self.ser = serial.Serial(serial_ports_port, self.serial_speed[index])
        current_time = read_current_time()
        print(current_time, self.ser.name + " Opened @ " + str(self.serial_speed[index]) + "bps")

    def start_read_port(self):
        """Reset the plot buffers and start polling the serial port."""
        self.gyrox_data = [0] * data_len
        self.gyroy_data = [0] * data_len
        self.gyroz_data = [0] * data_len
        self.data_num = 0
        self.timer.start() # Start the timer

    def stop_read_port(self):
        """Stop polling the serial port."""
        self.timer.stop() # Stop the timer

    def read_port(self):
        """Timer callback: read one 24-byte frame (three native-byte-order
        doubles - struct format 'd' with no endianness prefix) and append
        one sample to each rolling plot."""
        if (self.ser.inWaiting()):
            current_time = read_current_time()
            gyro = self.ser.read(24) # 3 double value: gyrox, gyroy, gyroz
            gyrox_i = gyro[0:8]
            gyroy_i = gyro[8:16]
            gyroz_i = gyro[16:24]
            gyrox_d=struct.unpack('d', gyrox_i)[0]
            gyroy_d=struct.unpack('d', gyroy_i)[0]
            gyroz_d=struct.unpack('d', gyroz_i)[0]
            # print(current_time, " ---> ", gyrox_d, gyroy_d, gyroz_d)
            # drop the oldest sample, append the newest, redraw each plot
            self.gyrox_data.pop(0)
            self.gyrox_data.append(gyrox_d)
            self.gyrox.clear()
            self.gyrox.plot(self.time_index, self.gyrox_data, pen=pg.mkPen('b', width=2))
            self.gyroy_data.pop(0)
            self.gyroy_data.append(gyroy_d)
            self.gyroy.clear()
            self.gyroy.plot(self.time_index, self.gyroy_data, pen=pg.mkPen('r', width=2))
            self.gyroz_data.pop(0)
            self.gyroz_data.append(gyroz_d)
            self.gyroz.clear()
            self.gyroz.plot(self.time_index, self.gyroz_data, pen=pg.mkPen('g', width=2))

    def calc_results(self):
        """Print std and mean of each axis buffer (the drift statistics)."""
        print(np.std(self.gyrox_data))
        print(np.mean(self.gyrox_data))
        print(np.std(self.gyroy_data))
        print(np.mean(self.gyroy_data))
        print(np.std(self.gyroz_data))
        print(np.mean(self.gyroz_data))

    def save_data(self):
        """Write captured data to <filename>.csv.

        NOTE(review): `ranging_data` is not defined anywhere in this file,
        so calling this raises NameError - probably one of the gyro buffers
        was intended; confirm before wiring this to a button.
        """
        filename = self.filename_str.text()+'.csv'
        with open(filename, 'w') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(ranging_data)
# driver code
if __name__ == '__main__':
    # Create the Qt application, show the main window, and hand control to
    # the Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    main = MainWindow()
    main.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python
# work with MPU6050_kalman.ino
from PyQt5 import QtCore, QtWidgets, uic, QtGui
from pyqtgraph import PlotWidget
from PyQt5.QtWidgets import QApplication, QVBoxLayout
import pyqtgraph as pg
import numpy as np
import datetime
import serial
import sys
import os
import time
from time import sleep
from colorama import Fore, Back, Style
import csv
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import random
import struct
# Single-byte serial commands; presumably must match the firmware in
# MPU6050_kalman.ino (see header comment) - confirm against the sketch.
start_cmd = 0x11
interval_cmd = 0x22
sleep_cmd = 0x33
aq_cmd = 0x44
nbins = 20  # histogram bin count (not referenced in this script)
data_len = 500  # samples retained in each rolling gyro plot buffer
class bcolors:
    """ANSI terminal escape sequences for coloured/styled console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to default styling
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def read_current_time():
    """Return the current UTC time formatted as 'ZONE:DOY/HH:MM:SS'."""
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    stamp = now_utc.strftime("%Z:%j/%H:%M:%S")
    return stamp
class MainWindow(QtWidgets.QMainWindow):
    """Gyro-drift monitor window (companion to MPU6050_kalman.ino).

    Reads 24-byte binary samples (three packed C doubles: gyro x, y, z) from
    a serial port, keeps a rolling buffer of ``data_len`` samples per axis,
    live-plots each axis, and can print mean/std statistics or dump the
    buffers to CSV.

    Fixes vs. the original:
    - ``save_data`` wrote the undefined global ``ranging_data`` (NameError);
      it now writes the collected gyro buffers.
    - ``scan`` contained a dead enumeration loop and an unused ``ports_num``
      local, both removed.
    - The triplicated buffer-update/plot code in ``read_port`` is factored
      into ``_push_sample``.
    """

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setFixedSize(851, 630)
        # Load the widget layout (plots, buttons, combo boxes) from Qt Designer.
        uic.loadUi('drift.ui', self)
        self.gyrox.setBackground('w')
        self.gyroy.setBackground('w')
        self.gyroz.setBackground('w')
        self.serial_ports_list = []
        self.serial_speed = [1000000]
        # Poll the serial port every 5 ms while the acquisition timer runs.
        # Ref: https://stackoverflow.com/questions/59898215/break-an-infinit-loop-when-button-is-pressed
        self.timer = QtCore.QTimer(self, interval=5, timeout=self.read_port)
        self.ser = serial.Serial()
        self.scan_btn.clicked.connect(self.scan)
        self.open_btn.clicked.connect(self.open_port)
        self.close_btn.clicked.connect(self.close)
        self.start_btn.clicked.connect(self.start_read_port)
        self.stop_btn.clicked.connect(self.stop_read_port)
        self.calc_btn.clicked.connect(self.calc_results)
        # Rolling per-axis sample buffers and a fixed x-axis for plotting.
        self.gyrox_data = [0] * data_len
        self.gyroy_data = [0] * data_len
        self.gyroz_data = [0] * data_len
        self.time_index = list(range(1, data_len + 1))
        for speed in self.serial_speed:
            self.speed_comboBox.addItem(str(speed))

    def scan(self):
        """Enumerate available serial ports and refresh the port combo box."""
        # Platform-specific enumerator; other platforms are unsupported
        # (``comports`` would be unbound below -- TODO confirm intended scope).
        if os.name == 'nt':
            from serial.tools.list_ports_windows import comports
        elif os.name == 'posix':
            from serial.tools.list_ports_posix import comports
        self.serial_ports_list = [
            "{:20}\n".format(port)
            for port, _desc, _hwid in sorted(comports(False))
        ]
        self.serial_comboBox.clear()
        for entry in self.serial_ports_list:
            self.serial_comboBox.addItem(entry)
        self.start_id = 0
        self.interval_id = 0
        self.sleep_id = 0

    def open_port(self):
        """Open the port selected in the combo box at the selected baud rate."""
        index = self.serial_comboBox.currentIndex()
        serial_ports_port = self.serial_ports_list[index][:-1]  # strip trailing '\n'
        index = self.speed_comboBox.currentIndex()
        self.ser = serial.Serial(serial_ports_port, self.serial_speed[index])
        current_time = read_current_time()
        print(current_time, self.ser.name + " Opened @ " + str(self.serial_speed[index]) + "bps")

    def start_read_port(self):
        """Reset the axis buffers and start polling the serial port."""
        self.gyrox_data = [0] * data_len
        self.gyroy_data = [0] * data_len
        self.gyroz_data = [0] * data_len
        self.data_num = 0
        self.timer.start()

    def stop_read_port(self):
        """Stop polling the serial port."""
        self.timer.stop()

    def read_port(self):
        """Timer callback: consume one 24-byte sample and refresh all plots."""
        if self.ser.inWaiting():
            packet = self.ser.read(24)  # three packed native C doubles: x, y, z
            gyrox_d, gyroy_d, gyroz_d = struct.unpack('ddd', packet)
            self._push_sample(self.gyrox, self.gyrox_data, gyrox_d, 'b')
            self._push_sample(self.gyroy, self.gyroy_data, gyroy_d, 'r')
            self._push_sample(self.gyroz, self.gyroz_data, gyroz_d, 'g')

    def _push_sample(self, plot, buffer, value, color):
        """Append value to a rolling buffer (dropping the oldest) and redraw plot."""
        buffer.pop(0)
        buffer.append(value)
        plot.clear()
        plot.plot(self.time_index, buffer, pen=pg.mkPen(color, width=2))

    def calc_results(self):
        """Print the standard deviation and mean for each axis buffer (x, y, z)."""
        for samples in (self.gyrox_data, self.gyroy_data, self.gyroz_data):
            print(np.std(samples))
            print(np.mean(samples))

    def save_data(self):
        """Write the three axis buffers to '<entered name>.csv', one row per axis."""
        filename = self.filename_str.text() + '.csv'
        # newline='' prevents blank lines between rows on Windows (csv docs).
        with open(filename, 'w', newline='') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(self.gyrox_data)
            csvwriter.writerow(self.gyroy_data)
            csvwriter.writerow(self.gyroz_data)
# Script entry point: build the Qt application, show the main window, and
# hand control to the Qt event loop until the window is closed.
if __name__ == '__main__':
    # A QApplication must exist before any QWidget is constructed.
    app = QApplication(sys.argv)
    # Instantiate the main window defined above.
    main = MainWindow()
    # Make the window visible.
    main.show()
    # Run the event loop; propagate its return code as the process exit status.
    sys.exit(app.exec_())
|
en
| 0.703977
|
#!/usr/bin/env python # work with MPU6050_kalman.ino #Load the UI Page # Ref: https://stackoverflow.com/questions/59898215/break-an-infinit-loop-when-button-is-pressed # sys.platform == 'win32': # clear the list first # clear the list first # delete the \n at the end # Start the timer # Stop the timer # 3 double value: gyrox, gyroy, gyroz # print(current_time, " ---> ", gyrox_d, gyroy_d, gyroz_d) # driver code # creating apyqt5 application # creating a window object # showing the window # loop
| 2.300507
| 2
|
mindmeld/components/_config.py
|
arushir/mindmeld
| 0
|
6626443
|
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Config class.
"""
import copy
import imp
import logging
import os
import warnings
from .. import path
from .request import validate_language_code, validate_locale_code
logger = logging.getLogger(__name__)
# Name and default local endpoint of the Duckling service used for
# system-entity (numerical/date) recognition.
DUCKLING_SERVICE_NAME = "duckling"
DEFAULT_DUCKLING_URL = "http://localhost:7151/parse"
# Maps current config attribute / provider-function names to their deprecated
# equivalents.  Lookups fall back to the old name (emitting a
# DeprecationWarning) when an app's config.py still uses it; see
# get_classifier_config below.
CONFIG_DEPRECATION_MAPPING = {
    "DOMAIN_CLASSIFIER_CONFIG": "DOMAIN_MODEL_CONFIG",
    "INTENT_CLASSIFIER_CONFIG": "INTENT_MODEL_CONFIG",
    "ENTITY_RECOGNIZER_CONFIG": "ENTITY_MODEL_CONFIG",
    "ROLE_CLASSIFIER_CONFIG": "ROLE_MODEL_CONFIG",
    "ENTITY_RESOLVER_CONFIG": "ENTITY_RESOLUTION_CONFIG",
    "QUESTION_ANSWERER_CONFIG": "QUESTION_ANSWERING_CONFIG",
    "get_entity_recognizer_config": "get_entity_model_config",
    "get_intent_classifier_config": "get_intent_model_config",
    "get_entity_resolver_config": "get_entity_resolution_model_config",
    "get_role_classifier_config": "get_role_model_config",
}
DEFAULT_DOMAIN_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {
"classifier_type": "logreg",
},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {"fit_intercept": [True, False], "C": [10, 100, 1000, 10000, 100000]},
},
"features": {"bag-of-words": {"lengths": [1]}, "freq": {"bins": 5}, "in-gaz": {}},
}
DEFAULT_INTENT_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {
"fit_intercept": [True, False],
"C": [0.01, 1, 100, 10000, 1000000],
"class_bias": [1, 0.7, 0.3, 0],
},
},
"features": {
"bag-of-words": {"lengths": [1]},
"in-gaz": {},
"freq": {"bins": 5},
"length": {},
},
}
DEFAULT_ENTITY_RECOGNIZER_CONFIG = {
"model_type": "tagger",
"label_type": "entities",
"model_settings": {
"classifier_type": "memm",
"tag_scheme": "IOB",
"feature_scaler": "max-abs",
},
"param_selection": {
"type": "k-fold",
"k": 5,
"scoring": "accuracy",
"grid": {
"penalty": ["l1", "l2"],
"C": [0.01, 1, 100, 10000, 1000000, 100000000],
},
},
"features": {
"bag-of-words-seq": {
"ngram_lengths_to_start_positions": {
1: [-2, -1, 0, 1, 2],
2: [-2, -1, 0, 1],
}
},
"in-gaz-span-seq": {},
"sys-candidates-seq": {"start_positions": [-1, 0, 1]},
},
}
DEFAULT_ENTITY_RESOLVER_CONFIG = {"model_type": "text_relevance"}
DEFAULT_QUESTION_ANSWERER_CONFIG = {"model_type": "keyword"}
ENGLISH_LANGUAGE_CODE = "en"
ENGLISH_US_LOCALE = "en_US"
DEFAULT_LANGUAGE_CONFIG = {
"language": ENGLISH_LANGUAGE_CODE,
"locale": ENGLISH_US_LOCALE,
}
# ElasticSearch mapping to define text analysis settings for text fields.
# It defines specific index configuration for synonym indices. The common index configuration
# is in default index template.
DEFAULT_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
}
}
}
PHONETIC_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
"cname": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
},
}
},
"settings": {
"analysis": {
"filter": {
"phonetic_filter": {
"type": "phonetic",
"encoder": "doublemetaphone",
"replace": True,
"max_code_len": 7,
}
},
"analyzer": {
"phonetic_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"token_shingle",
"phonetic_filter",
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
"remove_dot",
],
"type": "custom",
"tokenizer": "whitespace",
}
},
}
},
}
DEFAULT_ROLE_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"params": {"C": 100, "penalty": "l1"},
"features": {
"bag-of-words-before": {
"ngram_lengths_to_start_positions": {1: [-2, -1], 2: [-2, -1]}
},
"bag-of-words-after": {
"ngram_lengths_to_start_positions": {1: [0, 1], 2: [0, 1]}
},
"other-entities": {},
},
}
DEFAULT_ES_INDEX_TEMPLATE_NAME = "mindmeld_default"
# Default ES index template that contains the base index configuration shared across different
# types of indices. Currently all ES indices will be created using this template.
# - custom text analysis settings such as custom analyzers, token filters and character filters.
# - dynamic field mapping template for text fields
# - common fields, e.g. id.
DEFAULT_ES_INDEX_TEMPLATE = {
"template": "*",
"mappings": {
"dynamic_templates": [
{
"default_text": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"processed_text": {
"type": "text",
"analyzer": "english",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
},
}
}
],
"properties": {"id": {"type": "keyword"}},
},
"settings": {
"analysis": {
"char_filter": {
"remove_loose_apostrophes": {
"pattern": " '|' ",
"type": "pattern_replace",
"replacement": "",
},
"space_possessive_apostrophes": {
"pattern": "([^\\p{N}\\s]+)'s ",
"type": "pattern_replace",
"replacement": "$1 's ",
},
"remove_special_beginning": {
"pattern": "^[^\\p{L}\\p{N}\\p{Sc}&']+",
"type": "pattern_replace",
"replacement": "",
},
"remove_special_end": {
"pattern": "[^\\p{L}\\p{N}&']+$",
"type": "pattern_replace",
"replacement": "",
},
"remove_special1": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{N}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special2": {
"pattern": "([\\p{N}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special3": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_comma": {
"pattern": ",",
"type": "pattern_replace",
"replacement": "",
},
"remove_tm_and_r": {
"pattern": "™|®",
"type": "pattern_replace",
"replacement": "",
},
"remove_dot": {
"pattern": "([\\p{L}]+)[.]+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1",
},
},
"filter": {
"token_shingle": {
"max_shingle_size": "4",
"min_shingle_size": "2",
"output_unigrams": "true",
"type": "shingle",
},
"ngram_filter": {"type": "ngram", "min_gram": "3", "max_gram": "3"},
},
"analyzer": {
"default_analyzer": {
"filter": ["lowercase", "asciifolding", "token_shingle"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
"keyword_match_analyzer": {
"filter": ["lowercase", "asciifolding"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "keyword",
},
"char_ngram_analyzer": {
"filter": ["lowercase", "asciifolding", "ngram_filter"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
},
}
},
}
# Elasticsearch mapping to define knowledge base index specific configuration:
# - dynamic field mapping to index all synonym whitelist in fields with "$whitelist" suffix.
# - location field
#
# The common configuration is defined in default index template
DEFAULT_ES_QA_MAPPING = {
"mappings": {
"dynamic_templates": [
{
"synonym_whitelist_text": {
"match": "*$whitelist",
"match_mapping_type": "object",
"mapping": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
}
}
],
"properties": {"location": {"type": "geo_point"}},
}
}
DEFAULT_PARSER_DEPENDENT_CONFIG = {
"left": True,
"right": True,
"min_instances": 0,
"max_instances": None,
"precedence": "left",
"linking_words": frozenset(),
}
DEFAULT_RANKING_CONFIG = {"query_clauses_operator": "or"}
DEFAULT_NLP_CONFIG = {
"resolve_entities_using_nbest_transcripts": [],
"system_entity_recognizer": {
"type": DUCKLING_SERVICE_NAME,
"url": DEFAULT_DUCKLING_URL,
},
}
class NlpConfigError(Exception):
    """Raised when an application's NLP configuration cannot be resolved."""
def get_custom_action_config(app_path):
    """Return the app's ``CUSTOM_ACTION_CONFIG``, or None if unavailable.

    None is returned when no app path is given, when the app has no
    config.py, or when config.py does not define the attribute.
    """
    if not app_path:
        return None
    try:
        config_module = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return None
    return getattr(config_module, "CUSTOM_ACTION_CONFIG", None)
def get_max_history_len(app_path):
    """Return the app's ``MAX_HISTORY_LEN`` setting, or None if unavailable.

    None is returned when no app path is given, when the app has no
    config.py, or when config.py does not define the attribute.
    """
    if not app_path:
        return None
    try:
        config_module = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return None
    return getattr(config_module, "MAX_HISTORY_LEN", None)
def get_language_config(app_path):
    """Return the ``(language, locale)`` pair configured for the app.

    Falls back to English / en_US when no app path is given or the app has
    no config.py file.
    """
    if not app_path:
        return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
    try:
        cfg = getattr(
            _get_config_module(app_path), "LANGUAGE_CONFIG", DEFAULT_LANGUAGE_CONFIG
        )
        locale = cfg.get("locale")
        # The locale's language component takes precedence over "language".
        return resolve_language(cfg.get("language"), locale), locale
    except (OSError, IOError):
        logger.info(
            "No app configuration file found. Using default language and locale."
        )
        return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
def resolve_language(language=None, locale=None):
    """Resolve the effective lowercase language code.

    A validated locale's language component overrides ``language``; when
    neither yields a language, English is used.
    """
    locale = validate_locale_code(locale)
    language = validate_language_code(language)
    if locale:
        # Locale overrides language, e.g. "en_US" -> "en".
        return locale.split("_")[0].lower()
    return (language or ENGLISH_LANGUAGE_CODE).lower()
def get_app_namespace(app_path):
    """Return the namespace of the application at ``app_path``.

    Prefers APP_NAMESPACE from the app's config.py; otherwise uses the app
    directory name.  Either value is prefixed with the JUPYTER_USER
    environment variable when that variable is set.
    """
    jupyter_user = os.environ.get("JUPYTER_USER")

    def _prefixed(namespace):
        # Match the original "in os.environ" check: even an empty value
        # counts as set and produces a prefix.
        if jupyter_user is None:
            return namespace
        return "{}_{}".format(jupyter_user, namespace)

    try:
        return _prefixed(_get_config_module(app_path).APP_NAMESPACE)
    except (OSError, IOError):
        logger.debug("No app configuration file found")
    except AttributeError:
        logger.debug("App namespace not set in app configuration")
    # Fall back to the directory name; resolve relative paths first.
    if not os.path.isabs(app_path):
        app_path = os.path.abspath(app_path)
    return _prefixed(os.path.split(app_path)[1])
def is_duckling_configured(app_path):
    """Return True if the app should run duckling as its system entity recognizer.

    Args:
        app_path (str): A application path

    Returns:
        (bool): True if the app config specifies that the numerical parsing
            should be run

    Raises:
        NlpConfigError: If ``app_path`` is falsy.
    """
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    recognizer = get_nlp_config(app_path).get("system_entity_recognizer")
    if not isinstance(recognizer, dict):
        # No explicit system_entity_recognizer config: duckling is the default.
        return True
    return recognizer.get("type") == DUCKLING_SERVICE_NAME
def get_system_entity_url_config(app_path):
    """Return the system-entity recognizer URL from the app's config.

    Falls back to the default duckling URL when the app does not define one.

    Raises:
        NlpConfigError: If ``app_path`` is falsy.
    """
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    recognizer = get_nlp_config(app_path).get("system_entity_recognizer", {})
    return recognizer.get("url", DEFAULT_DUCKLING_URL)
def get_classifier_config(
    clf_type, app_path=None, domain=None, intent=None, entity=None
):
    """Returns the config for the specified classifier, with the
    following order of precedence.

    If the application contains a config.py file:
    - Return the response from the get_*_model_config function in
      config.py for the specified classifier type. E.g.
      `get_intent_model_config`.
    - If the function does not exist, or raise an exception, return the
      config specified by *_MODEL_CONFIG in config.py, e.g.
      INTENT_MODEL_CONFIG.

    Otherwise, use the MindMeld default config for the classifier type

    Args:
        clf_type (str): The type of the classifier. One of 'domain',
            'intent', 'entity', 'entity_resolution', or 'role'.
        app_path (str, optional): The location of the app
        domain (str, optional): The domain of the classifier
        intent (str, optional): The intent of the classifier
        entity (str, optional): The entity type of the classifier

    Returns:
        dict: A classifier config
    """
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        # No config.py at all: fall straight through to the built-in default.
        logger.info(
            "No app configuration file found. Using default %s model configuration",
            clf_type,
        )
        return _get_default_classifier_config(clf_type)
    # Name of the per-type provider function an app may define in config.py
    # (the domain classifier has no provider function, only an attribute).
    func_name = {
        "intent": "get_intent_classifier_config",
        "entity": "get_entity_recognizer_config",
        "entity_resolution": "get_entity_resolver_config",
        "role": "get_role_classifier_config",
    }.get(clf_type)
    # Which of the keyword arguments each provider function accepts.
    func_args = {
        "intent": ("domain",),
        "entity": ("domain", "intent"),
        "entity_resolution": ("domain", "intent", "entity"),
        "role": ("domain", "intent", "entity"),
    }.get(clf_type)
    if func_name:
        func = None
        try:
            func = getattr(module_conf, func_name)
        except AttributeError:
            # Fall back to the deprecated provider name, warning the app author.
            try:
                func = getattr(module_conf, CONFIG_DEPRECATION_MAPPING[func_name])
                msg = (
                    "%s config key is deprecated. Please use the equivalent %s config "
                    "key" % (CONFIG_DEPRECATION_MAPPING[func_name], func_name)
                )
                warnings.warn(msg, DeprecationWarning)
            except AttributeError:
                pass
        if func:
            try:
                # Pass only the arguments this provider's signature expects.
                raw_args = {"domain": domain, "intent": intent, "entity": entity}
                args = {k: raw_args[k] for k in func_args}
                # Deep-copy so callers can mutate the result safely.
                return copy.deepcopy(func(**args))
            except Exception as exc:  # pylint: disable=broad-except
                # Note: this is intentionally broad -- provider could raise any exception
                logger.warning(
                    "%r configuration provider raised exception: %s", clf_type, exc
                )
    # No usable provider function: look for a *_CONFIG attribute instead.
    attr_name = {
        "domain": "DOMAIN_CLASSIFIER_CONFIG",
        "intent": "INTENT_CLASSIFIER_CONFIG",
        "entity": "ENTITY_RECOGNIZER_CONFIG",
        "entity_resolution": "ENTITY_RESOLVER_CONFIG",
        "role": "ROLE_CLASSIFIER_CONFIG",
        "question_answering": "QUESTION_ANSWERER_CONFIG",
    }[clf_type]
    try:
        return copy.deepcopy(getattr(module_conf, attr_name))
    except AttributeError:
        # Same deprecation fallback for the attribute form.
        try:
            result = copy.deepcopy(
                getattr(module_conf, CONFIG_DEPRECATION_MAPPING[attr_name])
            )
            msg = (
                "%s config is deprecated. Please use the equivalent %s config "
                "key" % (CONFIG_DEPRECATION_MAPPING[attr_name], attr_name)
            )
            warnings.warn(msg, DeprecationWarning)
            return result
        except AttributeError:
            logger.info("No %s model configuration set. Using default.", clf_type)
            return _get_default_classifier_config(clf_type)
def _get_default_classifier_config(clf_type):
    """Return a deep copy of the MindMeld default config for ``clf_type``.

    Raises KeyError for an unknown classifier type.
    """
    defaults = {
        "domain": DEFAULT_DOMAIN_CLASSIFIER_CONFIG,
        "intent": DEFAULT_INTENT_CLASSIFIER_CONFIG,
        "entity": DEFAULT_ENTITY_RECOGNIZER_CONFIG,
        "entity_resolution": DEFAULT_ENTITY_RESOLVER_CONFIG,
        "role": DEFAULT_ROLE_CLASSIFIER_CONFIG,
        "language_config": DEFAULT_LANGUAGE_CONFIG,
        "question_answering": DEFAULT_QUESTION_ANSWERER_CONFIG,
    }
    # Deep copy so callers can mutate the returned config freely.
    return copy.deepcopy(defaults[clf_type])
def get_parser_config(app_path=None, config=None, domain=None, intent=None):
    """Gets the fully specified parser configuration for the app at the
    given path.

    Args:
        app_path (str, optional): The location of the MindMeld app
        config (dict, optional): A config object to use. This will
            override the config specified by the app's config.py file.
            If necessary, this object will be expanded to a fully
            specified config object.
        domain (str, optional): The domain of the parser
        intent (str, optional): The intent of the parser

    Returns:
        dict: A fully parser configuration
    """
    # An explicitly supplied config wins outright.
    if config:
        return _expand_parser_config(config)
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found. Not configuring parser.")
        return _get_default_parser_config()
    # Try provider first
    config_provider = None
    try:
        config_provider = module_conf.get_parser_config
    except AttributeError:
        pass
    if config_provider:
        try:
            config = config or config_provider(domain, intent)
            return _expand_parser_config(config)
        except Exception as exc:  # pylint: disable=broad-except
            # Note: this is intentionally broad -- provider could raise any exception
            logger.warning("Parser configuration provider raised exception: %s", exc)
    # Try object second
    try:
        config = config or module_conf.PARSER_CONFIG
        return _expand_parser_config(config)
    except AttributeError:
        pass
    return _get_default_parser_config()
def _get_default_parser_config():
    # Parsing is opt-in: with no app-supplied parser config the parser stays
    # disabled, signalled by returning None rather than an empty dict.
    return None
def _expand_parser_config(config):
    """Expand every group config, rewriting '|' in head names to '--'."""
    expanded = {}
    for head, group in config.items():
        # '|' has a special meaning for the parser, so it is replaced.
        expanded[head.replace("|", "--")] = _expand_group_config(group)
    return expanded
def _expand_group_config(group_config):
    """Expands a parser group configuration.

    A group config can either be a list of dependents or a dictionary with a
    field for each dependent.

    In the list a dependent can be a string containing the name of the
    entity-role type identifier or a dictionary with at least a type field.

    In the dictionary the dependent must be another dictionary.

    Some example parser configs follow below.

    A very simple configuration:

        {
            'head': ['dependent']
        }

    A more realistic simple config:

        {
            'product|beverage': ['size', 'quantity', 'option|beverage'],
            'product|baked-good': ['size', 'quantity', 'option|baked-good'],
            'store': ['location'],
            'option': ['size']
        }

    A fully specified config:

        {
            'product': {
                'quantity': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 3
                },
                'size': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                },
                'option': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            },
            'store': {
                'location': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            },
            'option': {
                'size': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            }
        }
    """
    # Deep-copy first: the expansion below mutates dependents via pop().
    group_config = copy.deepcopy(group_config)
    expanded = {}
    if isinstance(group_config, (tuple, list, set)):
        # List style: each dependent is either a str or a dict with a "type".
        for dependent in group_config:
            config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
            try:
                # EAFP: assume dict-style dependent; str has no pop(), so the
                # AttributeError branch below handles the simple string form.
                dep_type = dependent.pop("type")
                config.update(dependent)
            except (AttributeError, ValueError):
                # simple style config -- dependent is a str
                dep_type = dependent
            # Replace with -- since | has a special meaning for parser
            expanded[dep_type.replace("|", "--")] = config
    else:
        # Dict style: each value is a per-dependent settings dict that
        # overrides the defaults.
        for dep_type, dep_config in group_config.items():
            config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
            dep_config.pop("type", None)
            config.update(dep_config)
            # Replace with -- since | has a special meaning for parser
            expanded[dep_type.replace("|", "--")] = config
    return expanded
def _get_config_module(app_path):
    """Load and return the app's config.py as a module object.

    Raises OSError/IOError when the app has no config file.

    NOTE(review): ``imp`` is deprecated (removed in Python 3.12); consider
    ``importlib.util.spec_from_file_location`` -- confirm supported Python
    versions before changing.
    """
    module_path = path.get_config_module_path(app_path)
    # Unique module name per app so configs of different apps don't collide
    # in sys.modules.
    config_module = imp.load_source(
        "config_module_" + os.path.basename(app_path), module_path
    )
    return config_module
def _get_default_nlp_config():
    # Deep copy so callers can mutate the returned config freely.
    return copy.deepcopy(DEFAULT_NLP_CONFIG)
def get_nlp_config(app_path=None, config=None):
    """Gets the fully specified processor configuration for the app at the
    given path.

    Args:
        app_path (str, optional): The location of the MindMeld app
        config (dict, optional): A config object to use. This will
            override the config specified by the app's config.py file.
            If necessary, this object will be expanded to a fully
            specified config object.

    Returns:
        dict: The nbest inference configuration
    """
    # An explicitly supplied config wins outright.
    if config:
        return config
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return _get_default_nlp_config()
    # Prefer a get_nlp_config() provider function in the app's config.py.
    try:
        return copy.deepcopy(module_conf.get_nlp_config())
    except AttributeError:
        pass
    # Otherwise fall back to an NLP_CONFIG attribute.
    try:
        return module_conf.NLP_CONFIG
    except AttributeError:
        pass
    return _get_default_nlp_config()
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Config class.
"""
import copy
import imp
import logging
import os
import warnings
from .. import path
from .request import validate_language_code, validate_locale_code
logger = logging.getLogger(__name__)
DUCKLING_SERVICE_NAME = "duckling"
DEFAULT_DUCKLING_URL = "http://localhost:7151/parse"
CONFIG_DEPRECATION_MAPPING = {
"DOMAIN_CLASSIFIER_CONFIG": "DOMAIN_MODEL_CONFIG",
"INTENT_CLASSIFIER_CONFIG": "INTENT_MODEL_CONFIG",
"ENTITY_RECOGNIZER_CONFIG": "ENTITY_MODEL_CONFIG",
"ROLE_CLASSIFIER_CONFIG": "ROLE_MODEL_CONFIG",
"ENTITY_RESOLVER_CONFIG": "ENTITY_RESOLUTION_CONFIG",
"QUESTION_ANSWERER_CONFIG": "QUESTION_ANSWERING_CONFIG",
"get_entity_recognizer_config": "get_entity_model_config",
"get_intent_classifier_config": "get_intent_model_config",
"get_entity_resolver_config": "get_entity_resolution_model_config",
"get_role_classifier_config": "get_role_model_config",
}
DEFAULT_DOMAIN_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {
"classifier_type": "logreg",
},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {"fit_intercept": [True, False], "C": [10, 100, 1000, 10000, 100000]},
},
"features": {"bag-of-words": {"lengths": [1]}, "freq": {"bins": 5}, "in-gaz": {}},
}
DEFAULT_INTENT_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"param_selection": {
"type": "k-fold",
"k": 10,
"grid": {
"fit_intercept": [True, False],
"C": [0.01, 1, 100, 10000, 1000000],
"class_bias": [1, 0.7, 0.3, 0],
},
},
"features": {
"bag-of-words": {"lengths": [1]},
"in-gaz": {},
"freq": {"bins": 5},
"length": {},
},
}
DEFAULT_ENTITY_RECOGNIZER_CONFIG = {
"model_type": "tagger",
"label_type": "entities",
"model_settings": {
"classifier_type": "memm",
"tag_scheme": "IOB",
"feature_scaler": "max-abs",
},
"param_selection": {
"type": "k-fold",
"k": 5,
"scoring": "accuracy",
"grid": {
"penalty": ["l1", "l2"],
"C": [0.01, 1, 100, 10000, 1000000, 100000000],
},
},
"features": {
"bag-of-words-seq": {
"ngram_lengths_to_start_positions": {
1: [-2, -1, 0, 1, 2],
2: [-2, -1, 0, 1],
}
},
"in-gaz-span-seq": {},
"sys-candidates-seq": {"start_positions": [-1, 0, 1]},
},
}
DEFAULT_ENTITY_RESOLVER_CONFIG = {"model_type": "text_relevance"}
DEFAULT_QUESTION_ANSWERER_CONFIG = {"model_type": "keyword"}
ENGLISH_LANGUAGE_CODE = "en"
ENGLISH_US_LOCALE = "en_US"
DEFAULT_LANGUAGE_CONFIG = {
"language": ENGLISH_LANGUAGE_CODE,
"locale": ENGLISH_US_LOCALE,
}
# ElasticSearch mapping to define text analysis settings for text fields.
# It defines specific index configuration for synonym indices. The common index configuration
# is in default index template.
DEFAULT_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
}
}
}
PHONETIC_ES_SYNONYM_MAPPING = {
"mappings": {
"properties": {
"sort_factor": {"type": "double"},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
"analyzer": "default_analyzer",
}
},
},
"cname": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer",
},
},
},
}
},
"settings": {
"analysis": {
"filter": {
"phonetic_filter": {
"type": "phonetic",
"encoder": "doublemetaphone",
"replace": True,
"max_code_len": 7,
}
},
"analyzer": {
"phonetic_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"token_shingle",
"phonetic_filter",
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
"remove_dot",
],
"type": "custom",
"tokenizer": "whitespace",
}
},
}
},
}
DEFAULT_ROLE_CLASSIFIER_CONFIG = {
"model_type": "text",
"model_settings": {"classifier_type": "logreg"},
"params": {"C": 100, "penalty": "l1"},
"features": {
"bag-of-words-before": {
"ngram_lengths_to_start_positions": {1: [-2, -1], 2: [-2, -1]}
},
"bag-of-words-after": {
"ngram_lengths_to_start_positions": {1: [0, 1], 2: [0, 1]}
},
"other-entities": {},
},
}
DEFAULT_ES_INDEX_TEMPLATE_NAME = "mindmeld_default"
# Default ES index template that contains the base index configuration shared across different
# types of indices. Currently all ES indices will be created using this template.
# - custom text analysis settings such as custom analyzers, token filters and character filters.
# - dynamic field mapping template for text fields
# - common fields, e.g. id.
DEFAULT_ES_INDEX_TEMPLATE = {
"template": "*",
"mappings": {
"dynamic_templates": [
{
"default_text": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {"type": "keyword", "ignore_above": 256},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer",
},
"processed_text": {
"type": "text",
"analyzer": "english",
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer",
},
},
},
}
}
],
"properties": {"id": {"type": "keyword"}},
},
"settings": {
"analysis": {
"char_filter": {
"remove_loose_apostrophes": {
"pattern": " '|' ",
"type": "pattern_replace",
"replacement": "",
},
"space_possessive_apostrophes": {
"pattern": "([^\\p{N}\\s]+)'s ",
"type": "pattern_replace",
"replacement": "$1 's ",
},
"remove_special_beginning": {
"pattern": "^[^\\p{L}\\p{N}\\p{Sc}&']+",
"type": "pattern_replace",
"replacement": "",
},
"remove_special_end": {
"pattern": "[^\\p{L}\\p{N}&']+$",
"type": "pattern_replace",
"replacement": "",
},
"remove_special1": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{N}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special2": {
"pattern": "([\\p{N}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_special3": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}]+)",
"type": "pattern_replace",
"replacement": "$1 ",
},
"remove_comma": {
"pattern": ",",
"type": "pattern_replace",
"replacement": "",
},
"remove_tm_and_r": {
"pattern": "™|®",
"type": "pattern_replace",
"replacement": "",
},
"remove_dot": {
"pattern": "([\\p{L}]+)[.]+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1",
},
},
"filter": {
"token_shingle": {
"max_shingle_size": "4",
"min_shingle_size": "2",
"output_unigrams": "true",
"type": "shingle",
},
"ngram_filter": {"type": "ngram", "min_gram": "3", "max_gram": "3"},
},
"analyzer": {
"default_analyzer": {
"filter": ["lowercase", "asciifolding", "token_shingle"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
"keyword_match_analyzer": {
"filter": ["lowercase", "asciifolding"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "keyword",
},
"char_ngram_analyzer": {
"filter": ["lowercase", "asciifolding", "ngram_filter"],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
],
"type": "custom",
"tokenizer": "whitespace",
},
},
}
},
}
# Elasticsearch mapping to define knowledge base index specific configuration:
# - dynamic field mapping to index all synonym whitelist in fields with "$whitelist" suffix.
# - location field
#
# The common configuration is defined in default index template
DEFAULT_ES_QA_MAPPING = {
    "mappings": {
        "dynamic_templates": [
            {
                # Each "<field>$whitelist" object is indexed as a nested
                # document whose "name" field gets the standard matching
                # sub-fields (raw / normalized keyword / char n-grams).
                "synonym_whitelist_text": {
                    "match": "*$whitelist",
                    "match_mapping_type": "object",
                    "mapping": {
                        "type": "nested",
                        "properties": {
                            "name": {
                                "type": "text",
                                "fields": {
                                    "raw": {"type": "keyword", "ignore_above": 256},
                                    "normalized_keyword": {
                                        "type": "text",
                                        "analyzer": "keyword_match_analyzer",
                                    },
                                    "char_ngram": {
                                        "type": "text",
                                        "analyzer": "char_ngram_analyzer",
                                    },
                                },
                                "analyzer": "default_analyzer",
                            }
                        },
                    },
                }
            }
        ],
        # Geo point for location-based queries.
        "properties": {"location": {"type": "geo_point"}},
    }
}
# Per-dependent defaults used when expanding parser group configs
# (see _expand_group_config): a dependent may attach on either side of
# its head, with no minimum and no cap on the number of instances.
DEFAULT_PARSER_DEPENDENT_CONFIG = {
    "left": True,
    "right": True,
    "min_instances": 0,
    "max_instances": None,  # None = unbounded
    "precedence": "left",  # presumably ties resolve to the left attachment -- TODO confirm
    "linking_words": frozenset(),
}
# Knowledge-base query clauses are combined with OR by default.
DEFAULT_RANKING_CONFIG = {"query_clauses_operator": "or"}
DEFAULT_NLP_CONFIG = {
    # No n-best ASR transcripts are used for entity resolution by default.
    "resolve_entities_using_nbest_transcripts": [],
    # System entities are recognized via the duckling HTTP service.
    "system_entity_recognizer": {
        "type": DUCKLING_SERVICE_NAME,
        "url": DEFAULT_DUCKLING_URL,
    },
}
class NlpConfigError(Exception):
    """Raised when an NLP configuration cannot be resolved, e.g. when an
    application path is missing or invalid."""

    pass
def get_custom_action_config(app_path):
    """Return the app's ``CUSTOM_ACTION_CONFIG``, or ``None``.

    Args:
        app_path (str): The location of the MindMeld app.

    Returns:
        The value declared in the app's config module, or ``None`` when the
        path is falsy, the attribute is absent, or no config file loads.
    """
    if not app_path:
        return None
    try:
        module = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return None
    return getattr(module, "CUSTOM_ACTION_CONFIG", None)
def get_max_history_len(app_path):
    """Return the app's ``MAX_HISTORY_LEN`` setting, or ``None``.

    Args:
        app_path (str): The location of the MindMeld app.

    Returns:
        The value declared in the app's config module, or ``None`` when the
        path is falsy, the attribute is absent, or no config file loads.
    """
    if not app_path:
        return None
    try:
        module = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return None
    return getattr(module, "MAX_HISTORY_LEN", None)
def get_language_config(app_path):
    """Return the ``(language, locale)`` pair configured for the app.

    Falls back to English / en_US when no app path is given or no
    configuration file can be loaded.

    Args:
        app_path (str): The location of the MindMeld app.

    Returns:
        tuple: ``(language_code, locale)``; locale may be ``None`` when the
        app config does not specify one.
    """
    if not app_path:
        return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
    try:
        module = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info(
            "No app configuration file found. Using default language and locale."
        )
        return ENGLISH_LANGUAGE_CODE, ENGLISH_US_LOCALE
    language_config = getattr(module, "LANGUAGE_CONFIG", DEFAULT_LANGUAGE_CONFIG)
    locale = language_config.get("locale")
    language = resolve_language(language_config.get("language"), locale)
    return language, locale
def resolve_language(language=None, locale=None):
    """Resolve the effective language code from a language and/or locale.

    A validated locale (e.g. ``en_US``) takes precedence over the explicit
    language; if neither yields a language, English is used.

    Returns:
        str: A lower-cased language code.
    """
    valid_locale = validate_locale_code(locale)
    valid_language = validate_language_code(language)
    if valid_locale:
        # Locale overrides language: take the part before the underscore.
        valid_language = valid_locale.split("_")[0]
    return (valid_language or ENGLISH_LANGUAGE_CODE).lower()
def get_app_namespace(app_path):
    """Return the namespace of the application at ``app_path``.

    Prefers ``APP_NAMESPACE`` from the app's config module; otherwise uses
    the application directory name. Under Jupyter the namespace is prefixed
    with the ``JUPYTER_USER`` environment variable.
    """
    jupyter_user = os.environ.get("JUPYTER_USER")
    try:
        namespace = _get_config_module(app_path).APP_NAMESPACE
        if jupyter_user is not None:
            namespace = "{}_{}".format(jupyter_user, namespace)
        return namespace
    except (OSError, IOError):
        logger.debug("No app configuration file found")
    except AttributeError:
        logger.debug("App namespace not set in app configuration")
    # Fall back to the directory name, resolving relative paths first.
    resolved = app_path if os.path.isabs(app_path) else os.path.abspath(app_path)
    namespace = os.path.split(resolved)[1]
    if jupyter_user is not None:
        namespace = "{jupyter_user}_{app_namespace}".format(
            jupyter_user=jupyter_user, app_namespace=namespace
        )
    return namespace
def is_duckling_configured(app_path):
    """Returns True if the app config specifies that duckling should be run
    as a system entity recognizer

    Args:
        app_path (str): A application path

    Returns:
        (bool): True if the app config specifies that the numerical parsing
            should be run
    """
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    recognizer = get_nlp_config(app_path).get("system_entity_recognizer")
    # A dict means the app explicitly configured a recognizer; anything
    # else means no explicit config, which defaults to the duckling API.
    if not isinstance(recognizer, dict):
        return True
    return recognizer.get("type") == DUCKLING_SERVICE_NAME
def get_system_entity_url_config(app_path):
    """Return the system entity recognizer URL from the app's config.

    Falls back to the default duckling URL when the app does not define one.

    Raises:
        NlpConfigError: If ``app_path`` is falsy.
    """
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    recognizer = get_nlp_config(app_path).get("system_entity_recognizer", {})
    return recognizer.get("url", DEFAULT_DUCKLING_URL)
def get_classifier_config(
    clf_type, app_path=None, domain=None, intent=None, entity=None
):
    """Returns the config for the specified classifier, with the
    following order of precedence.
    If the application contains a config.py file:
    - Return the response from the get_*_model_config function in
    config.py for the specified classifier type. E.g.
    `get_intent_model_config`.
    - If the function does not exist, or raise an exception, return the
    config specified by *_MODEL_CONFIG in config.py, e.g.
    INTENT_MODEL_CONFIG.
    Otherwise, use the MindMeld default config for the classifier type
    Args:
        clf_type (str): The type of the classifier. One of 'domain',
            'intent', 'entity', 'entity_resolution', or 'role'.
        app_path (str, optional): The location of the app
        domain (str, optional): The domain of the classifier
        intent (str, optional): The intent of the classifier
        entity (str, optional): The entity type of the classifier
    Returns:
        dict: A classifier config
    """
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        # No config.py at all -- use the built-in defaults.
        logger.info(
            "No app configuration file found. Using default %s model configuration",
            clf_type,
        )
        return _get_default_classifier_config(clf_type)
    # Provider function name per classifier type.
    # NOTE(review): 'domain' and 'question_answering' have no provider
    # function mapped here, so those configs can only come from the
    # *_CONFIG attribute lookup further below -- confirm intentional.
    func_name = {
        "intent": "get_intent_classifier_config",
        "entity": "get_entity_recognizer_config",
        "entity_resolution": "get_entity_resolver_config",
        "role": "get_role_classifier_config",
    }.get(clf_type)
    # Positional context each provider function expects.
    func_args = {
        "intent": ("domain",),
        "entity": ("domain", "intent"),
        "entity_resolution": ("domain", "intent", "entity"),
        "role": ("domain", "intent", "entity"),
    }.get(clf_type)
    if func_name:
        func = None
        try:
            func = getattr(module_conf, func_name)
        except AttributeError:
            # Fall back to the deprecated provider name, warning the user.
            try:
                func = getattr(module_conf, CONFIG_DEPRECATION_MAPPING[func_name])
                msg = (
                    "%s config key is deprecated. Please use the equivalent %s config "
                    "key" % (CONFIG_DEPRECATION_MAPPING[func_name], func_name)
                )
                warnings.warn(msg, DeprecationWarning)
            except AttributeError:
                pass
        if func:
            try:
                # Pass only the context keys this provider expects.
                raw_args = {"domain": domain, "intent": intent, "entity": entity}
                args = {k: raw_args[k] for k in func_args}
                return copy.deepcopy(func(**args))
            except Exception as exc:  # pylint: disable=broad-except
                # Note: this is intentionally broad -- provider could raise any exception
                logger.warning(
                    "%r configuration provider raised exception: %s", clf_type, exc
                )
    # Static config attribute per classifier type (raises KeyError for an
    # unknown clf_type).
    attr_name = {
        "domain": "DOMAIN_CLASSIFIER_CONFIG",
        "intent": "INTENT_CLASSIFIER_CONFIG",
        "entity": "ENTITY_RECOGNIZER_CONFIG",
        "entity_resolution": "ENTITY_RESOLVER_CONFIG",
        "role": "ROLE_CLASSIFIER_CONFIG",
        "question_answering": "QUESTION_ANSWERER_CONFIG",
    }[clf_type]
    try:
        return copy.deepcopy(getattr(module_conf, attr_name))
    except AttributeError:
        # Deprecated attribute name, then the built-in default.
        try:
            result = copy.deepcopy(
                getattr(module_conf, CONFIG_DEPRECATION_MAPPING[attr_name])
            )
            msg = (
                "%s config is deprecated. Please use the equivalent %s config "
                "key" % (CONFIG_DEPRECATION_MAPPING[attr_name], attr_name)
            )
            warnings.warn(msg, DeprecationWarning)
            return result
        except AttributeError:
            logger.info("No %s model configuration set. Using default.", clf_type)
            return _get_default_classifier_config(clf_type)
def _get_default_classifier_config(clf_type):
    """Return a deep copy of the built-in default config for ``clf_type``.

    Args:
        clf_type (str): One of 'domain', 'intent', 'entity',
            'entity_resolution', 'role', 'language_config' or
            'question_answering'.

    Returns:
        dict: A fresh copy the caller may mutate freely.

    Raises:
        KeyError: If ``clf_type`` is not a known classifier type.
    """
    defaults_by_type = {
        "domain": DEFAULT_DOMAIN_CLASSIFIER_CONFIG,
        "intent": DEFAULT_INTENT_CLASSIFIER_CONFIG,
        "entity": DEFAULT_ENTITY_RECOGNIZER_CONFIG,
        "entity_resolution": DEFAULT_ENTITY_RESOLVER_CONFIG,
        "role": DEFAULT_ROLE_CLASSIFIER_CONFIG,
        "language_config": DEFAULT_LANGUAGE_CONFIG,
        "question_answering": DEFAULT_QUESTION_ANSWERER_CONFIG,
    }
    return copy.deepcopy(defaults_by_type[clf_type])
def get_parser_config(app_path=None, config=None, domain=None, intent=None):
    """Gets the fully specified parser configuration for the app at the
    given path.

    Args:
        app_path (str, optional): The location of the MindMeld app
        config (dict, optional): A config object to use. This will
            override the config specified by the app's config.py file.
            If necessary, this object will be expanded to a fully
            specified config object.
        domain (str, optional): The domain of the parser
        intent (str, optional): The intent of the parser

    Returns:
        dict: A fully parser configuration
    """
    if config:
        return _expand_parser_config(config)
    if not app_path:
        raise NlpConfigError("Application path is not valid")
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found. Not configuring parser.")
        return _get_default_parser_config()
    # First preference: a get_parser_config() provider in config.py.
    provider = getattr(module_conf, "get_parser_config", None)
    if provider:
        try:
            return _expand_parser_config(provider(domain, intent))
        except Exception as exc:  # pylint: disable=broad-except
            # Intentionally broad -- the provider may raise anything.
            logger.warning("Parser configuration provider raised exception: %s", exc)
    # Second preference: a PARSER_CONFIG object in config.py.
    try:
        return _expand_parser_config(module_conf.PARSER_CONFIG)
    except AttributeError:
        pass
    return _get_default_parser_config()
def _get_default_parser_config():
return None
def _expand_parser_config(config):
    """Expand every group of a parser config into fully specified form.

    Head names have ``|`` replaced by ``--`` because ``|`` has a special
    meaning for the parser.
    """
    expanded = {}
    for head, group in config.items():
        expanded[head.replace("|", "--")] = _expand_group_config(group)
    return expanded
def _expand_group_config(group_config):
    """Expands a parser group configuration.
    A group config can either be a list of dependents or a dictionary with a
    field for each dependent.
    In the list a dependent can be a string containing the name of the
    entity-role type identifier or a dictionary with at least a type field.
    In the dictionary the dependent must be another dictionary.
    Some example parser configs follow below.
    A very simple configuration:
       {
           'head': ['dependent']
       }
    A more realistic simple config:
        {
            'product|beverage': ['size', 'quantity', 'option|beverage'],
            'product|baked-good': ['size', 'quantity', 'option|baked-good'],
            'store': ['location'],
            'option': ['size']
        }
    A fully specified config:
        {
            'product': {
                'quantity': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 3
                },
                'size': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                },
                'option': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            },
            'store': {
                'location': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            },
            'option': {
                'size': {
                    'left': True,
                    'right': True,
                    'precedence': 'left',
                    'min_instances': 0,
                    'max_instances': 1
                }
            }
        }
    """
    # Deep copy so caller-supplied config objects are never mutated.
    group_config = copy.deepcopy(group_config)
    expanded = {}
    if isinstance(group_config, (tuple, list, set)):
        # List style: each dependent is a str or a dict with a 'type' key.
        for dependent in group_config:
            config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
            try:
                # NOTE(review): a dict dependent without a 'type' key raises
                # an uncaught KeyError here (only AttributeError/ValueError
                # are handled) -- presumably invalid config; confirm.
                dep_type = dependent.pop("type")
                config.update(dependent)
            except (AttributeError, ValueError):
                # simple style config -- dependent is a str
                dep_type = dependent
            # Replace with -- since | has a special meaning for parser
            expanded[dep_type.replace("|", "--")] = config
    else:
        # Dict style: each dependent maps to its (partial) settings, which
        # are overlaid on the defaults.
        for dep_type, dep_config in group_config.items():
            config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
            dep_config.pop("type", None)
            config.update(dep_config)
            # Replace with -- since | has a special meaning for parser
            expanded[dep_type.replace("|", "--")] = config
    return expanded
def _get_config_module(app_path):
    """Load and return the app's config.py as a module object.

    Args:
        app_path (str): The location of the MindMeld app.

    Returns:
        module: The loaded configuration module.

    Raises:
        OSError: If no configuration file exists at the resolved path.
    """
    # ``imp.load_source`` is deprecated and removed in Python 3.12; use the
    # importlib equivalent, preserving the old behavior of registering the
    # loaded module in sys.modules under a per-app name.
    import importlib.util
    import sys

    module_path = path.get_config_module_path(app_path)
    module_name = "config_module_" + os.path.basename(app_path)
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    if spec is None or spec.loader is None:
        raise IOError("Cannot load configuration module from %s" % module_path)
    config_module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = config_module
    spec.loader.exec_module(config_module)
    return config_module
def _get_default_nlp_config():
    """Return a fresh deep copy of the default NLP configuration."""
    defaults = DEFAULT_NLP_CONFIG
    return copy.deepcopy(defaults)
def get_nlp_config(app_path=None, config=None):
    """Gets the fully specified processor configuration for the app at the
    given path.

    Args:
        app_path (str, optional): The location of the MindMeld app
        config (dict, optional): A config object to use. This will
            override the config specified by the app's config.py file.
            If necessary, this object will be expanded to a fully
            specified config object.

    Returns:
        dict: The nbest inference configuration
    """
    if config:
        return config
    try:
        module_conf = _get_config_module(app_path)
    except (OSError, IOError):
        logger.info("No app configuration file found.")
        return _get_default_nlp_config()
    # First preference: a get_nlp_config() provider in config.py.
    try:
        return copy.deepcopy(module_conf.get_nlp_config())
    except AttributeError:
        pass
    # Second preference: an NLP_CONFIG object in config.py (returned
    # verbatim, even if falsy -- hence the sentinel rather than getattr
    # with a None default).
    _missing = object()
    nlp_config = getattr(module_conf, "NLP_CONFIG", _missing)
    if nlp_config is not _missing:
        return nlp_config
    return _get_default_nlp_config()
|
en
| 0.594348
|
# -*- coding: utf-8 -*- # # Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This module contains the Config class. # ElasticSearch mapping to define text analysis settings for text fields. # It defines specific index configuration for synonym indices. The common index configuration # is in default index template. # Default ES index template that contains the base index configuration shared across different # types of indices. Currently all ES indices will be created using this template. # - custom text analysis settings such as custom analyzers, token filters and character filters. # - dynamic field mapping template for text fields # - common fields, e.g. id. # Elasticsearch mapping to define knowledge base index specific configuration: # - dynamic field mapping to index all synonym whitelist in fields with "$whitelist" suffix. # - location field # # The common configuration is defined in default index template Resolves to a language given a locale. 
# Locale overrides language Returns the namespace of the application at app_path # If a relative path is passed in, we resolve to its abspath Returns True if the app config specifies that duckling should be run as a system entity recognizer Args: app_path (str): A application path Returns: (bool): True if the app config specifies that the numerical parsing should be run # We get into this conditional when the app has specified the system_entity_recognizer # nlp config # We get into this conditional when the app has not specified the system_entity_recognizer # nlp config, in which case, we default to the duckling API Get system entity url from the application's config. If the application does not define the url, return the default duckling url. Returns the config for the specified classifier, with the following order of precedence. If the application contains a config.py file: - Return the response from the get_*_model_config function in config.py for the specified classifier type. E.g. `get_intent_model_config`. - If the function does not exist, or raise an exception, return the config specified by *_MODEL_CONFIG in config.py, e.g. INTENT_MODEL_CONFIG. Otherwise, use the MindMeld default config for the classifier type Args: clf_type (str): The type of the classifier. One of 'domain', 'intent', 'entity', 'entity_resolution', or 'role'. app_path (str, optional): The location of the app domain (str, optional): The domain of the classifier intent (str, optional): The intent of the classifier entity (str, optional): The entity type of the classifier Returns: dict: A classifier config # pylint: disable=broad-except # Note: this is intentionally broad -- provider could raise any exception Gets the fully specified parser configuration for the app at the given path. Args: app_path (str, optional): The location of the MindMeld app config (dict, optional): A config object to use. This will override the config specified by the app's config.py file. 
If necessary, this object will be expanded to a fully specified config object. domain (str, optional): The domain of the parser intent (str, optional): The intent of the parser Returns: dict: A fully parser configuration # Try provider first # pylint: disable=broad-except # Note: this is intentionally broad -- provider could raise any exception # Try object second # Replace with -- since | has a special meaning for parser Expands a parser group configuration. A group config can either be a list of dependents or a dictionary with a field for each dependent. In the list a dependent can be a string containing the name of the entity-role type identifier or a dictionary with at least a type field. In the dictionary the dependent must be another dictionary. Some example parser configs follow below. A very simple configuration: { 'head': ['dependent'] } A more realistic simple config: { 'product|beverage': ['size', 'quantity', 'option|beverage'], 'product|baked-good': ['size', 'quantity', 'option|baked-good'], 'store': ['location'], 'option': ['size'] } A fully specified config: { 'product': { 'quantity': { 'left': True, 'right': True, 'precedence': 'left', 'min_instances': 0, 'max_instances': 3 }, 'size': { 'left': True, 'right': True, 'precedence': 'left', 'min_instances': 0, 'max_instances': 1 }, 'option': { 'left': True, 'right': True, 'precedence': 'left', 'min_instances': 0, 'max_instances': 1 } }, 'store': { 'location': { 'left': True, 'right': True, 'precedence': 'left', 'min_instances': 0, 'max_instances': 1 } }, 'option': { 'size': { 'left': True, 'right': True, 'precedence': 'left', 'min_instances': 0, 'max_instances': 1 } } } # simple style config -- dependent is a str # Replace with -- since | has a special meaning for parser # Replace with -- since | has a special meaning for parser Gets the fully specified processor configuration for the app at the given path. 
Args: app_path (str, optional): The location of the MindMeld app config (dict, optional): A config object to use. This will override the config specified by the app's config.py file. If necessary, this object will be expanded to a fully specified config object. Returns: dict: The nbest inference configuration # Try provider first # Try object second
| 1.533501
| 2
|
jobmonitor/star.py
|
tvogels/job-monitor
| 1
|
6626444
|
<reponame>tvogels/job-monitor<filename>jobmonitor/star.py<gh_stars>1-10
#!/usr/bin/env python3
from bson.objectid import ObjectId

import jobmonitor.delete
from jobmonitor.connections import mongo

# NOTE: the original module comment claimed "Kill a job if it is running on
# kubernetes", which described a different command. This tool only marks a
# job as starred in MongoDB; it does not touch kubernetes.


def star(job_id):
    """Set the ``annotations.star`` flag on the job document with this id."""
    mongo.job.update_one({"_id": ObjectId(job_id)}, {"$set": {"annotations.star": True}})


def main():
    """Run the shared jobmonitor.delete driver with ``star`` as the action
    (presumably it resolves job ids from CLI args -- confirm in
    jobmonitor.delete)."""
    jobmonitor.delete.main(star, action_name="star")


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
from bson.objectid import ObjectId

import jobmonitor.delete
from jobmonitor.connections import mongo

# NOTE: the original module comment claimed "Kill a job if it is running on
# kubernetes", which described a different command. This tool only marks a
# job as starred in MongoDB; it does not touch kubernetes.


def star(job_id):
    """Set the ``annotations.star`` flag on the job document with this id."""
    mongo.job.update_one({"_id": ObjectId(job_id)}, {"$set": {"annotations.star": True}})


def main():
    """Run the shared jobmonitor.delete driver with ``star`` as the action
    (presumably it resolves job ids from CLI args -- confirm in
    jobmonitor.delete)."""
    jobmonitor.delete.main(star, action_name="star")


if __name__ == "__main__":
    main()
|
en
| 0.45511
|
#!/usr/bin/env python3 Kill a job if it is running on kubernetes
| 2.371374
| 2
|
jupyter_xprahtml5_proxy/__init__.py
|
ccha23/jupyter-xprahtml5-proxy
| 0
|
6626445
|
<reponame>ccha23/jupyter-xprahtml5-proxy<gh_stars>0
import os
import logging
# Package-level logger.
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
# Absolute path of this package directory; used to locate the bundled
# launch script and icon under ``share/``.
HERE = os.path.dirname(os.path.abspath(__file__))
### Why are so many lines commented?
# Currently jupyter-server-proxy does not support url-parameters, yet.
# A pull request is waiting to be merged: https://github.com/jupyterhub/jupyter-server-proxy/pull/226
# Be aware! Until then, we need to comment the support for password and encryption.
# def _xprahtml5_urlparams():
# from getpass import getuser
#
# url_params = '?' + '&'.join([
# 'username=' + getuser(),
# 'password=' + <PASSWORD>,
# 'encryption=AES',
# 'key=' + _xprahtml5_aeskey,
# 'sharing=true',
# ])
#
# return url_params
def _xprahtml5_mappath(path):
# # always pass the url parameter
if path in ('/', '/index.html', ):
# url_params = _xprahtml5_urlparams()
path = '/index.html' # + url_params
return path
def setup_xprahtml5():
    """Setup commands and return a dictionary compatible
    with jupyter-server-proxy.
    """
    from pathlib import Path
    from tempfile import gettempdir, mkstemp  # mkstemp kept for the commented-out password/key code below
    # from random import choice
    # from string import ascii_letters, digits
    # Globals reserved for the (currently disabled) password/encryption support.
    global _xprahtml5_passwd, _xprahtml5_aeskey
    # # password generator
    # def _get_random_alphanumeric_string(length):
    # letters_and_digits = ascii_letters + digits
    # return (''.join((choice(letters_and_digits) for i in range(length))))
    #
    # ensure a known secure sockets directory exists, as /run/user/$UID might not be available
    socket_path = os.path.join(gettempdir(), 'xpra_sockets_' + str(os.getuid()))
    Path(socket_path).mkdir(mode=0o700, parents=True, exist_ok=True)
    logger.info('Created secure socket directory for Xpra: ' + socket_path)
    # # generate file with random one-time-password
    # _xprahtml5_passwd = _get_random_alphanumeric_string(16)
    # try:
    # fd_passwd, fpath_passwd = mkstemp()
    # logger.info('Created secure password file for Xpra: ' + fpath_passwd)
    #
    # with open(fd_passwd, 'w') as f:
    # f.write(_xprahtml5_passwd)
    #
    # except Exception:
    # logger.error("Passwd generation in temp file FAILED")
    # raise FileNotFoundError("Passwd generation in temp file FAILED")
    #
    # # generate file with random encryption key
    # _xprahtml5_aeskey = _get_random_alphanumeric_string(16)
    # try:
    # fd_aeskey, fpath_aeskey = mkstemp()
    # logger.info('Created secure encryption key file for Xpra: ' + fpath_aeskey)
    #
    # with open(fd_aeskey, 'w') as f:
    # f.write(_xprahtml5_aeskey)
    #
    # except Exception:
    # logger.error("Encryption key generation in temp file FAILED")
    # raise FileNotFoundError("Encryption key generation in temp file FAILED")
    #
    # # launchers url file including url parameters
    # urlfile = 'index.html' + _xprahtml5_urlparams()
    # create command
    cmd = [
        os.path.join(HERE, 'share/launch_xpra.sh'),
        'start',
        '--html=on',
        '--bind-tcp=0.0.0.0:{port}',
        # '--socket-dir="' + socket_path + '/"', # fixme: socket_dir not recognized
        # '--server-idle-timeout=86400', # stop server after 24h with no client connection
        # '--exit-with-client=yes', # stop Xpra when the browser disconnects
        '--start=xterm -fa Monospace',
        # '--start=xfce4-session',
        # '--start-child=xterm', '--exit-with-children',
        # '--tcp-auth=file:filename=' + fpath_passwd,
        # '--tcp-encryption=AES',
        # '--tcp-encryption-keyfile=' + fpath_aeskey,
        '--clipboard-direction=both',
        '--no-mdns', # do not advertise the xpra session on the local network
        '--no-bell',
        '--no-speaker',
        '--no-printing',
        '--no-microphone',
        '--no-notifications',
        '--no-systemd-run', # do not delegated start-cmd to the system wide proxy server instance
        # '--dpi=96', # only needed if Xserver does not support dynamic dpi change
        # '--sharing', # this allows to open the desktop in multiple browsers at the same time
        '--no-daemon', # mandatory
    ]
    logger.info('Xpra command: ' + ' '.join(cmd))
    # Server-process specification consumed by jupyter-server-proxy.
    return {
        'environment': { # as '--socket-dir' does not work as expected, we set this
            'XDG_RUNTIME_DIR': socket_path,
        },
        'command': cmd,
        'mappath': _xprahtml5_mappath,
        'absolute_url': False,
        'timeout': 90,
        'new_browser_tab': True,
        'launcher_entry': {
            'enabled': True,
            'icon_path': os.path.join(HERE, 'share/xpra-logo.svg'),
            'title': 'Xpra Desktop',
            # 'urlfile': urlfile,
        },
    }
|
import os
import logging
# Package-level logger.
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
# Absolute path of this package directory; used to locate the bundled
# launch script and icon under ``share/``.
HERE = os.path.dirname(os.path.abspath(__file__))
### Why are so many lines commented?
# Currently jupyter-server-proxy does not support url-parameters, yet.
# A pull request is waiting to be merged: https://github.com/jupyterhub/jupyter-server-proxy/pull/226
# Be aware! Until then, we need to comment the support for password and encryption.
# def _xprahtml5_urlparams():
# from getpass import getuser
#
# url_params = '?' + '&'.join([
# 'username=' + getuser(),
# 'password=' + <PASSWORD>,
# 'encryption=AES',
# 'key=' + _xprahtml5_aeskey,
# 'sharing=true',
# ])
#
# return url_params
def _xprahtml5_mappath(path):
# # always pass the url parameter
if path in ('/', '/index.html', ):
# url_params = _xprahtml5_urlparams()
path = '/index.html' # + url_params
return path
def setup_xprahtml5():
    """Setup commands and return a dictionary compatible
    with jupyter-server-proxy.
    """
    from pathlib import Path
    from tempfile import gettempdir, mkstemp  # mkstemp kept for the commented-out password/key code below
    # from random import choice
    # from string import ascii_letters, digits
    # Globals reserved for the (currently disabled) password/encryption support.
    global _xprahtml5_passwd, _xprahtml5_aeskey
    # # password generator
    # def _get_random_alphanumeric_string(length):
    # letters_and_digits = ascii_letters + digits
    # return (''.join((choice(letters_and_digits) for i in range(length))))
    #
    # ensure a known secure sockets directory exists, as /run/user/$UID might not be available
    socket_path = os.path.join(gettempdir(), 'xpra_sockets_' + str(os.getuid()))
    Path(socket_path).mkdir(mode=0o700, parents=True, exist_ok=True)
    logger.info('Created secure socket directory for Xpra: ' + socket_path)
    # # generate file with random one-time-password
    # _xprahtml5_passwd = _get_random_alphanumeric_string(16)
    # try:
    # fd_passwd, fpath_passwd = mkstemp()
    # logger.info('Created secure password file for Xpra: ' + fpath_passwd)
    #
    # with open(fd_passwd, 'w') as f:
    # f.write(_xprahtml5_passwd)
    #
    # except Exception:
    # logger.error("Passwd generation in temp file FAILED")
    # raise FileNotFoundError("Passwd generation in temp file FAILED")
    #
    # # generate file with random encryption key
    # _xprahtml5_aeskey = _get_random_alphanumeric_string(16)
    # try:
    # fd_aeskey, fpath_aeskey = mkstemp()
    # logger.info('Created secure encryption key file for Xpra: ' + fpath_aeskey)
    #
    # with open(fd_aeskey, 'w') as f:
    # f.write(_xprahtml5_aeskey)
    #
    # except Exception:
    # logger.error("Encryption key generation in temp file FAILED")
    # raise FileNotFoundError("Encryption key generation in temp file FAILED")
    #
    # # launchers url file including url parameters
    # urlfile = 'index.html' + _xprahtml5_urlparams()
    # create command
    cmd = [
        os.path.join(HERE, 'share/launch_xpra.sh'),
        'start',
        '--html=on',
        '--bind-tcp=0.0.0.0:{port}',
        # '--socket-dir="' + socket_path + '/"', # fixme: socket_dir not recognized
        # '--server-idle-timeout=86400', # stop server after 24h with no client connection
        # '--exit-with-client=yes', # stop Xpra when the browser disconnects
        '--start=xterm -fa Monospace',
        # '--start=xfce4-session',
        # '--start-child=xterm', '--exit-with-children',
        # '--tcp-auth=file:filename=' + fpath_passwd,
        # '--tcp-encryption=AES',
        # '--tcp-encryption-keyfile=' + fpath_aeskey,
        '--clipboard-direction=both',
        '--no-mdns', # do not advertise the xpra session on the local network
        '--no-bell',
        '--no-speaker',
        '--no-printing',
        '--no-microphone',
        '--no-notifications',
        '--no-systemd-run', # do not delegated start-cmd to the system wide proxy server instance
        # '--dpi=96', # only needed if Xserver does not support dynamic dpi change
        # '--sharing', # this allows to open the desktop in multiple browsers at the same time
        '--no-daemon', # mandatory
    ]
    logger.info('Xpra command: ' + ' '.join(cmd))
    # Server-process specification consumed by jupyter-server-proxy.
    return {
        'environment': { # as '--socket-dir' does not work as expected, we set this
            'XDG_RUNTIME_DIR': socket_path,
        },
        'command': cmd,
        'mappath': _xprahtml5_mappath,
        'absolute_url': False,
        'timeout': 90,
        'new_browser_tab': True,
        'launcher_entry': {
            'enabled': True,
            'icon_path': os.path.join(HERE, 'share/xpra-logo.svg'),
            'title': 'Xpra Desktop',
            # 'urlfile': urlfile,
        },
    }
|
en
| 0.602482
|
### Why are so many lines commented? # Currently jupyter-server-proxy does not support url-parameters, yet. # A pull request is waiting to be merged: https://github.com/jupyterhub/jupyter-server-proxy/pull/226 # Be aware! Until then, we need to comment the support for password and encryption. # def _xprahtml5_urlparams(): # from getpass import getuser # # url_params = '?' + '&'.join([ # 'username=' + getuser(), # 'password=' + <PASSWORD>, # 'encryption=AES', # 'key=' + _xprahtml5_aeskey, # 'sharing=true', # ]) # # return url_params # # always pass the url parameter # url_params = _xprahtml5_urlparams() # + url_params Setup commands and and return a dictionary compatible with jupyter-server-proxy. # from random import choice # from string import ascii_letters, digits # # password generator # def _get_random_alphanumeric_string(length): # letters_and_digits = ascii_letters + digits # return (''.join((choice(letters_and_digits) for i in range(length)))) # # ensure a known secure sockets directory exists, as /run/user/$UID might not be available # # generate file with random one-time-password # _xprahtml5_passwd = _get_random_alphanumeric_string(16) # try: # fd_passwd, fpath_passwd = mkstemp() # logger.info('Created secure password file for Xpra: ' + fpath_passwd) # # with open(fd_passwd, 'w') as f: # f.write(_xprahtml5_passwd) # # except Exception: # logger.error("Passwd generation in temp file FAILED") # raise FileNotFoundError("Passwd generation in temp file FAILED") # # # generate file with random encryption key # _xprahtml5_aeskey = _get_random_alphanumeric_string(16) # try: # fd_aeskey, fpath_aeskey = mkstemp() # logger.info('Created secure encryption key file for Xpra: ' + fpath_aeskey) # # with open(fd_aeskey, 'w') as f: # f.write(_xprahtml5_aeskey) # # except Exception: # logger.error("Encryption key generation in temp file FAILED") # raise FileNotFoundError("Encryption key generation in temp file FAILED") # # # launchers url file including url parameters # 
urlfile = 'index.html' + _xprahtml5_urlparams() # create command # '--socket-dir="' + socket_path + '/"', # fixme: socket_dir not recognized # '--server-idle-timeout=86400', # stop server after 24h with no client connection # '--exit-with-client=yes', # stop Xpra when the browser disconnects # '--start=xfce4-session', # '--start-child=xterm', '--exit-with-children', # '--tcp-auth=file:filename=' + fpath_passwd, # '--tcp-encryption=AES', # '--tcp-encryption-keyfile=' + fpath_aeskey, # do not advertise the xpra session on the local network # do not delegated start-cmd to the system wide proxy server instance # '--dpi=96', # only needed if Xserver does not support dynamic dpi change # '--sharing', # this allows to open the desktop in multiple browsers at the same time # mandatory # as '--socket-dir' does not work as expected, we set this # 'urlfile': urlfile,
| 2.205506
| 2
|
onecodex/vendored/potion_client/links.py
|
jairideout/onecodex
| 0
|
6626446
|
# flake8: noqa
try:
import simplejson as json
except ImportError:
import json
import re
from requests import Request
from requests.exceptions import HTTPError
from .collection import PaginatedList
from .converter import PotionJSONEncoder, PotionJSONDecoder
from .schema import Schema
class Link(object):
    """Descriptor describing one Potion API link: an HTTP method plus an href template."""

    def __init__(self, client, method, href, rel, schema=None, target_schema=None):
        # NOTE(review): ``client`` is accepted but never stored — presumably kept
        # for signature compatibility with the caller; confirm before removing.
        self.method = method
        self.href = href
        # Placeholder names appearing in the template, e.g. 'id' in '/resource/{id}'.
        self.href_placeholders = re.findall(r"{(\w+)}", href)
        self.rel = rel
        self.schema = Schema(schema)
        self.target_schema = Schema(target_schema)

    @property
    def requires_instance(self):
        """True when the href template needs an instance id to be resolved."""
        return self.href.find('{id}') != -1

    def returns_pagination(self):
        """True when this is a GET link whose schema accepts page/per_page parameters."""
        if self.method != 'GET' or self.schema is None:
            return False
        props = self.schema.get('properties', {})
        return all(key in props for key in ('page', 'per_page'))

    def __get__(self, instance, owner):
        # Descriptor protocol: bind the link to the instance/class it is accessed through.
        return LinkBinding(self, instance, owner)
class LinkBinding(object):
    """A Link bound to a resource instance (or class); calling it performs the request."""

    def __init__(self, link, instance, owner):
        self.link = link
        self.instance = instance  # resource instance, or None for class-level links
        self.owner = owner        # resource class the link is attached to

    def request_factory(self, data, params):
        """Build an unprepared :class:`requests.Request` for this link.

        ``params`` fills the href placeholders; leftover params accepted by the
        link's schema become query parameters (GET) or the JSON body otherwise.
        """
        if self.instance is None:
            request_url = self.owner._client._root_url + self.link.href.format(**params)
        else:
            # Instance properties (plus its id) resolve the href placeholders.
            request_url = self.owner._client._root_url + self.link.href.format(id=self.instance.id, **self.instance)
        request_data = data
        request_params = {name: value for name, value in params.items()
                          if name not in self.link.href_placeholders and self.link.schema.can_include_property(name)}
        if data is None:
            request_data = request_params
        elif isinstance(data, dict):
            request_params = data
        if self.link.method == 'GET':
            # GET: every parameter value is JSON-encoded into the query string.
            req = Request(self.link.method,
                          request_url,
                          params={k: json.dumps(v, cls=PotionJSONEncoder)
                                  for k, v in request_params.items()})
        else:
            req = Request(self.link.method,
                          request_url,
                          headers={'content-type': 'application/json'},
                          data=json.dumps(request_data, cls=PotionJSONEncoder))
        return req

    def raise_for_status(self, response):
        """Raise :class:`HTTPError` for 4xx/5xx responses.

        For client errors the decoded JSON body is preferred as the message; a
        plain status line is used when the body is not valid JSON.
        """
        http_error_msg = ''
        if 400 <= response.status_code < 500:
            try:
                http_error_msg = response.json()
            except ValueError:
                # Body was not JSON. (Was a bare ``except:`` which also
                # swallowed KeyboardInterrupt/SystemExit; ``response.json()``
                # signals decode failure with a ValueError subclass.)
                http_error_msg = ('{code} Client Error: {reason} for url: {url}'.format(
                    code=response.status_code, reason=response.reason, url=response.url)
                )
        elif 500 <= response.status_code < 600:
            http_error_msg = ('{code} Server Error: {reason} for url: {url}'.format(
                code=response.status_code, reason=response.reason, url=response.url)
            )
        if http_error_msg:
            raise HTTPError(http_error_msg, response=response)

    def make_request(self, data, params):
        """Send the request; return ``(response, decoded_body)``.

        Raises HTTPError for error statuses; a 204 response yields ``None`` body.
        """
        req = self.request_factory(data, params)
        prepared_request = self.owner._client.session.prepare_request(req)
        response = self.owner._client.session.send(prepared_request)
        # return error for some error conditions
        self.raise_for_status(response)
        if response.status_code == 204:
            return response, None
        return response, response.json(cls=PotionJSONDecoder,
                                       client=self.owner._client,
                                       default_instance=self.instance)

    def __getattr__(self, item):
        # Fall through to the underlying Link for anything not defined here.
        return getattr(self.link, item)

    def __call__(self, *arg, **params):
        data = None
        # Need to pass positional argument as *arg so that properties of the same name are not overridden in **params.
        if len(arg) > 1:
            raise TypeError('Link must be called with no more than one positional argument')
        elif len(arg) == 1:
            data = arg[0]
        if self.link.returns_pagination():
            return PaginatedList(self, params)
        response, response_data = self.make_request(data, params)
        return response_data
|
# flake8: noqa
try:
import simplejson as json
except ImportError:
import json
import re
from requests import Request
from requests.exceptions import HTTPError
from .collection import PaginatedList
from .converter import PotionJSONEncoder, PotionJSONDecoder
from .schema import Schema
class Link(object):
def __init__(self, client, method, href, rel, schema=None, target_schema=None):
self.method = method
self.href_placeholders = re.findall(r"{(\w+)}", href)
self.href = href
self.rel = rel
self.schema = Schema(schema)
self.target_schema = Schema(target_schema)
@property
def requires_instance(self):
return '{id}' in self.href
def returns_pagination(self):
if self.method == 'GET' and self.schema is not None:
schema_properties = self.schema.get('properties', {})
return 'page' in schema_properties and 'per_page' in schema_properties
return False
def __get__(self, instance, owner):
return LinkBinding(self, instance, owner)
class LinkBinding(object):
def __init__(self, link, instance, owner):
self.link = link
self.instance = instance
self.owner = owner
def request_factory(self, data, params):
if self.instance is None:
request_url = self.owner._client._root_url + self.link.href.format(**params)
else:
request_url = self.owner._client._root_url + self.link.href.format(id=self.instance.id, **self.instance)
request_data = data
request_params = {name: value for name, value in params.items()
if name not in self.link.href_placeholders and self.link.schema.can_include_property(name)}
if data is None:
request_data = request_params
elif isinstance(data, dict):
request_params = data
if self.link.method == 'GET':
req = Request(self.link.method,
request_url,
params={k: json.dumps(v, cls=PotionJSONEncoder)
for k, v in request_params.items()})
else:
req = Request(self.link.method,
request_url,
headers={'content-type': 'application/json'},
data=json.dumps(request_data, cls=PotionJSONEncoder))
return req
def raise_for_status(self, response):
http_error_msg = ''
if 400 <= response.status_code < 500:
try:
http_error_msg = response.json()
except:
http_error_msg = ('{code} Client Error: {reason} for url: {url}'.format(
code=response.status_code, reason=response.reason, url=response.url)
)
elif 500 <= response.status_code < 600:
http_error_msg = ('{code} Server Error: {reason} for url: {url}'.format(
code=response.status_code, reason=response.reason, url=response.url)
)
if http_error_msg:
raise HTTPError(http_error_msg, response=response)
def make_request(self, data, params):
req = self.request_factory(data, params)
prepared_request = self.owner._client.session.prepare_request(req)
response = self.owner._client.session.send(prepared_request)
# return error for some error conditions
self.raise_for_status(response)
if response.status_code == 204:
return response, None
return response, response.json(cls=PotionJSONDecoder,
client=self.owner._client,
default_instance=self.instance)
def __getattr__(self, item):
return getattr(self.link, item)
def __call__(self, *arg, **params):
data = None
# Need to pass positional argument as *arg so that properties of the same name are not overridden in **params.
if len(arg) > 1:
raise TypeError('Link must be called with no more than one positional argument')
elif len(arg) == 1:
data = arg[0]
if self.link.returns_pagination():
return PaginatedList(self, params)
response, response_data = self.make_request(data, params)
return response_data
|
en
| 0.808851
|
# flake8: noqa # return error for some error conditions # Need to pass positional argument as *arg so that properties of the same name are not overridden in **params.
| 2.433381
| 2
|
logicaDeProgramacao/exec3.py
|
SabrinadeSousa/pythonExercise
| 0
|
6626447
|
# Print the whole alphabet, one letter per line, with each letter repeated
# so the output forms a triangle ('a' once, 'b' twice, ..., 'z' 26 times).
# Fix: start the range at 97 (ord('a')); the original started at 96, which
# is '`' repeated zero times and only produced a stray blank first line.
for code in range(97, 123):
    # Repeat the letter (code - 96) times; print() appends the newline that
    # gives the triangle shape.
    print(chr(code) * (code - 96))
# cada linha deve ter 10 letras repetidas com utilização da repetição (for)
|
#Faça um programa para imprimir todo o alfabeto uma letra por linha e em
#vai me o alfabeto em forma de triangulo.
for alfa in range(96, 123):
for repete in range(0, (alfa - 96)):
print(chr(alfa), end="")
#esse print vazio vai me da formato de triangulo.
print()
# cada linha deve ter 10 letras repetidas com utilização da repetição (for)
|
pt
| 0.991479
|
#Faça um programa para imprimir todo o alfabeto uma letra por linha e em #vai me o alfabeto em forma de triangulo. #esse print vazio vai me da formato de triangulo. # cada linha deve ter 10 letras repetidas com utilização da repetição (for)
| 4.257932
| 4
|
deformable_potential_figure/deformable_fig_v2.py
|
kolbt/whingdingdilly
| 4
|
6626448
|
'''
A schematic figure to illustrate how Pe (and F_act) sets deformability:
-LJ potential
-Overlay 2 Forces (Strong under week)
-Corresponds to collision angle
'''
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as axes3d
from PIL import Image
import matplotlib.patches as patches
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
# Global font size used for every label/tick/legend in the figure.
fsize = 9
plt.rcParams.update({'font.size': fsize})
# Apply the same size to all text elements via rcParams.
params = {'legend.fontsize': fsize,
'axes.labelsize': fsize,
'axes.titlesize': fsize,
'xtick.labelsize': fsize,
'ytick.labelsize': fsize}
plt.rcParams.update(params)
# Lennard-Jones well depth and particle diameter (simulation units).
eps = 0.1
sigma = 1.
def ljPotential(r, eps=0.1, sigma=1.):
    """Shifted (WCA-style) Lennard-Jones potential at separation ``r``.

    The ``+ eps`` shift makes the potential vanish at the cutoff
    ``r = 2**(1/6) * sigma``.
    """
    sr6 = (sigma / r) ** 6
    return 4. * eps * (sr6 * sr6 - sr6) + eps
def ljForce(r, eps=0.1, sigma=1.):
    """Magnitude of the Lennard-Jones force, ``-dU/dr``, at separation ``r``.

    Zero at the WCA cutoff ``r = 2**(1/6) * sigma`` and repulsive inside it.
    """
    sr6 = (sigma / r) ** 6
    return (24. * eps / r) * (2. * sr6 * sr6 - sr6)
def convergeConstPeEps(pe, eps):
    """Separation at which the LJ force balances an active force of magnitude ``pe``.

    Steps inward from just outside the WCA cutoff (r = 1.112) in 1e-4
    increments and returns the first r where ``ljForce(r, eps) >= pe``.
    """
    separation = 1.112
    while True:
        if ljForce(separation, eps) >= pe:
            return separation
        separation -= 0.0001
# Compute the weak and strong collision force
peWeak = 10.
peMid = 50.
peStrong = 150.
# Equilibrium separations where the LJ repulsion balances each active force.
rWeak = convergeConstPeEps(peWeak, eps)
rMid = convergeConstPeEps(peMid, eps)
rStrong = convergeConstPeEps(peStrong, eps)
# Plot the figure
width = 3. + (3./8.) # single-column figure width
fig = plt.figure(figsize=(width, width))
ax = []
#ax.append(fig.add_subplot(131)) # left column
#ax.append(fig.add_subplot(332, projection='3d')) # right top
#ax.append(fig.add_subplot(324, projection='3d')) # right mid
#ax.append(fig.add_subplot(326, projection='3d')) # right bottom
ax.append(fig.add_subplot(111)) # potential figure
# Inset axes for the three overlapping-sphere renderings, placed along the curve.
sWidth = 0.3
sHeight = sWidth * (1./2.)
ax.append(plt.axes([0.55, 0.26, sWidth, sHeight], projection='3d')) # weak pe spheres
ax.append(plt.axes([0.325, 0.425, sWidth, sHeight], projection='3d')) # mid pe spheres
ax.append(plt.axes([0.225, 0.675, sWidth, sHeight], projection='3d')) # strong pe spheres
# Transparent backgrounds so the 3D insets overlay the 2D force plot cleanly.
ax[1].patch.set_alpha(0.0)
ax[2].patch.set_alpha(0.0)
ax[3].patch.set_alpha(0.0)
# Plot LJ potential
# Separation range from near-contact out to twice the WCA cutoff.
dist = np.arange(0.0001, ((2.**(1./6.))*sigma)*2., 0.001)
#ax[0].plot(dist, ljPotential(dist, eps=eps), c='k', lw=1.5, label='LJ-Potential', zorder=0)
# Plot LJ Force
ax[0].plot(dist, ljForce(dist, eps=eps), c='k', lw=1.5, label='LJ-Potential', zorder=0)
# Overlay the weak activity range
# Each colored curve is the same force profile shifted vertically for
# visual separation; the dot marks the activity's equilibrium separation.
# LJ shift:
shift = 1.
# LJF shift:
shift = 20.
weakRange = np.arange(rWeak, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(weakRange, ljPotential(weakRange, eps=eps) + (1.5 * shift), c='r', lw=1.25, label='Weak', zorder=0)
#ax[0].scatter(rWeak, ljPotential(rWeak, eps=eps) + (1.5 * shift), c='r', zorder=1)
# LJ force:
ax[0].plot(weakRange, ljForce(weakRange, eps=eps) + (1.5 * shift), c='r', lw=1.25, label='Weak', zorder=0)
ax[0].scatter(rWeak, ljForce(rWeak, eps=eps) + (1.5 * shift), c='r', zorder=1)
# Overlay the middle activity range
midRange = np.arange(rMid, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(midRange, ljPotential(midRange, eps=eps) + (1. * shift), c='b', lw=1.25, label='Mid', zorder=0)
#ax[0].scatter(rMid, ljPotential(rMid, eps=eps) + (1. * shift), c='b', zorder=1)
# LJ force:
ax[0].plot(midRange, ljForce(midRange, eps=eps) + (1. * shift), c='b', lw=1.25, label='Mid', zorder=0)
ax[0].scatter(rMid, ljForce(rMid, eps=eps) + (1. * shift), c='b', zorder=1)
# Overlay the strong activity range
strongRange = np.arange(rStrong, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(strongRange, ljPotential(strongRange, eps=eps) + (0.5 * shift), c='g', lw=1.25, label='Strong', zorder=0)
#ax[0].scatter(rStrong, ljPotential(rStrong, eps=eps) + (0.5 * shift), c='g', zorder=1)
# LJ force:
ax[0].plot(strongRange, ljForce(strongRange, eps=eps) + (0.5 * shift), c='g', lw=1.25, label='Strong', zorder=0)
ax[0].scatter(rStrong, ljForce(rStrong, eps=eps) + (0.5 * shift), c='g', zorder=1)
# Limits
ax[0].set_xlim(rStrong - 0.05, (2.**(1./6.))*sigma)
ax[0].set_ylim(0., 175.)
# NOTE(review): distance is labeled with alpha — presumably the paper's
# notation for scaled separation; confirm against the manuscript.
ax[0].set_xlabel(r'Interparticle distance $(\alpha_{i,j})$', fontsize=fsize)
#ax[0].set_ylabel(r'Lennard-Jones potential $(U_{LJ})$', fontsize=fsize)
ax[0].set_ylabel(r'Lennard-Jones force $(F_{LJ})$', fontsize=fsize)
#ax[0].legend()
# Plot the overlap of spheres
# For wire mesh
# Back/front hemispheres of a unit sphere, meshed coarsely for the wireframe.
backu, backv = np.mgrid[1*np.pi:2*np.pi:10j, 0:np.pi:10j]
backx = np.cos(backu)*np.sin(backv)
backy = np.sin(backu)*np.sin(backv)
backz = np.cos(backv)
frontu, frontv = np.mgrid[0*np.pi:1*np.pi:10j, 0:np.pi:10j]
frontx = np.cos(frontu)*np.sin(frontv)
fronty = np.sin(frontu)*np.sin(frontv)
frontz = np.cos(frontv)
# For solid sphere
# Fine mesh for the opaque inner sphere (radius scaled by the overlap distance).
uS, vS = np.mgrid[0:2*np.pi:1000j, 0:np.pi:500j]
xS = np.cos(uS)*np.sin(vS)
yS = np.sin(uS)*np.sin(vS)
zS = np.cos(vS)
backAlph = 0.3
frontAlph = 0.5
# Weak activity: two unit wireframes centered at +/- rWeak with solid cores.
ax[1].plot_wireframe(backx - rWeak, backy, backz, color="#808080", alpha=backAlph)
ax[1].plot_wireframe(backx + rWeak, backy, backz, color="#808080", alpha=backAlph)
ax[1].plot_surface((xS*rWeak) - rWeak, yS*rWeak, zS*rWeak, color="r")
ax[1].plot_surface((xS*rWeak) + rWeak, yS*rWeak, zS*rWeak, color="r")
ax[1].plot_wireframe(frontx - rWeak, fronty, frontz, color="#808080", alpha=frontAlph)
ax[1].plot_wireframe(frontx + rWeak, fronty, frontz, color="#808080", alpha=frontAlph)
ax[1].set_axis_off()
ax[1].view_init(0, 90)
ax[1].set_xlim(-2., 2.)
ax[1].set_ylim(-1., 1.)
ax[1].set_zlim(-1., 1.)
# NOTE(review): Axes3D.dist sets camera distance; deprecated in newer
# Matplotlib releases — confirm the pinned version still supports it.
ax[1].dist = 6.
# Mid activity inset (same construction, blue cores at +/- rMid).
ax[2].plot_wireframe(backx - rMid, backy, backz, color="#808080", alpha=backAlph)
ax[2].plot_wireframe(backx + rMid, backy, backz, color="#808080", alpha=backAlph)
ax[2].plot_surface((xS*rMid) - rMid, yS*rMid, zS*rMid, color="b")
ax[2].plot_surface((xS*rMid) + rMid, yS*rMid, zS*rMid, color="b")
ax[2].plot_wireframe(frontx - rMid, fronty, frontz, color="#808080", alpha=frontAlph)
ax[2].plot_wireframe(frontx + rMid, fronty, frontz, color="#808080", alpha=frontAlph)
ax[2].set_axis_off()
ax[2].view_init(0, 90)
ax[2].set_xlim(-2., 2.)
ax[2].set_ylim(-1., 1.)
ax[2].set_zlim(-1., 1.)
ax[2].dist = 6.
# Strong activity inset (green cores at +/- rStrong, greatest overlap).
ax[3].plot_wireframe(backx - rStrong, backy, backz, color="#808080", alpha=backAlph)
ax[3].plot_wireframe(backx + rStrong, backy, backz, color="#808080", alpha=backAlph)
ax[3].plot_surface((xS*rStrong) - rStrong, yS*rStrong, zS*rStrong, color="g")
ax[3].plot_surface((xS*rStrong) + rStrong, yS*rStrong, zS*rStrong, color="g")
ax[3].plot_wireframe(frontx - rStrong, fronty, frontz, color="#808080", alpha=frontAlph)
ax[3].plot_wireframe(frontx + rStrong, fronty, frontz, color="#808080", alpha=frontAlph)
ax[3].set_axis_off()
ax[3].view_init(0, 90)
ax[3].set_xlim(-2., 2.)
#ax[3].set_ylim(-1.5, 1.5)
#ax[3].set_zlim(-1.5, 1.5)
ax[3].set_ylim(-1., 1.)
ax[3].set_zlim(-1., 1.)
ax[3].dist = 6.
# Pe legend text, colored to match the corresponding shifted force curves.
ax[0].text(0.75, 0.75, r'$Pe=$'+"{0:g}".format(peWeak), color='r', transform=ax[0].transAxes, fontsize=fsize)
ax[0].text(0.75, 0.825, r'$Pe=$'+"{0:g}".format(peMid), color='b', transform=ax[0].transAxes, fontsize=fsize)
ax[0].text(0.75, 0.9, r'$Pe=$'+"{0:g}".format(peStrong), color='g', transform=ax[0].transAxes, fontsize=fsize)
# Label for figure letter (a)
ax[0].text(0.02, 0.925, r'$(a)$', transform=ax[0].transAxes, fontsize=fsize)
# Set tick parameters
ax[0].tick_params(axis='both', direction='in', labelsize=fsize)
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
ax[0].xaxis.set_minor_locator(MultipleLocator(0.025))
ax[0].yaxis.set_minor_locator(MultipleLocator(5))
ax[0].tick_params(axis='both', which='minor', length=2, direction='in')
# Replace the tick label at x = 1.0 with the sigma symbol (particle diameter).
labels = ax[0].get_xticks().tolist()
print(labels)
for i in range(0, len(labels)):
    if labels[i] == 1.0:
        labels[i] = r'$\sigma$'
print(labels)
ax[0].set_xticklabels(labels)
# Let's add spatial heatmaps to this figure
ld_img = [] # list to hold images
# Path to images
# NOTE(review): absolute machine-specific paths — this section only runs on
# the author's workstation; the second assignment overrides the first.
imPath = '/Users/kolbt/Desktop/soft_figures/method_schematic'
imPath = '/Users/kolbt/Desktop/compiled/whingdingdilly/ipython/clusters_soft'
# Image file names
imgs = ['spatial_delta_pa10.0_pb0_xa100.0_frame0600.png',
'spatial_delta_pa50.0_pb0_xa100.0_frame0600.png',
'spatial_delta_pa150.0_pb0_xa100.0_frame0600.png']
# The height/width
dim = 6600
# Crop each snapshot to a dim x dim square before embedding it.
for i in imgs:
    im = Image.open(imPath + '/' + i)
    # (left, upper, right, lower)
    # image is 9600 x 7200
    left = 700
    upper = 250
    im1 = im.crop((left, upper, left+dim, upper+dim))
    ld_img.append(im1)
# Add an axes to the right of the plot for the heatmaps
imdim = 0.25
base = 0.1125
buff = 0.008
left = 0.92
# Stack the three cropped snapshots vertically, weakest activity at the bottom.
bottom, width, height = base, imdim, imdim
hm1 = fig.add_axes([left, bottom, width, height])
hm1.imshow(ld_img[0])
hm1.set_axis_off()
hm1.set_aspect('equal')
bottom, width, height = base + imdim + buff, imdim, imdim
hm2 = fig.add_axes([left, bottom, width, height])
hm2.imshow(ld_img[1])
hm2.set_axis_off()
hm2.set_aspect('equal')
bottom, width, height = base + (2.*imdim) + (2.*buff), imdim, imdim
hm3 = fig.add_axes([left, bottom, width, height])
hm3.imshow(ld_img[2])
hm3.set_axis_off()
hm3.set_aspect('equal')
# Colored borders matching the Pe colors of the force-curve overlays.
hm1.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='r', facecolor='none', transform=hm1.transAxes))
hm2.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='b', facecolor='none', transform=hm2.transAxes))
hm3.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='g', facecolor='none', transform=hm3.transAxes))
# Spatial heatmap letters (b.i, b.ii, b.iii)
letx, lety = 0.55, 0.1
hm1.text(letx, lety, r'$(b.iii)$', transform=hm1.transAxes, fontsize=fsize)
hm2.text(letx+.04, lety, r'$(b.ii)$', transform=hm2.transAxes, fontsize=fsize)
hm3.text(letx+0.08, lety, r'$(b.i)$', transform=hm3.transAxes, fontsize=fsize)
# Shared colorbar for the heatmaps; host axes are hidden, only the bar shows.
cbax = fig.add_axes([0.55, base, 0.765, 0.765])
divider = make_axes_locatable(cbax)
cax = divider.append_axes("left", size="1%", pad=0.0)
cmap = mpl.cm.jet_r
norm = mpl.colors.Normalize(vmin=0.6, vmax=1.0)
sm = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
sm.set_array([])
cb1 = fig.colorbar(sm, ax=cax, orientation='vertical')
cb1.set_label(r'$\alpha$', fontsize=fsize)
cbax.axis('off')
cax.axis('off')
# Try? To add the schematic figure underneath all of this?
#schmIm = '/Users/kolbt/Desktop/compiled/whingdingdilly/deformable_potential_figure/analytical_schematic.png'
schmIm = '/Users/kolbt/Desktop/compiled/whingdingdilly/deformable_potential_figure/analytical_w_hcp.png'
schm = Image.open(schmIm)
# Add an axes to the right of the plot for the heatmaps
# Negative bottom places the schematic panel below the main axes.
left = -0.025
bottom, width, height = -1.05, 1.5, 1.5
sch = fig.add_axes([left, bottom, width, height])
sch.imshow(schm)
sch.set_axis_off()
sch.set_aspect('equal')
sch.text(-0.01, 0.9, r'$(c)$', transform=sch.transAxes, fontsize=fsize)
sch.text(0.4, 0.9, r'$(d)$', transform=sch.transAxes, fontsize=fsize)
#plt.savefig("particle_deformation_eps" + str(eps) + ".png", dpi=2000, bbox_inches='tight', pad_inches=0)
plt.savefig("particle_deformation_eps" + str(eps) + ".png", dpi=500, bbox_inches="tight", pad_inches=0.01)
plt.close()
|
'''
A schematic figure to illustrate how Pe (and F_act) sets deformability:
-LJ potential
-Overlay 2 Forces (Strong under week)
-Corresponds to collision angle
'''
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as axes3d
from PIL import Image
import matplotlib.patches as patches
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
fsize = 9
plt.rcParams.update({'font.size': fsize})
params = {'legend.fontsize': fsize,
'axes.labelsize': fsize,
'axes.titlesize': fsize,
'xtick.labelsize': fsize,
'ytick.labelsize': fsize}
plt.rcParams.update(params)
eps = 0.1
sigma = 1.
def ljPotential(r, eps=0.1, sigma=1.):
div = (sigma/r)
U = ( 4. * eps * ((div)**12 - (div)**6) ) + eps
return U
def ljForce(r, eps=0.1, sigma=1.):
div = (sigma/r)
dU = (24. * eps / r) * ((2*(div**12)) - (div)**6)
return dU
def convergeConstPeEps(pe, eps):
r = 1.112
while ljForce(r, eps) < pe:
r -= 0.0001
return r
# Compute the weak and strong collision force
peWeak = 10.
peMid = 50.
peStrong = 150.
rWeak = convergeConstPeEps(peWeak, eps)
rMid = convergeConstPeEps(peMid, eps)
rStrong = convergeConstPeEps(peStrong, eps)
# Plot the figure
width = 3. + (3./8.) # single-column figure width
fig = plt.figure(figsize=(width, width))
ax = []
#ax.append(fig.add_subplot(131)) # left column
#ax.append(fig.add_subplot(332, projection='3d')) # right top
#ax.append(fig.add_subplot(324, projection='3d')) # right mid
#ax.append(fig.add_subplot(326, projection='3d')) # right bottom
ax.append(fig.add_subplot(111)) # potential figure
sWidth = 0.3
sHeight = sWidth * (1./2.)
ax.append(plt.axes([0.55, 0.26, sWidth, sHeight], projection='3d')) # weak pe spheres
ax.append(plt.axes([0.325, 0.425, sWidth, sHeight], projection='3d')) # mid pe spheres
ax.append(plt.axes([0.225, 0.675, sWidth, sHeight], projection='3d')) # strong pe spheres
ax[1].patch.set_alpha(0.0)
ax[2].patch.set_alpha(0.0)
ax[3].patch.set_alpha(0.0)
# Plot LJ potential
dist = np.arange(0.0001, ((2.**(1./6.))*sigma)*2., 0.001)
#ax[0].plot(dist, ljPotential(dist, eps=eps), c='k', lw=1.5, label='LJ-Potential', zorder=0)
# Plot LJ Force
ax[0].plot(dist, ljForce(dist, eps=eps), c='k', lw=1.5, label='LJ-Potential', zorder=0)
# Overlay the weak activity range
# LJ shift:
shift = 1.
# LJF shift:
shift = 20.
weakRange = np.arange(rWeak, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(weakRange, ljPotential(weakRange, eps=eps) + (1.5 * shift), c='r', lw=1.25, label='Weak', zorder=0)
#ax[0].scatter(rWeak, ljPotential(rWeak, eps=eps) + (1.5 * shift), c='r', zorder=1)
# LJ force:
ax[0].plot(weakRange, ljForce(weakRange, eps=eps) + (1.5 * shift), c='r', lw=1.25, label='Weak', zorder=0)
ax[0].scatter(rWeak, ljForce(rWeak, eps=eps) + (1.5 * shift), c='r', zorder=1)
# Overlay the middle activity range
midRange = np.arange(rMid, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(midRange, ljPotential(midRange, eps=eps) + (1. * shift), c='b', lw=1.25, label='Mid', zorder=0)
#ax[0].scatter(rMid, ljPotential(rMid, eps=eps) + (1. * shift), c='b', zorder=1)
# LJ force:
ax[0].plot(midRange, ljForce(midRange, eps=eps) + (1. * shift), c='b', lw=1.25, label='Mid', zorder=0)
ax[0].scatter(rMid, ljForce(rMid, eps=eps) + (1. * shift), c='b', zorder=1)
# Overlay the strong activity range
strongRange = np.arange(rStrong, ((2.**(1./6.))*sigma)*2., 0.001)
# LJ potential:
#ax[0].plot(strongRange, ljPotential(strongRange, eps=eps) + (0.5 * shift), c='g', lw=1.25, label='Strong', zorder=0)
#ax[0].scatter(rStrong, ljPotential(rStrong, eps=eps) + (0.5 * shift), c='g', zorder=1)
# LJ force:
ax[0].plot(strongRange, ljForce(strongRange, eps=eps) + (0.5 * shift), c='g', lw=1.25, label='Strong', zorder=0)
ax[0].scatter(rStrong, ljForce(rStrong, eps=eps) + (0.5 * shift), c='g', zorder=1)
# Limits
ax[0].set_xlim(rStrong - 0.05, (2.**(1./6.))*sigma)
ax[0].set_ylim(0., 175.)
ax[0].set_xlabel(r'Interparticle distance $(\alpha_{i,j})$', fontsize=fsize)
#ax[0].set_ylabel(r'Lennard-Jones potential $(U_{LJ})$', fontsize=fsize)
ax[0].set_ylabel(r'Lennard-Jones force $(F_{LJ})$', fontsize=fsize)
#ax[0].legend()
# Plot the overlap of spheres
# For wire mesh
backu, backv = np.mgrid[1*np.pi:2*np.pi:10j, 0:np.pi:10j]
backx = np.cos(backu)*np.sin(backv)
backy = np.sin(backu)*np.sin(backv)
backz = np.cos(backv)
frontu, frontv = np.mgrid[0*np.pi:1*np.pi:10j, 0:np.pi:10j]
frontx = np.cos(frontu)*np.sin(frontv)
fronty = np.sin(frontu)*np.sin(frontv)
frontz = np.cos(frontv)
# For solid sphere
uS, vS = np.mgrid[0:2*np.pi:1000j, 0:np.pi:500j]
xS = np.cos(uS)*np.sin(vS)
yS = np.sin(uS)*np.sin(vS)
zS = np.cos(vS)
backAlph = 0.3
frontAlph = 0.5
ax[1].plot_wireframe(backx - rWeak, backy, backz, color="#808080", alpha=backAlph)
ax[1].plot_wireframe(backx + rWeak, backy, backz, color="#808080", alpha=backAlph)
ax[1].plot_surface((xS*rWeak) - rWeak, yS*rWeak, zS*rWeak, color="r")
ax[1].plot_surface((xS*rWeak) + rWeak, yS*rWeak, zS*rWeak, color="r")
ax[1].plot_wireframe(frontx - rWeak, fronty, frontz, color="#808080", alpha=frontAlph)
ax[1].plot_wireframe(frontx + rWeak, fronty, frontz, color="#808080", alpha=frontAlph)
ax[1].set_axis_off()
ax[1].view_init(0, 90)
ax[1].set_xlim(-2., 2.)
ax[1].set_ylim(-1., 1.)
ax[1].set_zlim(-1., 1.)
ax[1].dist = 6.
ax[2].plot_wireframe(backx - rMid, backy, backz, color="#808080", alpha=backAlph)
ax[2].plot_wireframe(backx + rMid, backy, backz, color="#808080", alpha=backAlph)
ax[2].plot_surface((xS*rMid) - rMid, yS*rMid, zS*rMid, color="b")
ax[2].plot_surface((xS*rMid) + rMid, yS*rMid, zS*rMid, color="b")
ax[2].plot_wireframe(frontx - rMid, fronty, frontz, color="#808080", alpha=frontAlph)
ax[2].plot_wireframe(frontx + rMid, fronty, frontz, color="#808080", alpha=frontAlph)
ax[2].set_axis_off()
ax[2].view_init(0, 90)
ax[2].set_xlim(-2., 2.)
ax[2].set_ylim(-1., 1.)
ax[2].set_zlim(-1., 1.)
ax[2].dist = 6.
ax[3].plot_wireframe(backx - rStrong, backy, backz, color="#808080", alpha=backAlph)
ax[3].plot_wireframe(backx + rStrong, backy, backz, color="#808080", alpha=backAlph)
ax[3].plot_surface((xS*rStrong) - rStrong, yS*rStrong, zS*rStrong, color="g")
ax[3].plot_surface((xS*rStrong) + rStrong, yS*rStrong, zS*rStrong, color="g")
ax[3].plot_wireframe(frontx - rStrong, fronty, frontz, color="#808080", alpha=frontAlph)
ax[3].plot_wireframe(frontx + rStrong, fronty, frontz, color="#808080", alpha=frontAlph)
ax[3].set_axis_off()
ax[3].view_init(0, 90)
ax[3].set_xlim(-2., 2.)
#ax[3].set_ylim(-1.5, 1.5)
#ax[3].set_zlim(-1.5, 1.5)
ax[3].set_ylim(-1., 1.)
ax[3].set_zlim(-1., 1.)
ax[3].dist = 6.
ax[0].text(0.75, 0.75, r'$Pe=$'+"{0:g}".format(peWeak), color='r', transform=ax[0].transAxes, fontsize=fsize)
ax[0].text(0.75, 0.825, r'$Pe=$'+"{0:g}".format(peMid), color='b', transform=ax[0].transAxes, fontsize=fsize)
ax[0].text(0.75, 0.9, r'$Pe=$'+"{0:g}".format(peStrong), color='g', transform=ax[0].transAxes, fontsize=fsize)
# Label for figure letter (a)
ax[0].text(0.02, 0.925, r'$(a)$', transform=ax[0].transAxes, fontsize=fsize)
# Set tick parameters
ax[0].tick_params(axis='both', direction='in', labelsize=fsize)
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
ax[0].xaxis.set_minor_locator(MultipleLocator(0.025))
ax[0].yaxis.set_minor_locator(MultipleLocator(5))
ax[0].tick_params(axis='both', which='minor', length=2, direction='in')
labels = ax[0].get_xticks().tolist()
print(labels)
for i in range(0, len(labels)):
if labels[i] == 1.0:
labels[i] = r'$\sigma$'
print(labels)
ax[0].set_xticklabels(labels)
# Let's add spatial heatmaps to this figure
ld_img = [] # list to hold images
# Path to images
imPath = '/Users/kolbt/Desktop/soft_figures/method_schematic'
imPath = '/Users/kolbt/Desktop/compiled/whingdingdilly/ipython/clusters_soft'
# Image file names
imgs = ['spatial_delta_pa10.0_pb0_xa100.0_frame0600.png',
'spatial_delta_pa50.0_pb0_xa100.0_frame0600.png',
'spatial_delta_pa150.0_pb0_xa100.0_frame0600.png']
# The height/width
dim = 6600
for i in imgs:
im = Image.open(imPath + '/' + i)
# (left, upper, right, lower)
# image is 9600 x 7200
left = 700
upper = 250
im1 = im.crop((left, upper, left+dim, upper+dim))
ld_img.append(im1)
# Add an axes to the right of the plot for the heatmaps
imdim = 0.25
base = 0.1125
buff = 0.008
left = 0.92
bottom, width, height = base, imdim, imdim
hm1 = fig.add_axes([left, bottom, width, height])
hm1.imshow(ld_img[0])
hm1.set_axis_off()
hm1.set_aspect('equal')
bottom, width, height = base + imdim + buff, imdim, imdim
# Tail of the figure-assembly script: lays out the second and third spatial
# heatmaps, draws colored frames, adds a shared colorbar, and composites a
# pre-rendered schematic image below the plot before saving.
# NOTE(review): `fig`, `hm1`, `left`, `base`, `imdim`, `buff`, `ld_img`,
# `fsize`, `eps`, `mpl`, `plt`, `patches`, `Image`, and
# `make_axes_locatable` are all defined earlier in this script (outside the
# visible window) — confirm against the full file.
hm2 = fig.add_axes([left, bottom, width, height])
hm2.imshow(ld_img[1])
hm2.set_axis_off()
hm2.set_aspect('equal')
# Stack the third heatmap one image-height (plus buffer) above the second.
bottom, width, height = base + (2.*imdim) + (2.*buff), imdim, imdim
hm3 = fig.add_axes([left, bottom, width, height])
hm3.imshow(ld_img[2])
hm3.set_axis_off()
hm3.set_aspect('equal')
# Color-coded borders (red/blue/green) drawn in axes coordinates so each
# frame exactly hugs its heatmap panel.
hm1.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='r', facecolor='none', transform=hm1.transAxes))
hm2.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='b', facecolor='none', transform=hm2.transAxes))
hm3.add_patch(patches.Rectangle((0, 0), 1, 1, linewidth=2.5, edgecolor='g', facecolor='none', transform=hm3.transAxes))
# Spatial heatmap letters (b.i, b.ii, b.iii)
# Note the ordering: the bottom panel (hm1) is labeled (b.iii) and the top
# panel (hm3) is (b.i); the small x-offsets compensate for label widths.
letx, lety = 0.55, 0.1
hm1.text(letx, lety, r'$(b.iii)$', transform=hm1.transAxes, fontsize=fsize)
hm2.text(letx+.04, lety, r'$(b.ii)$', transform=hm2.transAxes, fontsize=fsize)
hm3.text(letx+0.08, lety, r'$(b.i)$', transform=hm3.transAxes, fontsize=fsize)
# Shared colorbar for the heatmaps: a dummy ScalarMappable (jet_r, alpha in
# [0.6, 1.0]) drawn on a hidden helper axes so only the bar itself shows.
cbax = fig.add_axes([0.55, base, 0.765, 0.765])
divider = make_axes_locatable(cbax)
cax = divider.append_axes("left", size="1%", pad=0.0)
cmap = mpl.cm.jet_r
norm = mpl.colors.Normalize(vmin=0.6, vmax=1.0)
sm = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
sm.set_array([])  # required so colorbar() accepts a mappable with no data
cb1 = fig.colorbar(sm, ax=cax, orientation='vertical')
cb1.set_label(r'$\alpha$', fontsize=fsize)
cbax.axis('off')
cax.axis('off')
# Try? To add the schematic figure underneath all of this?
#schmIm = '/Users/kolbt/Desktop/compiled/whingdingdilly/deformable_potential_figure/analytical_schematic.png'
# Hard-coded absolute path to the schematic PNG — machine-specific.
schmIm = '/Users/kolbt/Desktop/compiled/whingdingdilly/deformable_potential_figure/analytical_w_hcp.png'
schm = Image.open(schmIm)
# Add an axes to the right of the plot for the heatmaps
# (negative bottom places this axes below the main plot area)
left = -0.025
bottom, width, height = -1.05, 1.5, 1.5
sch = fig.add_axes([left, bottom, width, height])
sch.imshow(schm)
sch.set_axis_off()
sch.set_aspect('equal')
# Panel letters for the schematic: (c) and (d).
sch.text(-0.01, 0.9, r'$(c)$', transform=sch.transAxes, fontsize=fsize)
sch.text(0.4, 0.9, r'$(d)$', transform=sch.transAxes, fontsize=fsize)
#plt.savefig("particle_deformation_eps" + str(eps) + ".png", dpi=2000, bbox_inches='tight', pad_inches=0)
# Save at 500 dpi (2000 dpi variant kept above for reference) and close.
plt.savefig("particle_deformation_eps" + str(eps) + ".png", dpi=500, bbox_inches="tight", pad_inches=0.01)
plt.close()
|
en
| 0.481546
|
A schematic figure to illustrate how Pe (and F_act) sets deformability: -LJ potential -Overlay 2 Forces (Strong under week) -Corresponds to collision angle # Compute the weak and strong collision force # Plot the figure # single-column figure width #ax.append(fig.add_subplot(131)) # left column #ax.append(fig.add_subplot(332, projection='3d')) # right top #ax.append(fig.add_subplot(324, projection='3d')) # right mid #ax.append(fig.add_subplot(326, projection='3d')) # right bottom # potential figure # weak pe spheres # mid pe spheres # strong pe spheres # Plot LJ potential #ax[0].plot(dist, ljPotential(dist, eps=eps), c='k', lw=1.5, label='LJ-Potential', zorder=0) # Plot LJ Force # Overlay the weak activity range # LJ shift: # LJF shift: # LJ potential: #ax[0].plot(weakRange, ljPotential(weakRange, eps=eps) + (1.5 * shift), c='r', lw=1.25, label='Weak', zorder=0) #ax[0].scatter(rWeak, ljPotential(rWeak, eps=eps) + (1.5 * shift), c='r', zorder=1) # LJ force: # Overlay the middle activity range # LJ potential: #ax[0].plot(midRange, ljPotential(midRange, eps=eps) + (1. * shift), c='b', lw=1.25, label='Mid', zorder=0) #ax[0].scatter(rMid, ljPotential(rMid, eps=eps) + (1. 
* shift), c='b', zorder=1) # LJ force: # Overlay the strong activity range # LJ potential: #ax[0].plot(strongRange, ljPotential(strongRange, eps=eps) + (0.5 * shift), c='g', lw=1.25, label='Strong', zorder=0) #ax[0].scatter(rStrong, ljPotential(rStrong, eps=eps) + (0.5 * shift), c='g', zorder=1) # LJ force: # Limits #ax[0].set_ylabel(r'Lennard-Jones potential $(U_{LJ})$', fontsize=fsize) #ax[0].legend() # Plot the overlap of spheres # For wire mesh # For solid sphere #ax[3].set_ylim(-1.5, 1.5) #ax[3].set_zlim(-1.5, 1.5) # Label for figure letter (a) # Set tick parameters # Let's add spatial heatmaps to this figure # list to hold images # Path to images # Image file names # The height/width # (left, upper, right, lower) # image is 9600 x 7200 # Add an axes to the right of the plot for the heatmaps # Spatial heatmap letters (b.i, b.ii, b.iii) # Try? To add the schematic figure underneath all of this? #schmIm = '/Users/kolbt/Desktop/compiled/whingdingdilly/deformable_potential_figure/analytical_schematic.png' # Add an axes to the right of the plot for the heatmaps #plt.savefig("particle_deformation_eps" + str(eps) + ".png", dpi=2000, bbox_inches='tight', pad_inches=0)
| 2.550203
| 3
|
tweet/twitterspidertest/useapi.py
|
Octoberr/swm0920
| 2
|
6626449
|
<reponame>Octoberr/swm0920
"""
实在是莫得法了,用用api
create by judy 2019/09/10
"""
|
"""
实在是莫得法了,用用api
create by judy 2019/09/10
"""
|
en
| 0.213526
|
实在是莫得法了,用用api create by judy 2019/09/10
| 0.899537
| 1
|
examples/symmetrical_torque_driven_ocp/symmetry_by_constraint.py
|
vennand/BiorbdOptim
| 0
|
6626450
|
<gh_stars>0
import biorbd
from bioptim import (
Node,
OptimalControlProgram,
DynamicsTypeList,
DynamicsType,
ObjectiveList,
Objective,
ConstraintList,
Constraint,
BoundsList,
QAndQDotBounds,
InitialGuessList,
ShowResult,
OdeSolver,
)
def prepare_ocp(biorbd_model_path="cubeSym.bioMod", ode_solver=OdeSolver.RK):
    """Assemble a symmetric, torque-driven optimal control program.

    Symmetry between DoF 2 and DoF 3 is imposed through a
    ``PROPORTIONAL_STATE`` constraint with coefficient ``-1`` rather than
    a DoF mapping.

    Parameters
    ----------
    biorbd_model_path : str
        Path of the bioMod model file to load.
    ode_solver : OdeSolver
        Integrator used for the dynamics transcription.

    Returns
    -------
    OptimalControlProgram
        The fully configured program, ready for ``solve()``.
    """
    model = biorbd.Model(biorbd_model_path)

    # Discretization and torque limits.
    n_shooting = 30
    phase_time = 2
    tau_min, tau_max, tau_init = -100, 100, 0

    # Cost: keep the joint torques as small as possible over the motion.
    objectives = ObjectiveList()
    objectives.add(Objective.Lagrange.MINIMIZE_TORQUE, weight=100)

    dyn = DynamicsTypeList()
    dyn.add(DynamicsType.TORQue_DRIVEN) if False else dyn.add(DynamicsType.TORQUE_DRIVEN)

    # Marker targets at both ends of the motion, plus the symmetry link.
    cons = ConstraintList()
    cons.add(Constraint.ALIGN_MARKERS, node=Node.START, first_marker_idx=0, second_marker_idx=1)
    cons.add(Constraint.ALIGN_MARKERS, node=Node.END, first_marker_idx=0, second_marker_idx=2)
    cons.add(Constraint.PROPORTIONAL_STATE, node=Node.ALL, first_dof=2, second_dof=3, coef=-1)

    # State bounds from the model; zero the last four state rows at the
    # first and last node (presumably the joint velocities — TODO confirm).
    x_bounds = BoundsList()
    x_bounds.add(QAndQDotBounds(model))
    x_bounds[0][4:8, [0, -1]] = 0

    # Start the search from a motionless, zeroed state trajectory.
    x_init = InitialGuessList()
    x_init.add([0] * (model.nbQ() + model.nbQdot()))

    # Constant torque bounds on every joint.
    u_bounds = BoundsList()
    u_bounds.add([[tau_min] * model.nbQ(), [tau_max] * model.nbQ()])

    u_init = InitialGuessList()
    u_init.add([tau_init] * model.nbQ())

    return OptimalControlProgram(
        model,
        dyn,
        n_shooting,
        phase_time,
        x_init,
        u_init,
        x_bounds,
        u_bounds,
        objectives,
        cons,
        ode_solver=ode_solver,
    )
if __name__ == "__main__":
    # Build the symmetric-cube OCP with the default model path and solver.
    ocp = prepare_ocp()

    # --- Solve the program --- #
    # show_online_optim=True displays the live convergence plots while
    # the solver runs.
    sol = ocp.solve(show_online_optim=True)

    # --- Show results --- #
    # Animate the optimized motion.
    result = ShowResult(ocp, sol)
    result.animate()
|
import biorbd
from bioptim import (
Node,
OptimalControlProgram,
DynamicsTypeList,
DynamicsType,
ObjectiveList,
Objective,
ConstraintList,
Constraint,
BoundsList,
QAndQDotBounds,
InitialGuessList,
ShowResult,
OdeSolver,
)
def prepare_ocp(biorbd_model_path="cubeSym.bioMod", ode_solver=OdeSolver.RK):
    """Assemble a symmetric, torque-driven optimal control program.

    Symmetry between DoF 2 and DoF 3 is imposed through a
    ``PROPORTIONAL_STATE`` constraint with coefficient ``-1`` rather than
    a DoF mapping.

    Parameters
    ----------
    biorbd_model_path : str
        Path of the bioMod model file to load.
    ode_solver : OdeSolver
        Integrator used for the dynamics transcription.

    Returns
    -------
    OptimalControlProgram
        The fully configured program, ready for ``solve()``.
    """
    model = biorbd.Model(biorbd_model_path)

    # Discretization and torque limits.
    n_shooting = 30
    phase_time = 2
    tau_min, tau_max, tau_init = -100, 100, 0

    # Cost: keep the joint torques as small as possible over the motion.
    objectives = ObjectiveList()
    objectives.add(Objective.Lagrange.MINIMIZE_TORQUE, weight=100)

    dyn = DynamicsTypeList()
    dyn.add(DynamicsType.TORQUE_DRIVEN)

    # Marker targets at both ends of the motion, plus the symmetry link.
    cons = ConstraintList()
    cons.add(Constraint.ALIGN_MARKERS, node=Node.START, first_marker_idx=0, second_marker_idx=1)
    cons.add(Constraint.ALIGN_MARKERS, node=Node.END, first_marker_idx=0, second_marker_idx=2)
    cons.add(Constraint.PROPORTIONAL_STATE, node=Node.ALL, first_dof=2, second_dof=3, coef=-1)

    # State bounds from the model; zero the last four state rows at the
    # first and last node (presumably the joint velocities — TODO confirm).
    x_bounds = BoundsList()
    x_bounds.add(QAndQDotBounds(model))
    x_bounds[0][4:8, [0, -1]] = 0

    # Start the search from a motionless, zeroed state trajectory.
    x_init = InitialGuessList()
    x_init.add([0] * (model.nbQ() + model.nbQdot()))

    # Constant torque bounds on every joint.
    u_bounds = BoundsList()
    u_bounds.add([[tau_min] * model.nbQ(), [tau_max] * model.nbQ()])

    u_init = InitialGuessList()
    u_init.add([tau_init] * model.nbQ())

    return OptimalControlProgram(
        model,
        dyn,
        n_shooting,
        phase_time,
        x_init,
        u_init,
        x_bounds,
        u_bounds,
        objectives,
        cons,
        ode_solver=ode_solver,
    )
if __name__ == "__main__":
    # Build the symmetric-cube OCP with the default model path and solver.
    ocp = prepare_ocp()

    # --- Solve the program --- #
    # show_online_optim=True displays the live convergence plots while
    # the solver runs.
    sol = ocp.solve(show_online_optim=True)

    # --- Show results --- #
    # Animate the optimized motion.
    result = ShowResult(ocp, sol)
    result.animate()
|
en
| 0.74444
|
# --- Options --- # # Model path # Problem parameters # Add objective functions # Dynamics # Constraints # Path constraint # Initial guess # Define control path constraint # ------------- # # --- Solve the program --- # # --- Show results --- #
| 2.340724
| 2
|