hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3cf3fa8909e682b90ef4b30a1f452499c8e9b5 | 3,068 | py | Python | monilog/log_generator.py | tayciryahmed/monilog | 991c909e93d1c8bf087519b4105308ce2cb5195a | [
"MIT"
] | 2 | 2020-03-03T09:32:42.000Z | 2020-05-27T21:15:30.000Z | monilog/log_generator.py | tayciryahmed/monilog | 991c909e93d1c8bf087519b4105308ce2cb5195a | [
"MIT"
] | null | null | null | monilog/log_generator.py | tayciryahmed/monilog | 991c909e93d1c8bf087519b4105308ce2cb5195a | [
"MIT"
] | null | null | null | '''
Log generation simulation with different durations and rates.
'''
import os
import time
import random
from time import sleep
from datetime import datetime
import logging
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
class LogGenerator:
    '''
    Simulation of log generator.

    Args:
        file (str): The file with the logs to monitor.
        rate (int): The average of number of requests per sec.
    '''

    def __init__(self,
                 file="/tmp/access.log",
                 rate=20):
        self.file = file
        self.rate = rate
        # Duplicate entries ("POST", "/foo", repeated "200") deliberately
        # weight random.choice toward the more common values.
        self.ips = ["::1", "192.168.0.110", "127.0.0.1", "60.242.26.14"]
        self.methods = ["GET", "POST", "POST", "PUT", "DELETE"]
        self.sections = ["/img", "/captcha", "/css", "/foo", "/foo", "/bar"]
        self.codes = ["200", "200", "200", "200",
                      "200", "304", "403", "404", "501"]

    def write_log(self, timestamp):
        '''
        Write a log entry, given a timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.
        '''
        # BUG FIX: the original called open(self.file, 'a+', os.O_NONBLOCK),
        # passing the flag constant (2048) as open()'s *buffering* argument.
        # Built-in open() does not accept os.O_* flags; that call merely set
        # an accidental 2048-byte buffer.  A plain buffered append is correct.
        # The explicit f.close() inside the `with` block was also redundant:
        # the context manager already closes the file.
        with open(self.file, 'a') as f:
            f.write(self.generate_log(timestamp))
            f.flush()

    def random_ip(self):
        '''
        Generate a random ip.

        Returns:
            (str): Four random dotted octets, each in [0, 255].
        '''
        return ".".join(str(random.randint(0, 255)) for _ in range(4))

    def generate_log(self, timestamp):
        '''
        Generate a log string given a timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.

        Returns:
            (str): A random Apache-access-style log entry, e.g.
            '1.2.3.4 - - [01/Jan/2020:00:00:00 +1000] "GET /foo/ HTTP/1.1" 200 123\\n'
        '''
        # Half the time pick a known ip, half the time a fully random one.
        ip = random.choice([random.choice(self.ips), self.random_ip()])
        method = random.choice(self.methods)
        # Section is a base path plus either an ".html" page or a subsection.
        section = random.choice(self.sections) \
            + random.choice([".html",
                             random.choice(self.sections)+'/',
                             random.choice(self.sections)+'/'])
        code = random.choice(self.codes)
        size = random.randint(10, 100000)
        return ('%s - - [%s +1000] "%s %s HTTP/1.1" %s %d\n'
                % (ip,
                   timestamp.strftime("%d/%b/%Y:%H:%M:%S"),
                   method,
                   section,
                   code,
                   size))

    def run(self, duration):
        '''
        Run the log generation.

        Args:
            duration (float): duration of log generation simulation (seconds).
        '''
        start = time.time()
        while time.time() - start < duration:
            self.write_log(datetime.now())
            # Uniform delay in [0, 2/rate) averages 1/rate sec per request.
            sleep(random.random() * 2 / self.rate)
| 29.5 | 80 | 0.518579 |
import os
import time
import random
from time import sleep
from datetime import datetime
import logging
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
class LogGenerator:
    # NOTE(review): this is the dataset's auto-stripped "content_no_comment"
    # duplicate of the documented LogGenerator above; code kept byte-identical.
    def __init__(self,
                 file="/tmp/access.log",
                 rate=20):
        '''Simulated log generator writing random entries to `file` at ~`rate`/sec.'''
        self.file = file
        self.rate = rate
        # Repeated entries weight random.choice toward the more common values.
        self.ips = ["::1", "192.168.0.110", "127.0.0.1", "60.242.26.14"]
        self.methods = ["GET", "POST", "POST", "PUT", "DELETE"]
        self.sections = ["/img", "/captcha", "/css", "/foo", "/foo", "/bar"]
        self.codes = ["200", "200", "200", "200",
                      "200", "304", "403", "404", "501"]
    def write_log(self, timestamp):
        '''Append one random log entry for `timestamp` to the log file.'''
        # NOTE(review): os.O_NONBLOCK (2048) lands in open()'s *buffering*
        # parameter here — open() takes no O_* flags, so this just sets an
        # accidental buffer size; likely a bug, confirm intent.  The f.close()
        # is redundant: the `with` block already closes the file.
        with open(self.file, 'a+', os.O_NONBLOCK) as f:
            f.write(self.generate_log(timestamp))
            f.flush()
            f.close()
    def random_ip(self):
        '''Return a random dotted-quad IP string with octets in [0, 255].'''
        return str(random.randint(0, 255)) + "." + str(random.randint(0, 255)) \
            + "." + str(random.randint(0, 255)) + "." \
            + str(random.randint(0, 255))
    def generate_log(self, timestamp):
        '''Return one random Apache-access-style log line for `timestamp`.'''
        # Half the time a known ip, half the time a fully random one.
        ip = random.choice([random.choice(self.ips), self.random_ip()])
        method = random.choice(self.methods)
        # Base path plus either an ".html" page or a random subsection.
        section = random.choice(self.sections) \
            + random.choice([".html",
                             random.choice(self.sections)+'/',
                             random.choice(self.sections)+'/'])
        code = random.choice(self.codes)
        size = random.randint(10, 100000)
        return ('%s - - [%s +1000] "%s %s HTTP/1.1" %s %d\n'
                % (ip,
                   timestamp.strftime("%d/%b/%Y:%H:%M:%S"),
                   method,
                   section,
                   code,
                   size))
    def run(self, duration):
        '''Generate logs for `duration` seconds with random inter-arrival delay.'''
        start = time.time()
        while time.time()-start < duration:
            self.write_log(datetime.now())
            # Uniform delay in [0, 2/rate) averages 1/rate sec per request.
            sleep(random.random()*2/self.rate)
| true | true |
1c3cf4406a7247ddaca749c3a675a988c4acb16d | 5,510 | py | Python | stroylux/main/account/emails.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | stroylux/main/account/emails.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | 7 | 2020-09-19T16:24:46.000Z | 2022-01-13T03:19:46.000Z | stroylux/main/account/emails.py | vladkoblynsky/shop | aaf027f4111605772624a868a0243b221b97c857 | [
"BSD-3-Clause"
] | null | null | null | from urllib.parse import urlencode
from django.contrib.auth.tokens import default_token_generator
from templated_email import send_templated_mail
# from ..account import events as account_events
from ..celeryconf import app
from main.core.emails import get_email_context, prepare_url
REQUEST_EMAIL_CHANGE_TEMPLATE = "account/request_email_change"
EMAIL_CHANGED_NOTIFICATION_TEMPLATE = "account/email_changed_notification"
ACCOUNT_DELETE_TEMPLATE = "account/account_delete"
PASSWORD_RESET_TEMPLATE = "account/password_reset"
def send_user_password_reset_email_with_url(redirect_url, user):
    """Queue a password reset email for the given user."""
    reset_token = default_token_generator.make_token(user)
    _send_password_reset_email_with_url.delay(
        user.email, redirect_url, user.pk, reset_token)
def send_account_confirmation_email(user, redirect_url):
    """Queue an account confirmation email for the given user."""
    confirmation_token = default_token_generator.make_token(user)
    _send_account_confirmation_email.delay(
        user.email, confirmation_token, redirect_url)
@app.task
def _send_account_confirmation_email(email, token, redirect_url):
    """Celery task: render and send the account confirmation email."""
    query = urlencode({"email": email, "token": token})
    send_kwargs, ctx = get_email_context()
    # The confirm link carries the address and token back to the frontend.
    ctx["confirm_url"] = prepare_url(query, redirect_url)
    send_templated_mail(
        template_name="account/confirm",
        recipient_list=[email],
        context=ctx,
        **send_kwargs,
    )
@app.task
def _send_password_reset_email_with_url(recipient_email, redirect_url, user_id, token):
    """Celery task: build the password reset URL and delegate to the sender."""
    query = urlencode({"email": recipient_email, "token": token})
    _send_password_reset_email(
        recipient_email, prepare_url(query, redirect_url), user_id)
def _send_password_reset_email(recipient_email, reset_url, user_id):
    """Render and send the password reset email pointing at ``reset_url``.

    ``user_id`` is currently unused pending the (commented-out) account
    event hook below.
    """
    send_kwargs, ctx = get_email_context()
    ctx["reset_url"] = reset_url
    send_templated_mail(
        # Consistency fix: use the module-level PASSWORD_RESET_TEMPLATE
        # constant instead of duplicating the template path as a literal.
        template_name=PASSWORD_RESET_TEMPLATE,
        recipient_list=[recipient_email],
        context=ctx,
        **send_kwargs,
    )
    # account_events.customer_password_reset_link_sent_event(user_id=user_id)
def send_user_change_email_url(redirect_url, user, new_email, token):
    """Queue an email-change confirmation email for the given user."""
    change_details = {"old_email": user.email, "new_email": new_email}
    _send_request_email_change_email.delay(
        new_email, redirect_url, user.pk, token, change_details)
@app.task
def _send_request_email_change_email(
    recipient_email, redirect_url, user_id, token, event_parameters
):
    """Celery task: render and send the email-change request email.

    ``user_id`` and ``event_parameters`` are currently unused pending the
    (commented-out) account event hook below.
    """
    url_with_token = prepare_url(urlencode({"token": token}), redirect_url)
    send_kwargs, ctx = get_email_context()
    ctx["redirect_url"] = url_with_token
    send_templated_mail(
        template_name=REQUEST_EMAIL_CHANGE_TEMPLATE,
        recipient_list=[recipient_email],
        context=ctx,
        **send_kwargs,
    )
    # account_events.customer_email_change_request_event(
    #     user_id=user_id, parameters=event_parameters
    # )
def send_user_change_email_notification(recipient_email):
    """Queue a notification that the account's email address was changed."""
    _send_user_change_email_notification.delay(recipient_email)
@app.task
def _send_user_change_email_notification(recipient_email):
    """Celery task: render and send the email-changed notification."""
    send_kwargs, template_ctx = get_email_context()
    send_templated_mail(
        recipient_list=[recipient_email],
        template_name=EMAIL_CHANGED_NOTIFICATION_TEMPLATE,
        context=template_ctx,
        **send_kwargs,
    )
def send_account_delete_confirmation_email_with_url(redirect_url, user):
    """Queue an account deletion confirmation email for the given user."""
    delete_token = default_token_generator.make_token(user)
    _send_account_delete_confirmation_email_with_url.delay(
        user.email, redirect_url, delete_token)
@app.task
def _send_account_delete_confirmation_email_with_url(
    recipient_email, redirect_url, token
):
    """Celery task: build the deletion URL and delegate to the sender."""
    delete_url = prepare_url(urlencode({"token": token}), redirect_url)
    _send_delete_confirmation_email(recipient_email, delete_url)
def _send_delete_confirmation_email(recipient_email, delete_url):
    """Render and send the account deletion confirmation email."""
    send_kwargs, template_ctx = get_email_context()
    template_ctx["delete_url"] = delete_url
    send_templated_mail(
        recipient_list=[recipient_email],
        template_name=ACCOUNT_DELETE_TEMPLATE,
        context=template_ctx,
        **send_kwargs,
    )
def send_set_password_email_with_url(redirect_url, user, staff=False):
    """Queue a set-password email for the given customer or staff member."""
    audience = "staff" if staff else "customer"
    token = default_token_generator.make_token(user)
    # Staff and customers use different dashboard email templates.
    _send_set_user_password_email_with_url.delay(
        user.email, redirect_url, token,
        f"dashboard/{audience}/set_password")
@app.task
def _send_set_user_password_email_with_url(
    recipient_email, redirect_url, token, template_name
):
    """Celery task: build the set-password URL and delegate to the sender."""
    query = urlencode({"email": recipient_email, "token": token})
    _send_set_password_email(
        recipient_email, prepare_url(query, redirect_url), template_name)
def _send_set_password_email(recipient_email, password_set_url, template_name):
send_kwargs, ctx = get_email_context()
ctx["password_set_url"] = password_set_url
send_templated_mail(
template_name=template_name,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
) | 34.654088 | 87 | 0.75735 | from urllib.parse import urlencode
from django.contrib.auth.tokens import default_token_generator
from templated_email import send_templated_mail
from ..celeryconf import app
from main.core.emails import get_email_context, prepare_url
REQUEST_EMAIL_CHANGE_TEMPLATE = "account/request_email_change"
EMAIL_CHANGED_NOTIFICATION_TEMPLATE = "account/email_changed_notification"
ACCOUNT_DELETE_TEMPLATE = "account/account_delete"
PASSWORD_RESET_TEMPLATE = "account/password_reset"
def send_user_password_reset_email_with_url(redirect_url, user):
token = default_token_generator.make_token(user)
_send_password_reset_email_with_url.delay(user.email, redirect_url, user.pk, token)
def send_account_confirmation_email(user, redirect_url):
token = default_token_generator.make_token(user)
_send_account_confirmation_email.delay(user.email, token, redirect_url)
@app.task
def _send_account_confirmation_email(email, token, redirect_url):
params = urlencode({"email": email, "token": token})
confirm_url = prepare_url(params, redirect_url)
send_kwargs, ctx = get_email_context()
ctx["confirm_url"] = confirm_url
send_templated_mail(
template_name="account/confirm",
recipient_list=[email],
context=ctx,
**send_kwargs,
)
@app.task
def _send_password_reset_email_with_url(recipient_email, redirect_url, user_id, token):
params = urlencode({"email": recipient_email, "token": token})
reset_url = prepare_url(params, redirect_url)
_send_password_reset_email(recipient_email, reset_url, user_id)
def _send_password_reset_email(recipient_email, reset_url, user_id):
send_kwargs, ctx = get_email_context()
ctx["reset_url"] = reset_url
send_templated_mail(
template_name="account/password_reset",
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
)
def send_user_change_email_url(redirect_url, user, new_email, token):
event_parameters = {"old_email": user.email, "new_email": new_email}
_send_request_email_change_email.delay(
new_email, redirect_url, user.pk, token, event_parameters
)
@app.task
def _send_request_email_change_email(
recipient_email, redirect_url, user_id, token, event_parameters
):
params = urlencode({"token": token})
redirect_url = prepare_url(params, redirect_url)
send_kwargs, ctx = get_email_context()
ctx["redirect_url"] = redirect_url
send_templated_mail(
template_name=REQUEST_EMAIL_CHANGE_TEMPLATE,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
)
def send_user_change_email_notification(recipient_email):
_send_user_change_email_notification.delay(recipient_email)
@app.task
def _send_user_change_email_notification(recipient_email):
send_kwargs, ctx = get_email_context()
send_templated_mail(
template_name=EMAIL_CHANGED_NOTIFICATION_TEMPLATE,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
)
def send_account_delete_confirmation_email_with_url(redirect_url, user):
token = default_token_generator.make_token(user)
_send_account_delete_confirmation_email_with_url.delay(
user.email, redirect_url, token
)
@app.task
def _send_account_delete_confirmation_email_with_url(
recipient_email, redirect_url, token
):
params = urlencode({"token": token})
delete_url = prepare_url(params, redirect_url)
_send_delete_confirmation_email(recipient_email, delete_url)
def _send_delete_confirmation_email(recipient_email, delete_url):
send_kwargs, ctx = get_email_context()
ctx["delete_url"] = delete_url
send_templated_mail(
template_name=ACCOUNT_DELETE_TEMPLATE,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
)
def send_set_password_email_with_url(redirect_url, user, staff=False):
template_type = "staff" if staff else "customer"
template = f"dashboard/{template_type}/set_password"
token = default_token_generator.make_token(user)
_send_set_user_password_email_with_url.delay(
user.email, redirect_url, token, template
)
@app.task
def _send_set_user_password_email_with_url(
recipient_email, redirect_url, token, template_name
):
params = urlencode({"email": recipient_email, "token": token})
password_set_url = prepare_url(params, redirect_url)
_send_set_password_email(recipient_email, password_set_url, template_name)
def _send_set_password_email(recipient_email, password_set_url, template_name):
send_kwargs, ctx = get_email_context()
ctx["password_set_url"] = password_set_url
send_templated_mail(
template_name=template_name,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
) | true | true |
1c3cf441982ae1444b25caea21aae44986c5c952 | 6,984 | py | Python | tests/policy/test_linear.py | nmasahiro/zr-obp | dde815dfe75fc6cc3c9ee6479d97db1e5567de6d | [
"Apache-2.0"
] | null | null | null | tests/policy/test_linear.py | nmasahiro/zr-obp | dde815dfe75fc6cc3c9ee6479d97db1e5567de6d | [
"Apache-2.0"
] | null | null | null | tests/policy/test_linear.py | nmasahiro/zr-obp | dde815dfe75fc6cc3c9ee6479d97db1e5567de6d | [
"Apache-2.0"
] | null | null | null | import pytest
import numpy as np
from obp.policy.linear import LinEpsilonGreedy
from obp.policy.linear import LinUCB
from obp.policy.linear import LinTS
def test_linear_base_exception():
    """LinEpsilonGreedy rejects invalid constructor arguments."""
    # invalid dim
    for bad_dim in (-3, 0, "3"):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=2, dim=bad_dim)
    # invalid n_actions
    for bad_n_actions in (-3, 1, "2"):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=bad_n_actions, dim=2)
    # invalid len_list
    for bad_len_list in (-3, 0, "3"):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=2, dim=2, len_list=bad_len_list)
    # invalid batch_size
    for bad_batch_size in (-2, 0, "10"):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=2, dim=2, batch_size=bad_batch_size)
    # invalid relationship between n_actions and len_list
    for n_actions, len_list in ((5, 10), (2, 3)):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=n_actions, len_list=len_list, dim=2)
    # invalid alpha and lambda
    with pytest.raises(ValueError):
        LinEpsilonGreedy(n_actions=2, dim=2, alpha_=0.0, lambda_=-3.0)
    with pytest.raises(ValueError):
        LinEpsilonGreedy(n_actions=2, dim=2, alpha_=-0.0)
    with pytest.raises(ValueError):
        LinEpsilonGreedy(n_actions=2, dim=2, lambda_=-1.0)
def test_lin_epsilon_normal_epsilon():
    """Default epsilon is in [0, 1]; an explicit epsilon is kept as given."""
    default_policy = LinEpsilonGreedy(n_actions=2, dim=2)
    assert 0 <= default_policy.epsilon <= 1
    explicit_policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=0.3)
    assert explicit_policy.epsilon == 0.3
def test_lin_epsilon_abnormal_epsilon():
    """Epsilon outside [0, 1] is rejected."""
    for invalid_epsilon in (1.2, -0.2):
        with pytest.raises(ValueError):
            LinEpsilonGreedy(n_actions=2, dim=2, epsilon=invalid_epsilon)
def test_lin_epsilon_select_action_exploitation():
    """With epsilon=0 the policy always exploits the better-rewarded action."""
    n_trials = 50
    policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=0.0)
    context = np.ones((1, 2))
    # Action 0 has a perfect reward history, action 1 a mixed one.
    for action, reward in ((0, 1.0), (0, 1.0), (1, 1.0), (1, 0.0)):
        policy.update_params(action=action, reward=reward, context=context)
    assert all(
        policy.select_action(context=context)[0] == 0
        for _ in range(n_trials)
    )
def test_lin_epsilon_select_action_exploration():
    """With epsilon=1 the policy explores: both actions get selected."""
    n_trials = 50
    policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=1.0)
    context = np.ones((1, 2))
    for action, reward in ((0, 1.0), (0, 1.0), (1, 1.0), (1, 0.0)):
        policy.update_params(action=action, reward=reward, context=context)
    chosen = [policy.select_action(context=context) for _ in range(n_trials)]
    # Neither action should be chosen exclusively over 50 random draws.
    assert 0 < sum(chosen)[0] < n_trials
def test_lin_epsilon_update_params():
    """update_params matches a hand-computed Sherman–Morrison rank-1 update."""
    policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=1.0)
    action, reward = 0, 1.0
    context = np.array([[1, 0]])
    seeded_A_inv = np.array([[1 / 2, 0], [0, 1]])
    seeded_b = np.array([1, 1])
    policy.A_inv_temp[action] = np.copy(seeded_A_inv)
    policy.b_temp[:, action] = np.copy(seeded_b)
    policy.update_params(action=action, reward=reward, context=context)
    expected_A_inv = seeded_A_inv - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
    expected_b = seeded_b + reward * context
    assert np.allclose(policy.A_inv[action], expected_A_inv)
    assert np.allclose(policy.b[:, action], expected_b)
def test_lin_ucb_initialize():
    """LinUCB rejects negative epsilon and allocates correctly-shaped state."""
    # note that the meaning of epsilon here differs from LinEpsilonGreedy's
    with pytest.raises(ValueError):
        LinUCB(n_actions=2, dim=2, epsilon=-0.2)
    n_actions, dim = 3, 2
    policy = LinUCB(n_actions=n_actions, dim=dim, epsilon=2.0)
    for attr, shape in (
        ("theta_hat", (dim, n_actions)),
        ("A_inv", (n_actions, dim, dim)),
        ("b", (dim, n_actions)),
        ("A_inv_temp", (n_actions, dim, dim)),
        ("b_temp", (dim, n_actions)),
    ):
        assert getattr(policy, attr).shape == shape
def test_lin_ucb_select_action():
    """select_action returns len_list actions."""
    dim, len_list = 3, 2
    policy = LinUCB(n_actions=4, dim=dim, len_list=2, epsilon=0.0)
    chosen = policy.select_action(context=np.ones(dim).reshape(1, -1))
    assert len(chosen) == len_list
def test_lin_ucb_update_params():
    """update_params matches a hand-computed Sherman–Morrison rank-1 update."""
    policy = LinUCB(n_actions=2, dim=2, epsilon=1.0)
    action, reward = 0, 1.0
    context = np.array([[1, 0]])
    seeded_A_inv = np.array([[1 / 2, 0], [0, 1]])
    seeded_b = np.array([1, 1])
    policy.A_inv_temp[action] = np.copy(seeded_A_inv)
    policy.b_temp[:, action] = np.copy(seeded_b)
    policy.update_params(action=action, reward=reward, context=context)
    expected_A_inv = seeded_A_inv - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
    expected_b = seeded_b + reward * context
    assert np.allclose(policy.A_inv[action], expected_A_inv)
    assert np.allclose(policy.b[:, action], expected_b)
def test_lin_ts_initialize():
    """LinTS allocates correctly-shaped parameter state."""
    n_actions, dim = 3, 2
    policy = LinTS(n_actions=n_actions, dim=dim)
    for attr, shape in (
        ("A_inv", (n_actions, dim, dim)),
        ("b", (dim, n_actions)),
        ("A_inv_temp", (n_actions, dim, dim)),
        ("b_temp", (dim, n_actions)),
    ):
        assert getattr(policy, attr).shape == shape
def test_lin_ts_select_action():
    """select_action returns len_list actions."""
    dim, len_list = 3, 2
    policy = LinTS(n_actions=4, dim=dim, len_list=2)
    chosen = policy.select_action(context=np.ones(dim).reshape(1, -1))
    assert len(chosen) == len_list
def test_lin_ts_update_params():
    """update_params matches a hand-computed Sherman–Morrison rank-1 update."""
    policy = LinTS(n_actions=2, dim=2)
    action, reward = 0, 1.0
    context = np.array([[1, 0]])
    seeded_A_inv = np.array([[1 / 2, 0], [0, 1]])
    seeded_b = np.array([1, 1])
    policy.A_inv_temp[action] = np.copy(seeded_A_inv)
    policy.b_temp[:, action] = np.copy(seeded_b)
    policy.update_params(action=action, reward=reward, context=context)
    expected_A_inv = seeded_A_inv - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
    expected_b = seeded_b + reward * context
    assert np.allclose(policy.A_inv[action], expected_A_inv)
    assert np.allclose(policy.b[:, action], expected_b)
| 34.574257 | 87 | 0.683562 | import pytest
import numpy as np
from obp.policy.linear import LinEpsilonGreedy
from obp.policy.linear import LinUCB
from obp.policy.linear import LinTS
def test_linear_base_exception():
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=-3)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=0)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim="3")
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=-3, dim=2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=1, dim=2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions="2", dim=2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, len_list=-3)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, len_list=0)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, len_list="3")
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, batch_size=-2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, batch_size=0)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, batch_size="10")
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=5, len_list=10, dim=2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, len_list=3, dim=2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, alpha_=0.0, lambda_=-3.0)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, alpha_=-0.0)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, lambda_=-1.0)
def test_lin_epsilon_normal_epsilon():
policy1 = LinEpsilonGreedy(n_actions=2, dim=2)
assert 0 <= policy1.epsilon <= 1
policy2 = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=0.3)
assert policy2.epsilon == 0.3
def test_lin_epsilon_abnormal_epsilon():
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, epsilon=1.2)
with pytest.raises(ValueError):
LinEpsilonGreedy(n_actions=2, dim=2, epsilon=-0.2)
def test_lin_epsilon_select_action_exploitation():
trial_num = 50
policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=0.0)
context = np.array([1.0, 1.0]).reshape(1, -1)
policy.update_params(action=0, reward=1.0, context=context)
policy.update_params(action=0, reward=1.0, context=context)
policy.update_params(action=1, reward=1.0, context=context)
policy.update_params(action=1, reward=0.0, context=context)
for _ in range(trial_num):
assert policy.select_action(context=context)[0] == 0
def test_lin_epsilon_select_action_exploration():
trial_num = 50
policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=1.0)
context = np.array([1.0, 1.0]).reshape(1, -1)
policy.update_params(action=0, reward=1.0, context=context)
policy.update_params(action=0, reward=1.0, context=context)
policy.update_params(action=1, reward=1.0, context=context)
policy.update_params(action=1, reward=0.0, context=context)
selected_action = [policy.select_action(context=context) for _ in range(trial_num)]
assert 0 < sum(selected_action)[0] < trial_num
def test_lin_epsilon_update_params():
policy = LinEpsilonGreedy(n_actions=2, dim=2, epsilon=1.0)
action = 0
reward = 1.0
context = np.array([1, 0]).reshape(1, -1)
A_inv_temp = np.array([[1 / 2, 0], [0, 1]])
b_temp = np.array([1, 1])
policy.A_inv_temp[action] = np.copy(A_inv_temp)
policy.b_temp[:, action] = np.copy(b_temp)
policy.update_params(action=action, reward=reward, context=context)
next_A_inv = A_inv_temp - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
next_b = b_temp + reward * context
assert np.allclose(policy.A_inv[action], next_A_inv)
assert np.allclose(policy.b[:, action], next_b)
def test_lin_ucb_initialize():
with pytest.raises(ValueError):
LinUCB(n_actions=2, dim=2, epsilon=-0.2)
n_actions = 3
dim = 2
policy = LinUCB(n_actions=n_actions, dim=dim, epsilon=2.0)
assert policy.theta_hat.shape == (dim, n_actions)
assert policy.A_inv.shape == (n_actions, dim, dim)
assert policy.b.shape == (dim, n_actions)
assert policy.A_inv_temp.shape == (n_actions, dim, dim)
assert policy.b_temp.shape == (dim, n_actions)
def test_lin_ucb_select_action():
dim = 3
len_list = 2
policy = LinUCB(n_actions=4, dim=dim, len_list=2, epsilon=0.0)
context = np.ones(dim).reshape(1, -1)
action = policy.select_action(context=context)
assert len(action) == len_list
def test_lin_ucb_update_params():
policy = LinUCB(n_actions=2, dim=2, epsilon=1.0)
action = 0
reward = 1.0
context = np.array([1, 0]).reshape(1, -1)
A_inv_temp = np.array([[1 / 2, 0], [0, 1]])
b_temp = np.array([1, 1])
policy.A_inv_temp[action] = np.copy(A_inv_temp)
policy.b_temp[:, action] = np.copy(b_temp)
policy.update_params(action=action, reward=reward, context=context)
next_A_inv = A_inv_temp - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
next_b = b_temp + reward * context
assert np.allclose(policy.A_inv[action], next_A_inv)
assert np.allclose(policy.b[:, action], next_b)
def test_lin_ts_initialize():
n_actions = 3
dim = 2
policy = LinTS(n_actions=n_actions, dim=dim)
assert policy.A_inv.shape == (n_actions, dim, dim)
assert policy.b.shape == (dim, n_actions)
assert policy.A_inv_temp.shape == (n_actions, dim, dim)
assert policy.b_temp.shape == (dim, n_actions)
def test_lin_ts_select_action():
dim = 3
len_list = 2
policy = LinTS(n_actions=4, dim=dim, len_list=2)
context = np.ones(dim).reshape(1, -1)
action = policy.select_action(context=context)
assert len(action) == len_list
def test_lin_ts_update_params():
policy = LinTS(n_actions=2, dim=2)
action = 0
reward = 1.0
context = np.array([1, 0]).reshape(1, -1)
A_inv_temp = np.array([[1 / 2, 0], [0, 1]])
b_temp = np.array([1, 1])
policy.A_inv_temp[action] = np.copy(A_inv_temp)
policy.b_temp[:, action] = np.copy(b_temp)
policy.update_params(action=action, reward=reward, context=context)
next_A_inv = A_inv_temp - np.array([[1 / 4, 0], [0, 0]]) / (1 + 1 / 2)
next_b = b_temp + reward * context
assert np.allclose(policy.A_inv[action], next_A_inv)
assert np.allclose(policy.b[:, action], next_b)
| true | true |
1c3cf4f2e8e52c39faa6724427da91d2ea0606a0 | 61 | py | Python | aoc_2020/inputs/__init__.py | n1ckdm/advent-of-code-2020 | 913ea4cff29fa76df15c0c22616cc1eebb903490 | [
"MIT"
] | 1 | 2020-12-05T09:25:03.000Z | 2020-12-05T09:25:03.000Z | aoc_2020/inputs/__init__.py | n1ckdm/advent-of-code-2020 | 913ea4cff29fa76df15c0c22616cc1eebb903490 | [
"MIT"
] | null | null | null | aoc_2020/inputs/__init__.py | n1ckdm/advent-of-code-2020 | 913ea4cff29fa76df15c0c22616cc1eebb903490 | [
"MIT"
] | null | null | null | from . import day1, day2, day3, day4, day5, day6, day7, day8
| 30.5 | 60 | 0.688525 | from . import day1, day2, day3, day4, day5, day6, day7, day8
| true | true |
1c3cf597b1f7c1c09621ac009f4e4888a73adaa7 | 3,938 | py | Python | floof/tests/functional/test_account.py | eevee/floof | 431bb9b13bcff2823dd1ef58ffea16ea5858d031 | [
"0BSD"
] | 2 | 2016-05-21T23:36:23.000Z | 2017-06-21T20:42:43.000Z | floof/tests/functional/test_account.py | silky/floof | 431bb9b13bcff2823dd1ef58ffea16ea5858d031 | [
"0BSD"
] | null | null | null | floof/tests/functional/test_account.py | silky/floof | 431bb9b13bcff2823dd1ef58ffea16ea5858d031 | [
"0BSD"
] | null | null | null | import time
from itertools import chain
from floof import model
from floof.tests import FunctionalTests
from floof.tests import sim
class TestAccount(FunctionalTests):
    def setUp(self):
        """Creates a user to be used as a fake login."""
        super(TestAccount, self).setUp()
        self.user = sim.sim_user()
        model.session.flush()
        # Environ overrides consumed by the test auth layer: they present the
        # request as authenticated for self.user via cert and OpenID.
        self.default_environ = {
            'tests.user_id': self.user.id,
            'tests.auth_openid_uid': self.user.id,
            'tests.auth_openid_time': time.time(),
        }
    def test_login(self):
        """Test display of login page."""
        response = self.app.get(
            self.url('account.login'),
        )
        assert 'Log in or register' in response, 'Anticipated heading not found in login page.'
    def test_logins(self):
        """Test logging in with various ``cert_auth`` values, using mechanism overrides."""
        # Mechanism combinations reused below: OpenID alone, and cert
        # combined with another mechanism.
        externalids = [['openid']]
        cert_with_others = [
            ['cert', 'openid'],
        ]
        # For each cert_auth setting: expected end state -> iterables of
        # authenticated-mechanism combinations that must yield that state.
        runsheet = dict(
            disabled=[
                ('logged_out', ([], ['cert'])),
                ('logged_in', chain(externalids, cert_with_others)),
            ],
            allowed=[
                ('logged_out', ([],)),
                ('logged_in', chain(['cert'], externalids, cert_with_others)),
            ],
            sensitive_required=[
                ('logged_out', ([],)),
                ('logged_in', chain(['cert'], externalids, cert_with_others)),
            ],
            required=[
                ('logged_out', chain([], externalids)),
                ('logged_in', chain(['cert'], cert_with_others)),
            ],
        )
        # Sanity: a fresh request with no auth data must look logged out.
        response = self.app.get(self.url('root'))
        assert 'or register' in response, 'Page does not appear logged out even when no auth data should be present.'
        user = model.session.query(model.User).filter_by(id=self.user.id).one()
        assert len(user.certificates) == 1, 'Expected user to have exactly one certificate, found {0}. (Test setup error)'.format(len(user.certificates))
        for cert_auth in runsheet:
            user.cert_auth = cert_auth
            model.session.flush()
            for result, mech_combos in runsheet[cert_auth]:
                for mech_combo in mech_combos:
                    if isinstance(mech_combo, basestring):
                        # XXX is there a more elegant way?
                        # (Bare strings like 'cert' are wrapped into a
                        # one-element list; note basestring implies Python 2.)
                        mech_combo = [mech_combo]
                    extra = sim.sim_user_env(self.user, *mech_combo)
                    # Log out first so each combination starts from a clean
                    # session, then hit the root page with the fake auth env.
                    response = self.app.post(self.url('account.logout'))
                    response = self.app.get(self.url('root'), extra_environ=extra)
                    if 'or register' in response:
                        assert result == 'logged_out', 'Wound up in state "logged_out", wanted "{0}", for cert_auth "{1}" with authed mechanisms: {2}'.format(result, cert_auth, mech_combo)
                    else:
                        assert result == 'logged_in', 'Wound up in state "logged_in", wanted "{0}", for cert_auth "{1}" with authed mechanisms: {2}'.format(result, cert_auth, mech_combo)
    def test_login_cert_invalid(self):
        """Test automatic fallback to "allowed" if the user has no valid certs."""
        user = model.session.query(model.User).filter_by(id=self.user.id).one()
        user.cert_auth = u'required'
        model.session.flush()
        # Logging out may legitimately error here (cert_auth is 'required'
        # but only OpenID is presented), hence expect_errors=True.
        response = self.app.post(
            self.url('account.logout'),
            expect_errors=True,
        )
        response = self.app.get(
            self.url('root'),
            extra_environ={'tests.auth_openid_uid': self.user.id},
        )
        assert 'Hello, ' in response, 'Expected to be logged in, but do not appear to be.'
| 41.020833 | 188 | 0.538852 | import time
from itertools import chain
from floof import model
from floof.tests import FunctionalTests
from floof.tests import sim
class TestAccount(FunctionalTests):
def setUp(self):
super(TestAccount, self).setUp()
self.user = sim.sim_user()
model.session.flush()
self.default_environ = {
'tests.user_id': self.user.id,
'tests.auth_openid_uid': self.user.id,
'tests.auth_openid_time': time.time(),
}
def test_login(self):
response = self.app.get(
self.url('account.login'),
)
assert 'Log in or register' in response, 'Anticipated heading not found in login page.'
def test_logins(self):
externalids = [['openid']]
cert_with_others = [
['cert', 'openid'],
]
runsheet = dict(
disabled=[
('logged_out', ([], ['cert'])),
('logged_in', chain(externalids, cert_with_others)),
],
allowed=[
('logged_out', ([],)),
('logged_in', chain(['cert'], externalids, cert_with_others)),
],
sensitive_required=[
('logged_out', ([],)),
('logged_in', chain(['cert'], externalids, cert_with_others)),
],
required=[
('logged_out', chain([], externalids)),
('logged_in', chain(['cert'], cert_with_others)),
],
)
response = self.app.get(self.url('root'))
assert 'or register' in response, 'Page does not appear logged out even when no auth data should be present.'
user = model.session.query(model.User).filter_by(id=self.user.id).one()
assert len(user.certificates) == 1, 'Expected user to have exactly one certificate, found {0}. (Test setup error)'.format(len(user.certificates))
for cert_auth in runsheet:
user.cert_auth = cert_auth
model.session.flush()
for result, mech_combos in runsheet[cert_auth]:
for mech_combo in mech_combos:
if isinstance(mech_combo, basestring):
mech_combo = [mech_combo]
extra = sim.sim_user_env(self.user, *mech_combo)
response = self.app.post(self.url('account.logout'))
response = self.app.get(self.url('root'), extra_environ=extra)
if 'or register' in response:
assert result == 'logged_out', 'Wound up in state "logged_out", wanted "{0}", for cert_auth "{1}" with authed mechanisms: {2}'.format(result, cert_auth, mech_combo)
else:
assert result == 'logged_in', 'Wound up in state "logged_in", wanted "{0}", for cert_auth "{1}" with authed mechanisms: {2}'.format(result, cert_auth, mech_combo)
def test_login_cert_invalid(self):
user = model.session.query(model.User).filter_by(id=self.user.id).one()
user.cert_auth = u'required'
model.session.flush()
response = self.app.post(
self.url('account.logout'),
expect_errors=True,
)
response = self.app.get(
self.url('root'),
extra_environ={'tests.auth_openid_uid': self.user.id},
)
assert 'Hello, ' in response, 'Expected to be logged in, but do not appear to be.'
| true | true |
1c3cf65bca8b32ffdc7f5648703f439cc1fcef05 | 1,142 | py | Python | tests/hooks/test_itl.py | KevinMusgrave/pytorch-adapt | ff1491e1bfcc586afb8ee619712c8816ddf10358 | [
"MIT"
] | 131 | 2021-11-19T14:29:59.000Z | 2022-03-26T16:33:30.000Z | tests/hooks/test_itl.py | KevinMusgrave/pytorch-adapt | ff1491e1bfcc586afb8ee619712c8816ddf10358 | [
"MIT"
] | 28 | 2021-11-19T17:31:56.000Z | 2022-03-02T18:42:51.000Z | tests/hooks/test_itl.py | KevinMusgrave/pytorch-adapt | ff1491e1bfcc586afb8ee619712c8816ddf10358 | [
"MIT"
] | 8 | 2021-11-22T17:22:45.000Z | 2022-03-08T15:49:28.000Z | import unittest
import torch
from pytorch_adapt.hooks import ISTLossHook
from pytorch_adapt.layers import ISTLoss
from .utils import assertRequiresGrad, get_models_and_data
class TestITL(unittest.TestCase):
def test_ist_loss_hook(self):
torch.manual_seed(334)
h = ISTLossHook()
(
G,
_,
_,
src_imgs,
_,
target_imgs,
src_domain,
target_domain,
) = get_models_and_data()
outputs, losses = h(locals())
self.assertTrue(G.count == 2)
assertRequiresGrad(self, outputs)
outputs, losses2 = h({**locals(), **outputs})
assertRequiresGrad(self, outputs)
self.assertTrue(G.count == 2)
self.assertTrue(losses == losses2)
src_features = G(src_imgs)
target_features = G(target_imgs)
loss_fn = ISTLoss()
self.assertTrue(
losses["ist_loss"]
== loss_fn(
torch.cat([src_features, target_features], dim=0),
torch.cat([src_domain, target_domain], dim=0),
)
)
| 24.826087 | 66 | 0.56655 | import unittest
import torch
from pytorch_adapt.hooks import ISTLossHook
from pytorch_adapt.layers import ISTLoss
from .utils import assertRequiresGrad, get_models_and_data
class TestITL(unittest.TestCase):
def test_ist_loss_hook(self):
torch.manual_seed(334)
h = ISTLossHook()
(
G,
_,
_,
src_imgs,
_,
target_imgs,
src_domain,
target_domain,
) = get_models_and_data()
outputs, losses = h(locals())
self.assertTrue(G.count == 2)
assertRequiresGrad(self, outputs)
outputs, losses2 = h({**locals(), **outputs})
assertRequiresGrad(self, outputs)
self.assertTrue(G.count == 2)
self.assertTrue(losses == losses2)
src_features = G(src_imgs)
target_features = G(target_imgs)
loss_fn = ISTLoss()
self.assertTrue(
losses["ist_loss"]
== loss_fn(
torch.cat([src_features, target_features], dim=0),
torch.cat([src_domain, target_domain], dim=0),
)
)
| true | true |
1c3cf6e8c976e83bf88e7aa845b5902975dca457 | 12,271 | py | Python | pypy3.6/multiprocess/synchronize.py | UniverseFly/multiprocess | 97f67493eccfb893ac1bba7285cf452bfc640211 | [
"BSD-3-Clause"
] | 356 | 2015-06-21T21:05:10.000Z | 2022-03-30T11:57:08.000Z | pypy3.6/multiprocess/synchronize.py | UniverseFly/multiprocess | 97f67493eccfb893ac1bba7285cf452bfc640211 | [
"BSD-3-Clause"
] | 103 | 2015-06-22T01:44:14.000Z | 2022-03-01T03:44:25.000Z | pypy3.6/multiprocess/synchronize.py | UniverseFly/multiprocess | 97f67493eccfb893ac1bba7285cf452bfc640211 | [
"BSD-3-Clause"
] | 72 | 2015-09-02T14:10:24.000Z | 2022-03-25T06:49:43.000Z | #
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import sys
import tempfile
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
import time
from . import context
from . import process
from . import util
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
from _multiprocess import SemLock, sem_unlink
except (ImportError):
try:
from _multiprocessing import SemLock, sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue, *, ctx):
if ctx is None:
ctx = context._default_context.get_context()
name = ctx.get_start_method()
unlink_now = sys.platform == 'win32' or name == 'fork'
for i in range(100):
try:
sl = self._semlock = _multiprocessing.SemLock(
kind, value, maxvalue, self._make_name(),
unlink_now)
except FileExistsError:
pass
else:
break
else:
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
util.register_after_fork(self, _after_fork)
if self._semlock.name is not None:
# We only get here if we are on Unix with forking
# disabled. When the object is garbage collected or the
# process shuts down we unlink the semaphore name
from .semaphore_tracker import register
register(self._semlock.name)
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
from .semaphore_tracker import unregister
sem_unlink(name)
unregister(name)
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
def __enter__(self):
return self._semlock.__enter__()
def __exit__(self, *args):
return self._semlock.__exit__(*args)
def __getstate__(self):
context.assert_spawning(self)
sl = self._semlock
if sys.platform == 'win32':
h = context.get_spawning_popen().duplicate_for_child(sl.handle)
else:
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
util.debug('recreated blocker with handle %r' % state[0])
self._make_methods()
@staticmethod
def _make_name():
return '%s-%s' % (process.current_process()._config['semprefix'],
next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s)>' % (self.__class__.__name__, value)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s, maxvalue=%s)>' % \
(self.__class__.__name__, value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None, *, ctx):
self._lock = lock or ctx.RLock()
self._sleeping_count = ctx.Semaphore(0)
self._woken_count = ctx.Semaphore(0)
self._wait_semaphore = ctx.Semaphore(0)
self._make_methods()
def __getstate__(self):
context.assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()
try:
# wait for notification or timeout
return self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in range(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = getattr(time,'monotonic',time.time)() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - getattr(time,'monotonic',time.time)()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
#
# Event
#
class Event(object):
def __init__(self, *, ctx):
self._cond = ctx.Condition(ctx.Lock())
self._flag = ctx.Semaphore(0)
def is_set(self):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
return True
return False
def set(self):
with self._cond:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
def clear(self):
with self._cond:
self._flag.acquire(False)
def wait(self, timeout=None):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
if self._flag.acquire(False):
self._flag.release()
return True
return False
#
# Barrier
#
class Barrier(threading.Barrier):
def __init__(self, parties, action=None, timeout=None, *, ctx):
import struct
from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2)
cond = ctx.Condition()
self.__setstate__((parties, action, timeout, cond, wrapper))
self._state = 0
self._count = 0
def __setstate__(self, state):
(self._parties, self._action, self._timeout,
self._cond, self._wrapper) = state
self._array = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._parties, self._action, self._timeout,
self._cond, self._wrapper)
@property
def _state(self):
return self._array[0]
@_state.setter
def _state(self, value):
self._array[0] = value
@property
def _count(self):
return self._array[1]
@_count.setter
def _count(self, value):
self._array[1] = value
| 29.783981 | 82 | 0.593106 |
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import sys
import tempfile
try:
import _multiprocess as _multiprocessing
except ImportError:
import _multiprocessing
import time
from . import context
from . import process
from . import util
try:
from _multiprocess import SemLock, sem_unlink
except (ImportError):
try:
from _multiprocessing import SemLock, sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue, *, ctx):
if ctx is None:
ctx = context._default_context.get_context()
name = ctx.get_start_method()
unlink_now = sys.platform == 'win32' or name == 'fork'
for i in range(100):
try:
sl = self._semlock = _multiprocessing.SemLock(
kind, value, maxvalue, self._make_name(),
unlink_now)
except FileExistsError:
pass
else:
break
else:
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
util.register_after_fork(self, _after_fork)
if self._semlock.name is not None:
from .semaphore_tracker import register
register(self._semlock.name)
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
from .semaphore_tracker import unregister
sem_unlink(name)
unregister(name)
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
def __enter__(self):
return self._semlock.__enter__()
def __exit__(self, *args):
return self._semlock.__exit__(*args)
def __getstate__(self):
context.assert_spawning(self)
sl = self._semlock
if sys.platform == 'win32':
h = context.get_spawning_popen().duplicate_for_child(sl.handle)
else:
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
util.debug('recreated blocker with handle %r' % state[0])
self._make_methods()
@staticmethod
def _make_name():
return '%s-%s' % (process.current_process()._config['semprefix'],
next(SemLock._rand))
class Semaphore(SemLock):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s)>' % (self.__class__.__name__, value)
class BoundedSemaphore(Semaphore):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s, maxvalue=%s)>' % \
(self.__class__.__name__, value, self._semlock.maxvalue)
class Lock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
class RLock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
class Condition(object):
def __init__(self, lock=None, *, ctx):
self._lock = lock or ctx.RLock()
self._sleeping_count = ctx.Semaphore(0)
self._woken_count = ctx.Semaphore(0)
self._wait_semaphore = ctx.Semaphore(0)
self._make_methods()
def __getstate__(self):
context.assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
self._sleeping_count.release()
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()
try:
return self._wait_semaphore.acquire(True, timeout)
finally:
self._woken_count.release()
for i in range(count):
self._lock.acquire()
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
if self._sleeping_count.acquire(False):
self._wait_semaphore.release()
self._woken_count.acquire()
self._wait_semaphore.acquire(False)
def notify_all(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release()
sleepers += 1
if sleepers:
for i in range(sleepers):
self._woken_count.acquire()
while self._wait_semaphore.acquire(False):
pass
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = getattr(time,'monotonic',time.time)() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - getattr(time,'monotonic',time.time)()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class Event(object):
def __init__(self, *, ctx):
self._cond = ctx.Condition(ctx.Lock())
self._flag = ctx.Semaphore(0)
def is_set(self):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
return True
return False
def set(self):
with self._cond:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
def clear(self):
with self._cond:
self._flag.acquire(False)
def wait(self, timeout=None):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
if self._flag.acquire(False):
self._flag.release()
return True
return False
class Barrier(threading.Barrier):
def __init__(self, parties, action=None, timeout=None, *, ctx):
import struct
from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2)
cond = ctx.Condition()
self.__setstate__((parties, action, timeout, cond, wrapper))
self._state = 0
self._count = 0
def __setstate__(self, state):
(self._parties, self._action, self._timeout,
self._cond, self._wrapper) = state
self._array = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._parties, self._action, self._timeout,
self._cond, self._wrapper)
@property
def _state(self):
return self._array[0]
@_state.setter
def _state(self, value):
self._array[0] = value
@property
def _count(self):
return self._array[1]
@_count.setter
def _count(self, value):
self._array[1] = value
| true | true |
1c3cf75ca61656324cb0c4e896f264b63e114061 | 660 | py | Python | bcbio/chipseq/__init__.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | 1 | 2019-08-29T07:55:48.000Z | 2019-08-29T07:55:48.000Z | bcbio/chipseq/__init__.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | null | null | null | bcbio/chipseq/__init__.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | null | null | null | from bcbio.ngsalign.bowtie2 import filter_multimappers
import bcbio.pipeline.datadict as dd
from bcbio.log import logger
def clean_chipseq_alignment(data):
aligner = dd.get_aligner(data)
data["raw_bam"] = dd.get_work_bam(data)
if aligner:
assert aligner == "bowtie2", "ChIP-seq only supported for bowtie2."
unique_bam = filter_multimappers(dd.get_work_bam(data), data)
data["work_bam"] = unique_bam
else:
logger.info("Warning: When BAM file is given as input, bcbio skips multimappers removal."
"If BAM is not cleaned for peak calling, can result in downstream errors.")
return [[data]]
| 41.25 | 97 | 0.7 | from bcbio.ngsalign.bowtie2 import filter_multimappers
import bcbio.pipeline.datadict as dd
from bcbio.log import logger
def clean_chipseq_alignment(data):
aligner = dd.get_aligner(data)
data["raw_bam"] = dd.get_work_bam(data)
if aligner:
assert aligner == "bowtie2", "ChIP-seq only supported for bowtie2."
unique_bam = filter_multimappers(dd.get_work_bam(data), data)
data["work_bam"] = unique_bam
else:
logger.info("Warning: When BAM file is given as input, bcbio skips multimappers removal."
"If BAM is not cleaned for peak calling, can result in downstream errors.")
return [[data]]
| true | true |
1c3cf765e59820e4e35f7d112297a7888f12c48a | 1,029 | py | Python | plotting/crs_benchmark.py | AlonFischer/SpatialDatabaseBench | 1fe933bd4196ba17c687f04c37cb5a34acc6d824 | [
"Apache-2.0"
] | 1 | 2020-11-17T22:56:56.000Z | 2020-11-17T22:56:56.000Z | plotting/crs_benchmark.py | AlonFischer/SpatialDatabaseBench | 1fe933bd4196ba17c687f04c37cb5a34acc6d824 | [
"Apache-2.0"
] | null | null | null | plotting/crs_benchmark.py | AlonFischer/SpatialDatabaseBench | 1fe933bd4196ba17c687f04c37cb5a34acc6d824 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import logging
from bar_chart import create_bar_chart
logger = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('mode', metavar='M', type=str,
choices=['join', 'analysis'],
help='Constrains which benchmarks are run')
args = parser.parse_args()
output_file = f"{args.mode}_crs_benchmark"
data_files = [f"{args.mode}_benchmark_pg_index_GIST.json",
f"{args.mode}_benchmark_pg_index_GIST_gcs.json"]
benchmark_data = {}
for data_file in data_files:
with open(f"results/{data_file}", 'r') as file:
benchmark_data.update(json.loads(file.read()))
logger.info(benchmark_data)
create_bar_chart(benchmark_data, "Time to Run Query With PCS and GCS",
"Seconds", f"figures/{output_file}.png", yscale='log', fig_size=(15, 5))
| 35.482759 | 93 | 0.662779 | import argparse
import json
import logging
from bar_chart import create_bar_chart
logger = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('mode', metavar='M', type=str,
choices=['join', 'analysis'],
help='Constrains which benchmarks are run')
args = parser.parse_args()
output_file = f"{args.mode}_crs_benchmark"
data_files = [f"{args.mode}_benchmark_pg_index_GIST.json",
f"{args.mode}_benchmark_pg_index_GIST_gcs.json"]
benchmark_data = {}
for data_file in data_files:
with open(f"results/{data_file}", 'r') as file:
benchmark_data.update(json.loads(file.read()))
logger.info(benchmark_data)
create_bar_chart(benchmark_data, "Time to Run Query With PCS and GCS",
"Seconds", f"figures/{output_file}.png", yscale='log', fig_size=(15, 5))
| true | true |
1c3cf80c7576baed4214a843ac7bd37860718704 | 5,831 | py | Python | tacker-1.0.0/tacker/tests/functional/vnfm/test_tosca_vnf_alarm.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | tacker-1.0.0/tacker/tests/functional/vnfm/test_tosca_vnf_alarm.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | tacker-1.0.0/tacker/tests/functional/vnfm/test_tosca_vnf_alarm.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_serialization import jsonutils
from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
import yaml
class VnfTestAlarmMonitor(base.BaseTackerTest):
def _test_vnf_tosca_alarm(self, vnfd_file, vnf_name):
input_yaml = read_file(vnfd_file)
tosca_dict = yaml.safe_load(input_yaml)
tosca_arg = {'vnfd': {'name': vnf_name,
'attributes': {'vnfd': tosca_dict}}}
# Create vnfd with tosca template
vnfd_instance = self.client.create_vnfd(body=tosca_arg)
self.assertIsNotNone(vnfd_instance)
# Create vnf with vnfd_id
vnfd_id = vnfd_instance['vnfd']['id']
vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
vnf_instance = self.client.create_vnf(body=vnf_arg)
self.validate_vnf_instance(vnfd_instance, vnf_instance)
vnf_id = vnf_instance['vnf']['id']
def _waiting_time(count):
self.wait_until_vnf_active(
vnf_id,
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
vnf = self.client.show_vnf(vnf_id)['vnf']
# {"VDU1": ["10.0.0.14", "10.0.0.5"]}
self.assertEqual(count, len(jsonutils.loads(vnf[
'mgmt_ip_address'])['VDU1']))
def _inject_monitoring_policy(vnfd_dict):
polices = vnfd_dict['topology_template'].get('policies', [])
mon_policy = dict()
for policy_dict in polices:
for name, policy in policy_dict.items():
if policy['type'] == constants.POLICY_ALARMING:
triggers = policy['triggers']
for trigger_name, trigger_dict in triggers.items():
policy_action_list = trigger_dict['action']
for policy_action_name in policy_action_list:
mon_policy[trigger_name] = policy_action_name
return mon_policy
def verify_policy(policy_dict, kw_policy):
for name, action in policy_dict.items():
if kw_policy in name:
return name
# trigger alarm
monitoring_policy = _inject_monitoring_policy(tosca_dict)
for mon_policy_name, mon_policy_action in monitoring_policy.items():
if mon_policy_action in constants.DEFAULT_ALARM_ACTIONS:
self.wait_until_vnf_active(
vnf_id,
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.trigger_vnf(vnf_id, mon_policy_name, mon_policy_action)
else:
if 'scaling_out' in mon_policy_name:
_waiting_time(2)
time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
# scaling-out backend action
scaling_out_action = mon_policy_action + '-out'
self.trigger_vnf(
vnf_id, mon_policy_name, scaling_out_action)
_waiting_time(3)
scaling_in_name = verify_policy(monitoring_policy,
kw_policy='scaling_in')
if scaling_in_name:
time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
# scaling-in backend action
scaling_in_action = mon_policy_action + '-in'
self.trigger_vnf(
vnf_id, scaling_in_name, scaling_in_action)
_waiting_time(2)
self.verify_vnf_crud_events(
vnf_id, evt_constants.RES_EVT_SCALE,
evt_constants.ACTIVE, cnt=2)
self.verify_vnf_crud_events(
vnf_id, evt_constants.RES_EVT_SCALE,
evt_constants.PENDING_SCALE_OUT, cnt=1)
self.verify_vnf_crud_events(
vnf_id, evt_constants.RES_EVT_SCALE,
evt_constants.PENDING_SCALE_IN, cnt=1)
# Delete vnf_instance with vnf_id
try:
self.client.delete_vnf(vnf_id)
except Exception:
assert False, ("Failed to delete vnf %s after the monitor test" %
vnf_id)
# Verify VNF monitor events captured for states, ACTIVE and DEAD
vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
# Delete vnfd_instance
self.addCleanup(self.client.delete_vnfd, vnfd_id)
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
    def test_vnf_alarm_respawn(self):
        """Exercise the alarm monitor with a respawn action template."""
        self._test_vnf_tosca_alarm(
            'sample-tosca-alarm-respawn.yaml',
            'alarm and respawn-vnf')
    def test_vnf_alarm_scale(self):
        """Exercise the alarm monitor with a scaling action template."""
        self._test_vnf_tosca_alarm(
            'sample-tosca-alarm-scale.yaml',
            'alarm and scale vnf')
| 41.65 | 78 | 0.595953 |
import time
from oslo_serialization import jsonutils
from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
import yaml
class VnfTestAlarmMonitor(base.BaseTackerTest):
    """Functional tests for VNF alarm monitoring (respawn and scaling)."""
    def _test_vnf_tosca_alarm(self, vnfd_file, vnf_name):
        """Deploy a VNFD/VNF pair and fire its alarm triggers.

        Creates a VNFD from `vnfd_file`, instantiates a VNF from it,
        triggers every alarm action declared in the template's
        monitoring policies, verifies the resulting scale/monitor
        events, and registers cleanup of the created resources.
        """
        input_yaml = read_file(vnfd_file)
        tosca_dict = yaml.safe_load(input_yaml)
        tosca_arg = {'vnfd': {'name': vnf_name,
                              'attributes': {'vnfd': tosca_dict}}}
        vnfd_instance = self.client.create_vnfd(body=tosca_arg)
        self.assertIsNotNone(vnfd_instance)
        vnfd_id = vnfd_instance['vnfd']['id']
        vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
        vnf_instance = self.client.create_vnf(body=vnf_arg)
        self.validate_vnf_instance(vnfd_instance, vnf_instance)
        vnf_id = vnf_instance['vnf']['id']
        def _waiting_time(count):
            # Wait for the VNF to become ACTIVE and assert it is running
            # exactly `count` VDU1 instances.
            self.wait_until_vnf_active(
                vnf_id,
                constants.VNF_CIRROS_CREATE_TIMEOUT,
                constants.ACTIVE_SLEEP_TIME)
            vnf = self.client.show_vnf(vnf_id)['vnf']
            self.assertEqual(count, len(jsonutils.loads(vnf[
                'mgmt_ip_address'])['VDU1']))
        def _inject_monitoring_policy(vnfd_dict):
            # Collect {trigger name: action} for all alarming policies in
            # the TOSCA template.
            polices = vnfd_dict['topology_template'].get('policies', [])
            mon_policy = dict()
            for policy_dict in polices:
                for name, policy in policy_dict.items():
                    if policy['type'] == constants.POLICY_ALARMING:
                        triggers = policy['triggers']
                        for trigger_name, trigger_dict in triggers.items():
                            policy_action_list = trigger_dict['action']
                            for policy_action_name in policy_action_list:
                                mon_policy[trigger_name] = policy_action_name
            return mon_policy
        def verify_policy(policy_dict, kw_policy):
            # Return the first policy name containing `kw_policy`, or
            # None implicitly when no such policy exists.
            for name, action in policy_dict.items():
                if kw_policy in name:
                    return name
        # trigger alarm
        monitoring_policy = _inject_monitoring_policy(tosca_dict)
        for mon_policy_name, mon_policy_action in monitoring_policy.items():
            if mon_policy_action in constants.DEFAULT_ALARM_ACTIONS:
                self.wait_until_vnf_active(
                    vnf_id,
                    constants.VNF_CIRROS_CREATE_TIMEOUT,
                    constants.ACTIVE_SLEEP_TIME)
                self.trigger_vnf(vnf_id, mon_policy_name, mon_policy_action)
            else:
                if 'scaling_out' in mon_policy_name:
                    _waiting_time(2)
                    time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
                    # scaling-out backend action
                    scaling_out_action = mon_policy_action + '-out'
                    self.trigger_vnf(
                        vnf_id, mon_policy_name, scaling_out_action)
                    _waiting_time(3)
                    scaling_in_name = verify_policy(monitoring_policy,
                                                    kw_policy='scaling_in')
                    if scaling_in_name:
                        time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
                        # scaling-in backend action
                        scaling_in_action = mon_policy_action + '-in'
                        self.trigger_vnf(
                            vnf_id, scaling_in_name, scaling_in_action)
                        _waiting_time(2)
                    self.verify_vnf_crud_events(
                        vnf_id, evt_constants.RES_EVT_SCALE,
                        evt_constants.ACTIVE, cnt=2)
                    self.verify_vnf_crud_events(
                        vnf_id, evt_constants.RES_EVT_SCALE,
                        evt_constants.PENDING_SCALE_OUT, cnt=1)
                    self.verify_vnf_crud_events(
                        vnf_id, evt_constants.RES_EVT_SCALE,
                        evt_constants.PENDING_SCALE_IN, cnt=1)
        # Delete vnf_instance with vnf_id
        try:
            self.client.delete_vnf(vnf_id)
        except Exception:
            assert False, ("Failed to delete vnf %s after the monitor test" %
                           vnf_id)
        # Verify VNF monitor events captured for states, ACTIVE and DEAD
        vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
        self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
        # Delete vnfd_instance
        self.addCleanup(self.client.delete_vnfd, vnfd_id)
        self.addCleanup(self.wait_until_vnf_delete, vnf_id,
                        constants.VNF_CIRROS_DELETE_TIMEOUT)
    def test_vnf_alarm_respawn(self):
        """Exercise the alarm monitor with a respawn action template."""
        self._test_vnf_tosca_alarm(
            'sample-tosca-alarm-respawn.yaml',
            'alarm and respawn-vnf')
    def test_vnf_alarm_scale(self):
        """Exercise the alarm monitor with a scaling action template."""
        self._test_vnf_tosca_alarm(
            'sample-tosca-alarm-scale.yaml',
            'alarm and scale vnf')
| true | true |
1c3cf842406fc99a73b9ac61e512db5b70718811 | 2,563 | py | Python | mcarch/views/user.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/views/user.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/views/user.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, url_for, redirect, abort, flash, request
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField
from wtforms.validators import DataRequired, Length, EqualTo
from mcarch.app import db
from mcarch.model.user import User, ResetToken
from mcarch.login import login_required, logout_required, log_in, log_out, \
cur_user, insecure_cur_user
from mcarch.util.security import is_safe_url
user = Blueprint('user', __name__, template_folder="templates")
# Bcrypt can't handle passwords longer than 72 characters
MAX_PASSWORD_LEN = 72
class LoginForm(FlaskForm):
    """Login form: a required username and password."""
    username = StringField('Username',
        validators=[DataRequired(), Length(max=User.name.type.length)])
    password = PasswordField('Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN)])
@user.route("/login", methods=['GET', 'POST'])
@logout_required
def login():
form = LoginForm()
if request.method == 'POST':
nextpage = request.args.get('next')
if nextpage and not is_safe_url(nextpage):
return abort(400)
if form.validate() and log_in(form.data['username'], form.data['password']):
user = insecure_cur_user()
flash('Logged in as {}.'.format(user['name']))
if nextpage: return redirect(nextpage)
else: return redirect(url_for('root.home'))
else:
flash('Login failed.')
return render_template("login.html", form=form)
@user.route("/logout")
def logout():
log_out()
flash('Logged out.')
return redirect(url_for('root.home'))
class PassResetForm(FlaskForm):
    """Form for choosing a new password; both fields must match."""
    password = PasswordField('New Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN),
            EqualTo('confirm', message='Passwords must match')
        ])
    confirm = PasswordField('Confirm Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN)])
@user.route("/reset-password/<token>", methods=['GET', 'POST'])
def reset_password(token):
token = ResetToken.query.filter_by(token=token).first_or_404()
form = PassResetForm()
if request.method == 'POST':
if form.validate():
token.user.set_password(form.data['password'])
db.session.delete(token)
db.session.commit()
flash('Your password has been changed. Please log in with your new password.')
return redirect(url_for('user.login'))
return render_template("reset-password.html", form=form)
| 37.144928 | 90 | 0.676551 | from flask import Blueprint, render_template, url_for, redirect, abort, flash, request
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField
from wtforms.validators import DataRequired, Length, EqualTo
from mcarch.app import db
from mcarch.model.user import User, ResetToken
from mcarch.login import login_required, logout_required, log_in, log_out, \
cur_user, insecure_cur_user
from mcarch.util.security import is_safe_url
user = Blueprint('user', __name__, template_folder="templates")
MAX_PASSWORD_LEN = 72
class LoginForm(FlaskForm):
    """Login form: a required username and password."""
    username = StringField('Username',
        validators=[DataRequired(), Length(max=User.name.type.length)])
    password = PasswordField('Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN)])
@user.route("/login", methods=['GET', 'POST'])
@logout_required
def login():
form = LoginForm()
if request.method == 'POST':
nextpage = request.args.get('next')
if nextpage and not is_safe_url(nextpage):
return abort(400)
if form.validate() and log_in(form.data['username'], form.data['password']):
user = insecure_cur_user()
flash('Logged in as {}.'.format(user['name']))
if nextpage: return redirect(nextpage)
else: return redirect(url_for('root.home'))
else:
flash('Login failed.')
return render_template("login.html", form=form)
@user.route("/logout")
def logout():
log_out()
flash('Logged out.')
return redirect(url_for('root.home'))
class PassResetForm(FlaskForm):
    """Form for choosing a new password; both fields must match."""
    password = PasswordField('New Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN),
            EqualTo('confirm', message='Passwords must match')
        ])
    confirm = PasswordField('Confirm Password',
        validators=[DataRequired(), Length(max=MAX_PASSWORD_LEN)])
@user.route("/reset-password/<token>", methods=['GET', 'POST'])
def reset_password(token):
token = ResetToken.query.filter_by(token=token).first_or_404()
form = PassResetForm()
if request.method == 'POST':
if form.validate():
token.user.set_password(form.data['password'])
db.session.delete(token)
db.session.commit()
flash('Your password has been changed. Please log in with your new password.')
return redirect(url_for('user.login'))
return render_template("reset-password.html", form=form)
| true | true |
1c3cfa0acfafe193db5cfa41f10534c110c52129 | 429 | py | Python | app/schemas/stars.py | NewShadesDAO/api | 1e66336f0ea526f245918ecdc328c9a66280be91 | [
"CC0-1.0"
] | 1 | 2022-03-21T07:37:02.000Z | 2022-03-21T07:37:02.000Z | app/schemas/stars.py | NewShadesDAO/api | 1e66336f0ea526f245918ecdc328c9a66280be91 | [
"CC0-1.0"
] | 25 | 2022-01-16T13:18:21.000Z | 2022-03-29T13:08:19.000Z | app/schemas/stars.py | NewShadesDAO/api | 1e66336f0ea526f245918ecdc328c9a66280be91 | [
"CC0-1.0"
] | 1 | 2022-01-15T21:42:00.000Z | 2022-01-15T21:42:00.000Z | from typing import Optional
from app.schemas.base import APIBaseCreateSchema, APIBaseSchema, PyObjectId
class StarSchema(APIBaseSchema):
    """API representation of a star — presumably a user's bookmark of a
    server, channel, or message (confirm against the model layer)."""
    user: PyObjectId  # owner of the star
    type: str  # which kind of target is starred
    server: Optional[PyObjectId]
    channel: Optional[PyObjectId]
    message: Optional[PyObjectId]
class StarCreateSchema(APIBaseCreateSchema):
    """Payload accepted when creating a star; ids arrive as strings."""
    server: Optional[str]
    channel: Optional[str]
    message: Optional[str]
    type: Optional[str]
| 20.428571 | 75 | 0.748252 | from typing import Optional
from app.schemas.base import APIBaseCreateSchema, APIBaseSchema, PyObjectId
class StarSchema(APIBaseSchema):
    """API representation of a star — presumably a user's bookmark of a
    server, channel, or message (confirm against the model layer)."""
    user: PyObjectId  # owner of the star
    type: str  # which kind of target is starred
    server: Optional[PyObjectId]
    channel: Optional[PyObjectId]
    message: Optional[PyObjectId]
class StarCreateSchema(APIBaseCreateSchema):
    """Payload accepted when creating a star; ids arrive as strings."""
    server: Optional[str]
    channel: Optional[str]
    message: Optional[str]
    type: Optional[str]
| true | true |
1c3cfad0ccfcabea2b8edff798ceb56c8550cd76 | 1,758 | py | Python | tests/integration_tests/framework/constants.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/framework/constants.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/framework/constants.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# User configured environment variables
#######################################
# if your test fetches hello world or some other repo, configure this env var
# to your liking if you wish to use a branch different than master
# Env var key selecting the git branch used by tests that fetch repos.
BRANCH_NAME_CORE = 'BRANCH_NAME_CORE'
# Internal framework environment variables
##########################################
# Env var keys carrying the test container's address and REST port.
DOCL_CONTAINER_IP = 'DOCL_CONTAINER_IP'
CLOUDIFY_REST_PORT = 'CLOUDIFY_REST_PORT'
# Fixed filesystem locations on the manager container.
PLUGIN_STORAGE_DIR = '/opt/integration-plugin-storage'
DOCKER_COMPUTE_DIR = '/etc/cloudify/dockercompute'
CONFIG_FILE_LOCATION = '/opt/manager/cloudify-rest.conf'
AUTHORIZATION_FILE_LOCATION = '/opt/manager/authorization.conf'
# System user the Cloudify services run as on the manager.
CLOUDIFY_USER = 'cfyuser'
# Script on the manager that generates an admin REST token.
ADMIN_TOKEN_SCRIPT = '/opt/cloudify/mgmtworker/create-admin-token.py'
INSERT_MOCK_LICENSE_QUERY = "INSERT INTO licenses(customer_id, " \
"expiration_date, license_edition, trial," \
" cloudify_version, capabilities, signature)" \
" VALUES('mock_customer', '2050-01-01', 'Spire'," \
" false, '4.6', '{mock}', 'mock_signature');"
| 42.878049 | 79 | 0.685438 | true | true | |
1c3cfc86a4966f13275f8c8826ba69abb15a9450 | 17,426 | py | Python | diagnnose/models/recurrent_lm.py | i-machine-think/diagnnose | 4533347d1f2cc2959903ae667f99dccd4dda73ee | [
"MIT"
] | 35 | 2019-06-12T13:50:39.000Z | 2020-11-10T22:29:19.000Z | diagnnose/models/recurrent_lm.py | i-machine-think/diagnnose | 4533347d1f2cc2959903ae667f99dccd4dda73ee | [
"MIT"
] | 50 | 2019-04-07T20:22:54.000Z | 2020-11-14T12:58:27.000Z | diagnnose/models/recurrent_lm.py | i-machine-think/diagnnose | 4533347d1f2cc2959903ae667f99dccd4dda73ee | [
"MIT"
] | 5 | 2019-06-06T13:37:29.000Z | 2020-09-24T12:04:17.000Z | import os
from itertools import product
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.nn.functional import log_softmax
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence
from transformers import PreTrainedTokenizer
from diagnnose.activations.selection_funcs import final_sen_token
from diagnnose.attribute import ShapleyTensor
from diagnnose.corpus import Corpus
from diagnnose.extract import Extractor
from diagnnose.models import LanguageModel
from diagnnose.typedefs.activations import (
ActivationDict,
ActivationName,
ActivationNames,
)
from diagnnose.utils import __file__ as diagnnose_utils_init
from diagnnose.utils.misc import suppress_print
from diagnnose.utils.pickle import load_pickle
class RecurrentLM(LanguageModel):
    """Base class for RNN LM with intermediate activations.

    This class contains all the base logic (including forward passes)
    for LSTM-type LMs, except for loading in the weights of a specific
    model.
    """

    is_causal: bool = True
    forget_offset: int = 0
    ih_concat_order: List[str] = ["h", "i"]
    split_order: List[str]
    use_char_embs: bool = False
    use_peepholes: bool = False
    # NOTE: class-level mutable default; `set_init_states` rebinds it as
    # an instance attribute, so the shared dict is never mutated in place.
    init_states: ActivationDict = {}

    def __init__(self, device: str = "cpu"):
        super().__init__(device)

        # layer index -> layer weights
        self.weight: Dict[int, Tensor] = {}
        self.bias: Dict[int, Tensor] = {}

        # Projects cell state dimension (8192) back to hidden dimension (1024)
        self.weight_P: Dict[int, Tensor] = {}
        # The 3 peepholes are weighted by a diagonal matrix
        self.peepholes: ActivationDict = {}

        self.decoder_w: Optional[Tensor] = None
        self.decoder_b: Optional[Tensor] = None

    def create_inputs_embeds(self, input_ids: Tensor) -> Tensor:
        """Look up the static word embeddings for a tensor of token ids."""
        return self.word_embeddings[input_ids]

    def decode(self, hidden_state: Tensor) -> Tensor:
        """Project a hidden state onto the vocabulary with the decoder."""
        return hidden_state @ self.decoder_w.t() + self.decoder_b

    @property
    def num_layers(self) -> int:
        """Number of LSTM layers, derived from the model's size dict."""
        return max(layer for layer, _name in self.sizes) + 1

    @property
    def top_layer(self) -> int:
        """Index of the final LSTM layer."""
        return self.num_layers - 1

    @property
    def output_size(self) -> int:
        """Dimensionality of the top-layer hidden state."""
        return self.sizes[self.top_layer, "hx"]

    def nhid(self, activation_name: ActivationName) -> int:
        """Returns number of hidden units for a (layer, name) tuple.

        If `name` != emb/hx/cx returns the size of (layer, `cx`).
        """
        layer, name = activation_name

        return self.sizes.get((layer, name), self.sizes[layer, "cx"])

    def activation_names(self, compute_out: bool = False) -> ActivationNames:
        """Returns a list of all the model's activation names.

        Parameters
        ----------
        compute_out : bool, optional
            Toggles the computation of the final decoder projection.
            If set to False this projection is not calculated.
            Defaults to True.

        Returns
        -------
        activation_names : ActivationNames
            List of (layer, name) tuples.
        """
        lstm_names = ["hx", "cx", "f_g", "i_g", "o_g", "c_tilde_g"]

        activation_names = list(product(range(self.num_layers), lstm_names))
        activation_names.append((0, "emb"))

        if compute_out:
            activation_names.append((self.top_layer, "out"))

        return activation_names

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        inputs_embeds: Optional[Union[Tensor, ShapleyTensor]] = None,
        input_lengths: Optional[Tensor] = None,
        calc_causal_lm_probs: bool = False,
        compute_out: bool = False,
        only_return_top_embs: bool = False,
    ) -> Union[ActivationDict, Tensor]:
        """Perform a forward pass over a (batch of) sentence(s).

        Parameters
        ----------
        input_ids : Tensor, optional
            Token ids of shape batch_size x max_sen_len. Mutually
            exclusive with `inputs_embeds`.
        inputs_embeds : Tensor | ShapleyTensor, optional
            Input embeddings; a 2D tensor is treated as a single
            sentence and gains a batch dimension.
        input_lengths : Tensor, optional
            Sentence lengths; defaults to the padded sequence length
            for every batch item.
        calc_causal_lm_probs : bool, optional
            Replace the output logits by the log-probabilities of the
            gold next token. Requires `input_ids`, and `compute_out`
            must be True so the logits are available.
        compute_out : bool, optional
            Toggles the final decoder projection.
        only_return_top_embs : bool, optional
            Return only the top-layer output tensor instead of the
            full activation dictionary.

        Returns
        -------
        activations : ActivationDict | Tensor
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        if inputs_embeds is None and input_ids is None:
            raise ValueError("inputs_embeds or input_ids must be provided")
        if calc_causal_lm_probs and input_ids is None:
            # The gold next-token ids are sliced from input_ids below.
            raise ValueError("calc_causal_lm_probs requires input_ids to be provided")
        if inputs_embeds is None:
            inputs_embeds = self.create_inputs_embeds(input_ids)
        if len(inputs_embeds.shape) == 2:
            # Single sentence: add a batch dimension.
            inputs_embeds = inputs_embeds.unsqueeze(0)
        inputs_embeds = inputs_embeds.to(self.device)

        iterator, unsorted_indices = self._create_iterator(inputs_embeds, input_lengths)

        all_activations = self._init_activations(inputs_embeds, compute_out)
        cur_activations = self.init_hidden(inputs_embeds.size(0))

        for w_idx, input_ in enumerate(iterator):
            num_input = input_.shape[0]
            # The packed batch shrinks as shorter sentences run out.
            for a_name in cur_activations:
                cur_activations[a_name] = cur_activations[a_name][:num_input]

            cur_activations = self.forward_step(
                input_, cur_activations, compute_out=compute_out
            )

            for a_name in all_activations:
                all_activations[a_name][:num_input, w_idx] = cur_activations[a_name]

        # Batch had been sorted and needs to be unsorted to retain the original order
        for a_name, activations in all_activations.items():
            all_activations[a_name] = activations[unsorted_indices]

        if calc_causal_lm_probs:
            output_ids = input_ids[:, 1:].unsqueeze(-1)
            logits = all_activations[self.top_layer, "out"]
            probs = log_softmax(logits[:, :-1], dim=-1)
            all_activations[self.top_layer, "out"] = torch.gather(probs, -1, output_ids)

        if only_return_top_embs and compute_out:
            return all_activations[self.top_layer, "out"]
        elif only_return_top_embs:
            return all_activations[self.top_layer, "hx"]

        return all_activations

    def forward_step(
        self,
        token_embeds: Tensor,
        prev_activations: ActivationDict,
        compute_out: bool = False,
    ) -> ActivationDict:
        """Performs a forward pass of one step across all layers.

        Parameters
        ----------
        token_embeds : Tensor
            Tensor of word embeddings at the current sentence position.
        prev_activations : ActivationDict
            Dict mapping the activation names of the previous hidden
            and cell states to their corresponding Tensors.
        compute_out : bool, optional
            Toggles the computation of the final decoder projection.
            If set to False this projection is not calculated.
            Defaults to True.

        Returns
        -------
        all_activations : ActivationDict
            Dictionary mapping activation names to tensors of shape:
            batch_size x max_sen_len x nhid.
        """
        cur_activations: ActivationDict = {}
        input_ = token_embeds

        for layer in range(self.num_layers):
            prev_hx = prev_activations[layer, "hx"]
            prev_cx = prev_activations[layer, "cx"]
            layer_activations = self.forward_cell(layer, input_, prev_hx, prev_cx)
            cur_activations.update(layer_activations)
            # The hidden state feeds the next layer as its input.
            input_ = cur_activations[layer, "hx"]

        if compute_out:
            out = input_ @ self.decoder_w.t()
            out += self.decoder_b
            cur_activations[self.top_layer, "out"] = out

        return cur_activations

    def forward_cell(
        self, layer: int, input_: Tensor, prev_hx: Tensor, prev_cx: Tensor
    ) -> ActivationDict:
        """Performs the forward step of 1 LSTM cell.

        Parameters
        ----------
        layer : int
            Current RNN layer.
        input_ : Tensor
            Current input embedding. In higher layers this is h^l-1_t.
            Size: batch_size x nhid
        prev_hx : Tensor
            Previous hidden state. Size: batch_size x nhid
        prev_cx : Tensor
            Previous cell state. Size: batch_size x nhid

        Returns
        -------
        all_activations : ActivationDict
            Dictionary mapping activation names to tensors of shape:
            batch_size x max_sen_len x nhid.
        """
        # Shape: (bsz, nhid_h+emb_size)
        if self.ih_concat_order == ["h", "i"]:
            ih_concat = torch.cat((prev_hx, input_), dim=1)
        else:
            ih_concat = torch.cat((input_, prev_hx), dim=1)

        # Shape: (bsz, 4*nhid_c)
        proj = ih_concat @ self.weight[layer]
        if layer in self.bias:
            proj += self.bias[layer]

        split_proj: Dict[str, Tensor] = dict(
            zip(self.split_order, torch.split(proj, self.sizes[layer, "cx"], dim=1))
        )

        if self.use_peepholes:
            split_proj["f"] += prev_cx * self.peepholes[layer, "f"]
            split_proj["i"] += prev_cx * self.peepholes[layer, "i"]

        # Shapes: (bsz, nhid_c)
        f_g = torch.sigmoid(split_proj["f"])
        i_g = torch.sigmoid(split_proj["i"])
        c_tilde_g = torch.tanh(split_proj["g"])

        cx = f_g * prev_cx + i_g * c_tilde_g

        if self.use_peepholes:
            # The output peephole sees the *new* cell state.
            split_proj["o"] += cx * self.peepholes[layer, "o"]
        o_g = torch.sigmoid(split_proj["o"])
        hx = o_g * torch.tanh(cx)

        if self.sizes[layer, "hx"] != self.sizes[layer, "cx"]:
            # Project the cell-sized hidden state down to hidden size.
            hx = hx @ self.weight_P[layer]

        activation_dict = {
            (layer, "hx"): hx,
            (layer, "cx"): cx,
            (layer, "f_g"): f_g,
            (layer, "i_g"): i_g,
            (layer, "o_g"): o_g,
            (layer, "c_tilde_g"): c_tilde_g,
        }

        if layer == 0:
            activation_dict[0, "emb"] = input_

        return activation_dict

    @staticmethod
    def _create_iterator(
        inputs_embeds: Tensor, input_lengths: Optional[Tensor]
    ) -> Tuple[Tuple[Tensor, ...], Tensor]:
        """Creates a PackedSequence that handles batching for the RNN.

        Batch items are sorted based on sentence length, allowing
        <pad> tokens to be skipped efficiently during the forward pass.

        Returns
        -------
        iterator : Tuple[Tensor, ...]
            Tuple of input tensors for each step in the sequence.
        unsorted_indices : Tensor
            Original order of the corpus prior to sorting.
        """
        if input_lengths is None:
            batch_size = inputs_embeds.shape[0]
            input_lengths = torch.tensor(batch_size * [inputs_embeds.shape[1]])

        packed_batch: PackedSequence = pack_padded_sequence(
            inputs_embeds,
            lengths=input_lengths.cpu(),
            batch_first=True,
            enforce_sorted=False,
        )

        iterator = torch.split(packed_batch.data, list(packed_batch.batch_sizes))

        return iterator, packed_batch.unsorted_indices

    def _init_activations(
        self, inputs_embeds: Tensor, compute_out: bool
    ) -> ActivationDict:
        """Returns a dictionary mapping activation names to tensors.

        If the input is a ShapleyTensor this dict will store the
        ShapleyTensors as well.

        Returns
        -------
        all_activations : ActivationDict
            Dictionary mapping activation names to tensors of shape:
            batch_size x max_sen_len x nhid.
        """
        batch_size, max_sen_len = inputs_embeds.shape[:2]
        # Allocate on the model's device: the per-step activations and the
        # `unsorted_indices` used to re-order them live there as well, so a
        # CPU-allocated buffer would fail on a GPU model.
        all_activations: ActivationDict = {
            a_name: torch.zeros(
                batch_size, max_sen_len, self.nhid(a_name), device=self.device
            )
            for a_name in self.activation_names(compute_out)
        }

        if isinstance(inputs_embeds, ShapleyTensor):
            for a_name, activations in all_activations.items():
                all_activations[a_name] = type(inputs_embeds)(activations)

        return all_activations

    def init_hidden(self, batch_size: int) -> ActivationDict:
        """Creates a batch of initial states.

        Parameters
        ----------
        batch_size : int
            Size of batch for which states are created.

        Returns
        -------
        init_states : ActivationTensors
            Dictionary mapping hidden and cell state to init tensors.
        """
        batch_init_states: ActivationDict = {}

        for layer in range(self.num_layers):
            for hc in ["hx", "cx"]:
                # Shape: (batch_size, nhid)
                batched_state = self.init_states[layer, hc].repeat(batch_size, 1)
                batch_init_states[layer, hc] = batched_state

        return batch_init_states

    def final_hidden(self, hidden: ActivationDict) -> Tensor:
        """Returns the final hidden state.

        Parameters
        ----------
        hidden : ActivationTensors
            Dictionary of extracted activations.

        Returns
        -------
        final_hidden : Tensor
            Tensor of the final hidden state.
        """
        return hidden[self.top_layer, "hx"].squeeze()

    def set_init_states(
        self,
        pickle_path: Optional[str] = None,
        corpus_path: Optional[str] = None,
        use_default: bool = False,
        tokenizer: Optional[PreTrainedTokenizer] = None,
        save_init_states_to: Optional[str] = None,
    ) -> None:
        """Set up the initial LM states.

        If no path is provided 0-valued embeddings will be used.
        Note that the loaded init should provide tensors for `hx`
        and `cx` in all layers of the LM.

        Note that `init_states_pickle` takes precedence over
        `init_states_corpus` in case both are provided.

        Parameters
        ----------
        pickle_path : str, optional
            Path to pickled file with initial lstm states. If not
            provided zero-valued init states will be created.
        corpus_path : str, optional
            Path to corpus of which the final hidden state will be used
            as initial states.
        use_default : bool
            Toggle to use the default initial sentence `. <eos>`.
        tokenizer : PreTrainedTokenizer, optional
            Tokenizer that must be provided when creating the init
            states from a corpus.
        save_init_states_to : str, optional
            Path to which the newly computed init_states will be saved.
            If not provided these states won't be dumped.

        Returns
        -------
        init_states : ActivationTensors
            ActivationTensors containing the init states for each layer.
        """
        if use_default:
            diagnnose_utils_dir = os.path.dirname(diagnnose_utils_init)
            corpus_path = os.path.join(diagnnose_utils_dir, "init_sentence.txt")

        if pickle_path is not None:
            init_states = self._create_init_states_from_pickle(pickle_path)
        elif corpus_path is not None:
            init_states = self._create_init_states_from_corpus(
                corpus_path, tokenizer, save_init_states_to
            )
        else:
            init_states = self._create_zero_states()

        self.init_states = init_states

    def _create_zero_states(self) -> ActivationDict:
        """Zero-initialized states if no init state is provided.

        Returns
        -------
        init_states : ActivationTensors
            Dictionary mapping (layer, name) tuple to zero-tensor.
        """
        init_states: ActivationDict = {
            a_name: torch.zeros((1, self.nhid(a_name)), device=self.device)
            for a_name in product(range(self.num_layers), ["cx", "hx"])
        }

        return init_states

    @suppress_print
    def _create_init_states_from_corpus(
        self,
        init_states_corpus: str,
        tokenizer: PreTrainedTokenizer,
        save_init_states_to: Optional[str] = None,
    ) -> ActivationDict:
        """Extracts the final hidden/cell states of a corpus pass and
        uses those as initial states."""
        assert (
            tokenizer is not None
        ), "Tokenizer must be provided when creating init states from corpus"

        corpus: Corpus = Corpus.create(init_states_corpus, tokenizer=tokenizer)

        activation_names: ActivationNames = [
            (layer, name) for layer in range(self.num_layers) for name in ["hx", "cx"]
        ]

        extractor = Extractor(
            self,
            corpus,
            activation_names,
            activations_dir=save_init_states_to,
            selection_func=final_sen_token,
        )
        init_states = extractor.extract().activation_dict

        return init_states

    def _create_init_states_from_pickle(self, pickle_path: str) -> ActivationDict:
        """Loads and validates pickled init states."""
        init_states: ActivationDict = load_pickle(pickle_path)

        self._validate_init_states_from_pickle(init_states)

        return init_states

    def _validate_init_states_from_pickle(self, init_states: ActivationDict) -> None:
        """Checks that pickled init states cover every layer with
        correctly sized `hx`/`cx` tensors."""
        # +1 because layers are 0-indexed; mirrors the `num_layers` property.
        num_init_layers = max(layer for layer, _name in init_states) + 1
        assert (
            num_init_layers == self.num_layers
        ), "Number of initial layers not correct"

        for (layer, name), size in self.sizes.items():
            if name in ["hx", "cx"]:
                assert (
                    layer,
                    name,
                ) in init_states.keys(), (
                    f"Activation {layer},{name} is not found in init states"
                )

                init_size = init_states[layer, name].size(1)
                assert init_size == size, (
                    f"Initial activation size for {name} is incorrect: "
                    f"{name}: {init_size}, should be {size}"
                )
| 34.852 | 88 | 0.61546 | import os
from itertools import product
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.nn.functional import log_softmax
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence
from transformers import PreTrainedTokenizer
from diagnnose.activations.selection_funcs import final_sen_token
from diagnnose.attribute import ShapleyTensor
from diagnnose.corpus import Corpus
from diagnnose.extract import Extractor
from diagnnose.models import LanguageModel
from diagnnose.typedefs.activations import (
ActivationDict,
ActivationName,
ActivationNames,
)
from diagnnose.utils import __file__ as diagnnose_utils_init
from diagnnose.utils.misc import suppress_print
from diagnnose.utils.pickle import load_pickle
class RecurrentLM(LanguageModel):
is_causal: bool = True
forget_offset: int = 0
ih_concat_order: List[str] = ["h", "i"]
split_order: List[str]
use_char_embs: bool = False
use_peepholes: bool = False
init_states: ActivationDict = {}
def __init__(self, device: str = "cpu"):
super().__init__(device)
self.weight: Dict[int, Tensor] = {}
self.bias: Dict[int, Tensor] = {}
self.weight_P: Dict[int, Tensor] = {}
self.peepholes: ActivationDict = {}
self.decoder_w: Optional[Tensor] = None
self.decoder_b: Optional[Tensor] = None
def create_inputs_embeds(self, input_ids: Tensor) -> Tensor:
return self.word_embeddings[input_ids]
def decode(self, hidden_state: Tensor) -> Tensor:
return hidden_state @ self.decoder_w.t() + self.decoder_b
@property
def num_layers(self) -> int:
return max(layer for layer, _name in self.sizes) + 1
@property
def top_layer(self) -> int:
return self.num_layers - 1
@property
def output_size(self) -> int:
return self.sizes[self.top_layer, "hx"]
def nhid(self, activation_name: ActivationName) -> int:
layer, name = activation_name
return self.sizes.get((layer, name), self.sizes[layer, "cx"])
def activation_names(self, compute_out: bool = False) -> ActivationNames:
lstm_names = ["hx", "cx", "f_g", "i_g", "o_g", "c_tilde_g"]
activation_names = list(product(range(self.num_layers), lstm_names))
activation_names.append((0, "emb"))
if compute_out:
activation_names.append((self.top_layer, "out"))
return activation_names
def forward(
self,
input_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Union[Tensor, ShapleyTensor]] = None,
input_lengths: Optional[Tensor] = None,
calc_causal_lm_probs: bool = False,
compute_out: bool = False,
only_return_top_embs: bool = False,
) -> Union[ActivationDict, Tensor]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
if inputs_embeds is None and input_ids is None:
raise ValueError("inputs_embeds or input_ids must be provided")
if inputs_embeds is None:
inputs_embeds = self.create_inputs_embeds(input_ids)
if len(inputs_embeds.shape) == 2:
inputs_embeds = inputs_embeds.unsqueeze(0)
inputs_embeds = inputs_embeds.to(self.device)
iterator, unsorted_indices = self._create_iterator(inputs_embeds, input_lengths)
all_activations = self._init_activations(inputs_embeds, compute_out)
cur_activations = self.init_hidden(inputs_embeds.size(0))
for w_idx, input_ in enumerate(iterator):
num_input = input_.shape[0]
for a_name in cur_activations:
cur_activations[a_name] = cur_activations[a_name][:num_input]
cur_activations = self.forward_step(
input_, cur_activations, compute_out=compute_out
)
for a_name in all_activations:
all_activations[a_name][:num_input, w_idx] = cur_activations[a_name]
for a_name, activations in all_activations.items():
all_activations[a_name] = activations[unsorted_indices]
if calc_causal_lm_probs:
output_ids = input_ids[:, 1:].unsqueeze(-1)
logits = all_activations[self.top_layer, "out"]
probs = log_softmax(logits[:, :-1], dim=-1)
all_activations[self.top_layer, "out"] = torch.gather(probs, -1, output_ids)
if only_return_top_embs and compute_out:
return all_activations[self.top_layer, "out"]
elif only_return_top_embs:
return all_activations[self.top_layer, "hx"]
return all_activations
def forward_step(
self,
token_embeds: Tensor,
prev_activations: ActivationDict,
compute_out: bool = False,
) -> ActivationDict:
cur_activations: ActivationDict = {}
input_ = token_embeds
for layer in range(self.num_layers):
prev_hx = prev_activations[layer, "hx"]
prev_cx = prev_activations[layer, "cx"]
layer_activations = self.forward_cell(layer, input_, prev_hx, prev_cx)
cur_activations.update(layer_activations)
input_ = cur_activations[layer, "hx"]
if compute_out:
out = input_ @ self.decoder_w.t()
out += self.decoder_b
cur_activations[self.top_layer, "out"] = out
return cur_activations
def forward_cell(
self, layer: int, input_: Tensor, prev_hx: Tensor, prev_cx: Tensor
) -> ActivationDict:
if self.ih_concat_order == ["h", "i"]:
ih_concat = torch.cat((prev_hx, input_), dim=1)
else:
ih_concat = torch.cat((input_, prev_hx), dim=1)
proj = ih_concat @ self.weight[layer]
if layer in self.bias:
proj += self.bias[layer]
split_proj: Dict[str, Tensor] = dict(
zip(self.split_order, torch.split(proj, self.sizes[layer, "cx"], dim=1))
)
if self.use_peepholes:
split_proj["f"] += prev_cx * self.peepholes[layer, "f"]
split_proj["i"] += prev_cx * self.peepholes[layer, "i"]
f_g = torch.sigmoid(split_proj["f"])
i_g = torch.sigmoid(split_proj["i"])
c_tilde_g = torch.tanh(split_proj["g"])
cx = f_g * prev_cx + i_g * c_tilde_g
if self.use_peepholes:
split_proj["o"] += cx * self.peepholes[layer, "o"]
o_g = torch.sigmoid(split_proj["o"])
hx = o_g * torch.tanh(cx)
if self.sizes[layer, "hx"] != self.sizes[layer, "cx"]:
hx = hx @ self.weight_P[layer]
activation_dict = {
(layer, "hx"): hx,
(layer, "cx"): cx,
(layer, "f_g"): f_g,
(layer, "i_g"): i_g,
(layer, "o_g"): o_g,
(layer, "c_tilde_g"): c_tilde_g,
}
if layer == 0:
activation_dict[0, "emb"] = input_
return activation_dict
    @staticmethod
    def _create_iterator(
        inputs_embeds: Tensor, input_lengths: Optional[Tensor]
    ) -> Tuple[Tuple[Tensor, ...], Tensor]:
        """Turn a padded batch of embeddings into per-time-step chunks.

        Packs the batch (without requiring it to be sorted by length) and
        splits the packed data along the per-step batch sizes, yielding one
        tensor of embeddings per time step.

        Parameters
        ----------
        inputs_embeds : Tensor
            Batch-first padded embeddings.
        input_lengths : Optional[Tensor]
            True sentence lengths; if None, every row is assumed to span
            the full padded length.

        Returns
        -------
        Tuple[Tuple[Tensor, ...], Tensor]
            The per-step tensors and the ``unsorted_indices`` needed to
            restore the original batch order afterwards.
        """
        if input_lengths is None:
            batch_size = inputs_embeds.shape[0]
            input_lengths = torch.tensor(batch_size * [inputs_embeds.shape[1]])

        # pack_padded_sequence requires lengths on CPU.
        packed_batch: PackedSequence = pack_padded_sequence(
            inputs_embeds,
            lengths=input_lengths.cpu(),
            batch_first=True,
            enforce_sorted=False,
        )

        iterator = torch.split(packed_batch.data, list(packed_batch.batch_sizes))

        return iterator, packed_batch.unsorted_indices
    def _init_activations(
        self, inputs_embeds: Tensor, compute_out: bool
    ) -> ActivationDict:
        """Allocate zero-filled buffers for every activation to be recorded.

        Creates one (batch_size, max_sen_len, nhid) tensor per activation
        name. If the input is a ShapleyTensor, each buffer is wrapped in
        the same (sub)class so downstream attribution bookkeeping works.
        """
        batch_size, max_sen_len = inputs_embeds.shape[:2]
        all_activations: ActivationDict = {
            a_name: torch.zeros(batch_size, max_sen_len, self.nhid(a_name))
            for a_name in self.activation_names(compute_out)
        }

        if isinstance(inputs_embeds, ShapleyTensor):
            # type(inputs_embeds) preserves the concrete ShapleyTensor subclass.
            for a_name, activations in all_activations.items():
                all_activations[a_name] = type(inputs_embeds)(activations)

        return all_activations
def init_hidden(self, batch_size: int) -> ActivationDict:
batch_init_states: ActivationDict = {}
for layer in range(self.num_layers):
for hc in ["hx", "cx"]:
batched_state = self.init_states[layer, hc].repeat(batch_size, 1)
batch_init_states[layer, hc] = batched_state
return batch_init_states
def final_hidden(self, hidden: ActivationDict) -> Tensor:
return hidden[self.top_layer, "hx"].squeeze()
    def set_init_states(
        self,
        pickle_path: Optional[str] = None,
        corpus_path: Optional[str] = None,
        use_default: bool = False,
        tokenizer: Optional[PreTrainedTokenizer] = None,
        save_init_states_to: Optional[str] = None,
    ) -> None:
        """Set the model's initial hidden/cell states.

        Sources are tried in order of precedence:

        1. ``pickle_path``: load previously pickled states from disk.
        2. ``corpus_path``: derive states by running the model over a
           corpus (requires ``tokenizer``); ``use_default=True`` points
           this at the bundled ``init_sentence.txt``.
        3. Otherwise: all-zero states.

        Note that ``use_default`` overwrites any ``corpus_path`` passed in,
        but a ``pickle_path`` still takes precedence over both.
        """
        if use_default:
            diagnnose_utils_dir = os.path.dirname(diagnnose_utils_init)
            corpus_path = os.path.join(diagnnose_utils_dir, "init_sentence.txt")

        if pickle_path is not None:
            init_states = self._create_init_states_from_pickle(pickle_path)
        elif corpus_path is not None:
            init_states = self._create_init_states_from_corpus(
                corpus_path, tokenizer, save_init_states_to
            )
        else:
            init_states = self._create_zero_states()

        self.init_states = init_states
def _create_zero_states(self) -> ActivationDict:
init_states: ActivationDict = {
a_name: torch.zeros((1, self.nhid(a_name)), device=self.device)
for a_name in product(range(self.num_layers), ["cx", "hx"])
}
return init_states
    @suppress_print
    def _create_init_states_from_corpus(
        self,
        init_states_corpus: str,
        tokenizer: PreTrainedTokenizer,
        save_init_states_to: Optional[str] = None,
    ) -> ActivationDict:
        """Derive initial states by running the model over a corpus.

        Extracts the hx/cx activations of every layer, keeping only the
        activations picked out by ``final_sen_token`` (presumably the final
        token of each sentence -- confirm against the selection func).

        Parameters
        ----------
        init_states_corpus : str
            Path to the corpus file to run the model over.
        tokenizer : PreTrainedTokenizer
            Tokenizer used to create the corpus; must not be None.
        save_init_states_to : Optional[str]
            If given, the extracted activations are also written here.
        """
        assert (
            tokenizer is not None
        ), "Tokenizer must be provided when creating init states from corpus"

        corpus: Corpus = Corpus.create(init_states_corpus, tokenizer=tokenizer)

        # hx/cx of every layer; layer indices are 0-based.
        activation_names: ActivationNames = [
            (layer, name) for layer in range(self.num_layers) for name in ["hx", "cx"]
        ]

        extractor = Extractor(
            self,
            corpus,
            activation_names,
            activations_dir=save_init_states_to,
            selection_func=final_sen_token,
        )
        init_states = extractor.extract().activation_dict

        return init_states
    def _create_init_states_from_pickle(self, pickle_path: str) -> ActivationDict:
        """Load pickled initial states and validate them against the model."""
        init_states: ActivationDict = load_pickle(pickle_path)
        self._validate_init_states_from_pickle(init_states)

        return init_states
def _validate_init_states_from_pickle(self, init_states: ActivationDict) -> None:
num_init_layers = max(layer for layer, _name in init_states)
assert (
num_init_layers == self.num_layers
), "Number of initial layers not correct"
for (layer, name), size in self.sizes.items():
if name in ["hx", "cx"]:
assert (
layer,
name,
) in init_states.keys(), (
f"Activation {layer},{name} is not found in init states"
)
init_size = init_states[layer, name].size(1)
assert init_size == size, (
f"Initial activation size for {name} is incorrect: "
f"{name}: {init_size}, should be {size}"
)
| true | true |
1c3cfd10960e8cb0fa9737037f55a8dd619bf48c | 771 | py | Python | var/spack/repos/builtin/packages/py-dbfread/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-dbfread/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/py-dbfread/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyDbfread(PythonPackage):
"""DBF is a file format used by databases such dBase, Visual FoxPro, and
FoxBase+. This library reads DBF files and returns the data as native
Python data types for further processing. It is primarily intended for
batch jobs and one-off scripts."""
homepage = "https://dbfread.readthedocs.io/en/latest/"
pypi = "dbfread/dbfread-2.0.7.tar.gz"
version('2.0.7', sha256='07c8a9af06ffad3f6f03e8fe91ad7d2733e31a26d2b72c4dd4cfbae07ee3b73d')
depends_on('py-setuptools', type='build')
| 36.714286 | 95 | 0.747082 |
from spack.package import *
class PyDbfread(PythonPackage):
homepage = "https://dbfread.readthedocs.io/en/latest/"
pypi = "dbfread/dbfread-2.0.7.tar.gz"
version('2.0.7', sha256='07c8a9af06ffad3f6f03e8fe91ad7d2733e31a26d2b72c4dd4cfbae07ee3b73d')
depends_on('py-setuptools', type='build')
| true | true |
1c3cfd853bcc925edfd3cc3a2bc972d073ad2778 | 8,569 | py | Python | sphinx/directives/patches.py | KunKaxx/sphinx | e1b9ebf80c1094ce438e24526ce8d0d178426196 | [
"BSD-2-Clause"
] | 1 | 2021-03-11T16:45:01.000Z | 2021-03-11T16:45:01.000Z | sphinx/directives/patches.py | KunKaxx/sphinx | e1b9ebf80c1094ce438e24526ce8d0d178426196 | [
"BSD-2-Clause"
] | 2 | 2022-02-14T03:20:12.000Z | 2022-03-02T10:44:31.000Z | sphinx/directives/patches.py | KunKaxx/sphinx | e1b9ebf80c1094ce438e24526ce8d0d178426196 | [
"BSD-2-Clause"
] | null | null | null | """
sphinx.directives.patches
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import warnings
from os import path
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, cast
from docutils import nodes
from docutils.nodes import Node, make_id, system_message
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import html, images, tables
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx60Warning
from sphinx.directives import optional_int
from sphinx.domains.math import MathDomain
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
from sphinx.util.osutil import SEP, os_path, relpath
if TYPE_CHECKING:
from sphinx.application import Sphinx
logger = logging.getLogger(__name__)
class Figure(images.Figure):
    """Variant of the figure directive that applies the ``:name:`` option
    to the outer figure node instead of the inner image node.
    """

    def run(self) -> List[Node]:
        figure_name = self.options.pop('name', None)
        result = super().run()

        # Error results (or an image/system-message pair) pass through as-is.
        if len(result) == 2 or isinstance(result[0], nodes.system_message):
            return result
        assert len(result) == 1

        figure_node = cast(nodes.figure, result[0])
        if figure_name:
            # Re-apply the popped ``name`` so it targets the figure node.
            self.options['name'] = figure_name
            self.add_name(figure_node)

        # Propagate the caption's line number when the figure has none.
        if figure_node.line is None and len(figure_node) == 2:
            figure_node.line = cast(nodes.caption, figure_node[1]).line

        return [figure_node]
class Meta(html.Meta, SphinxDirective):
    """Patched ``meta`` directive that records the source location on each
    meta node and swaps in Sphinx's picklable meta node class.
    """

    def run(self) -> List[Node]:
        result = super().run()
        for node in result:
            # docutils wraps meta nodes in pending nodes; patch the inner node.
            if (isinstance(node, nodes.pending) and
                    isinstance(node.details['nodes'][0], html.MetaBody.meta)):
                meta = node.details['nodes'][0]
                meta.source = self.env.doc2path(self.env.docname)
                meta.line = self.lineno
                meta.rawcontent = meta['content']  # type: ignore

                # docutils' meta nodes aren't picklable because the class is nested
                meta.__class__ = addnodes.meta  # type: ignore

        return result
class RSTTable(tables.RSTTable):
    """Deprecated table directive that copies source/line info onto the
    table caption (needed only for docutils 0.13 and older)."""

    def run(self) -> List[Node]:
        warnings.warn('RSTTable is deprecated.', RemovedInSphinx60Warning)
        return super().run()

    def make_title(self) -> Tuple[nodes.title, List[system_message]]:
        title, messages = super().make_title()
        if title:
            # Stamp the caption with this directive's source location.
            set_source_info(self, title)
        return title, messages
class CSVTable(tables.CSVTable):
    """csv-table directive that treats an absolute ``:file:`` path as a
    path relative to the Sphinx project's source directory.
    """

    def run(self) -> List[Node]:
        if 'file' in self.options and self.options['file'].startswith((SEP, os.sep)):
            env = self.state.document.settings.env
            filename = self.options['file']
            if path.exists(filename):
                # The literal absolute path exists on disk; keep it, but warn
                # that the interpretation of absolute paths has changed.
                logger.warning(__('":file:" option for csv-table directive now recognizes '
                                  'an absolute path as a relative path from source directory. '
                                  'Please update your document.'),
                               location=(env.docname, self.lineno))
            else:
                # Resolve the path against srcdir, then re-express it
                # relative to the current document's directory.
                resolved_path = path.join(env.srcdir, os_path(filename[1:]))
                document_dir = path.dirname(env.doc2path(env.docname))
                self.options['file'] = relpath(resolved_path, document_dir)
        return super().run()
class ListTable(tables.ListTable):
    """Deprecated list-table directive that copies source/line info onto
    the table caption (needed only for docutils 0.13 and older)."""

    def run(self) -> List[Node]:
        warnings.warn('ListTable is deprecated.', RemovedInSphinx60Warning)
        return super().run()

    def make_title(self) -> Tuple[nodes.title, List[system_message]]:
        title, messages = super().make_title()
        if title:
            # Stamp the caption with this directive's source location.
            set_source_info(self, title)
        return title, messages
class Code(SphinxDirective):
    """Parse and mark up content of a code block.

    This is compatible with docutils' :rst:dir:`code` directive.
    """
    optional_arguments = 1
    option_spec = {
        'class': directives.class_option,
        'force': directives.flag,
        'name': directives.unchanged,
        'number-lines': optional_int,
    }
    has_content = True

    def run(self) -> List[Node]:
        self.assert_has_content()

        code = '\n'.join(self.content)
        # Parsed option values live under the key declared in option_spec
        # ('class'); reading 'classes' always returned [] and silently
        # dropped the user's :class: option (cf. MathDirective below).
        node = nodes.literal_block(code, code,
                                   classes=self.options.get('class', []),
                                   force='force' in self.options,
                                   highlight_args={})
        self.add_name(node)
        set_source_info(self, node)

        if self.arguments:
            # highlight language specified
            node['language'] = self.arguments[0]
        else:
            # no highlight language specified.  Then this directive refers the current
            # highlight setting via ``highlight`` directive or ``highlight_language``
            # configuration.
            node['language'] = self.env.temp_data.get('highlight_language',
                                                      self.config.highlight_language)

        if 'number-lines' in self.options:
            node['linenos'] = True

            # if number given, treat as lineno-start.
            if self.options['number-lines']:
                node['highlight_args']['linenostart'] = self.options['number-lines']

        return [node]
class MathDirective(SphinxDirective):
    """Directive for display math blocks.

    Builds a ``math_block`` node and, when labelled (explicitly or via
    ``math_number_all``), registers the equation with the math domain and
    prepends a target node for cross-referencing.
    """

    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {
        'label': directives.unchanged,
        'name': directives.unchanged,
        'class': directives.class_option,
        'nowrap': directives.flag,
    }

    def run(self) -> List[Node]:
        latex = '\n'.join(self.content)
        if self.arguments and self.arguments[0]:
            # The directive argument becomes the first paragraph of math.
            latex = self.arguments[0] + '\n\n' + latex
        # ``label`` takes precedence over ``name``.
        label = self.options.get('label', self.options.get('name'))
        node = nodes.math_block(latex, latex,
                                classes=self.options.get('class', []),
                                docname=self.env.docname,
                                number=None,
                                label=label,
                                nowrap='nowrap' in self.options)
        self.add_name(node)
        self.set_source_info(node)

        ret = [node]  # type: List[Node]
        self.add_target(ret)
        return ret

    def add_target(self, ret: List[Node]) -> None:
        """Register the equation with the math domain and prepend a target."""
        node = cast(nodes.math_block, ret[0])

        # assign label automatically if math_number_all enabled
        if node['label'] == '' or (self.config.math_number_all and not node['label']):
            seq = self.env.new_serialno('sphinx.ext.math#equations')
            node['label'] = "%s:%d" % (self.env.docname, seq)

        # no targets and numbers are needed
        if not node['label']:
            return

        # register label to domain
        domain = cast(MathDomain, self.env.get_domain('math'))
        domain.note_equation(self.env.docname, node['label'], location=node)
        node['number'] = domain.get_equation_number_for(node['label'])

        # add target node
        node_id = make_id('equation-%s' % node['label'])
        target = nodes.target('', '', ids=[node_id])
        self.state.document.note_explicit_target(target)
        ret.insert(0, target)
def setup(app: "Sphinx") -> Dict[str, Any]:
    """Register Sphinx's patched versions of the docutils directives."""
    patched_directives = [
        ('figure', Figure),
        ('meta', Meta),
        ('csv-table', CSVTable),
        ('code', Code),
        ('math', MathDirective),
    ]
    for directive_name, directive_class in patched_directives:
        directives.register_directive(directive_name, directive_class)

    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| 34.552419 | 95 | 0.603688 |
import os
import warnings
from os import path
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, cast
from docutils import nodes
from docutils.nodes import Node, make_id, system_message
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import html, images, tables
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx60Warning
from sphinx.directives import optional_int
from sphinx.domains.math import MathDomain
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
from sphinx.util.osutil import SEP, os_path, relpath
if TYPE_CHECKING:
from sphinx.application import Sphinx
logger = logging.getLogger(__name__)
class Figure(images.Figure):
def run(self) -> List[Node]:
name = self.options.pop('name', None)
result = super().run()
if len(result) == 2 or isinstance(result[0], nodes.system_message):
return result
assert len(result) == 1
figure_node = cast(nodes.figure, result[0])
if name:
self.options['name'] = name
self.add_name(figure_node)
if figure_node.line is None and len(figure_node) == 2:
caption = cast(nodes.caption, figure_node[1])
figure_node.line = caption.line
return [figure_node]
class Meta(html.Meta, SphinxDirective):
def run(self) -> List[Node]:
result = super().run()
for node in result:
if (isinstance(node, nodes.pending) and
isinstance(node.details['nodes'][0], html.MetaBody.meta)):
meta = node.details['nodes'][0]
meta.source = self.env.doc2path(self.env.docname)
meta.line = self.lineno
meta.rawcontent = meta['content']
meta.__class__ = addnodes.meta
return result
class RSTTable(tables.RSTTable):
def run(self) -> List[Node]:
warnings.warn('RSTTable is deprecated.',
RemovedInSphinx60Warning)
return super().run()
def make_title(self) -> Tuple[nodes.title, List[system_message]]:
title, message = super().make_title()
if title:
set_source_info(self, title)
return title, message
class CSVTable(tables.CSVTable):
def run(self) -> List[Node]:
if 'file' in self.options and self.options['file'].startswith((SEP, os.sep)):
env = self.state.document.settings.env
filename = self.options['file']
if path.exists(filename):
logger.warning(__('":file:" option for csv-table directive now recognizes '
'an absolute path as a relative path from source directory. '
'Please update your document.'),
location=(env.docname, self.lineno))
else:
abspath = path.join(env.srcdir, os_path(self.options['file'][1:]))
docdir = path.dirname(env.doc2path(env.docname))
self.options['file'] = relpath(abspath, docdir)
return super().run()
class ListTable(tables.ListTable):
def run(self) -> List[Node]:
warnings.warn('ListTable is deprecated.',
RemovedInSphinx60Warning)
return super().run()
def make_title(self) -> Tuple[nodes.title, List[system_message]]:
title, message = super().make_title()
if title:
set_source_info(self, title)
return title, message
class Code(SphinxDirective):
optional_arguments = 1
option_spec = {
'class': directives.class_option,
'force': directives.flag,
'name': directives.unchanged,
'number-lines': optional_int,
}
has_content = True
def run(self) -> List[Node]:
self.assert_has_content()
code = '\n'.join(self.content)
node = nodes.literal_block(code, code,
classes=self.options.get('classes', []),
force='force' in self.options,
highlight_args={})
self.add_name(node)
set_source_info(self, node)
if self.arguments:
node['language'] = self.arguments[0]
else:
node['language'] = self.env.temp_data.get('highlight_language',
self.config.highlight_language)
if 'number-lines' in self.options:
node['linenos'] = True
if self.options['number-lines']:
node['highlight_args']['linenostart'] = self.options['number-lines']
return [node]
class MathDirective(SphinxDirective):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'label': directives.unchanged,
'name': directives.unchanged,
'class': directives.class_option,
'nowrap': directives.flag,
}
def run(self) -> List[Node]:
latex = '\n'.join(self.content)
if self.arguments and self.arguments[0]:
latex = self.arguments[0] + '\n\n' + latex
label = self.options.get('label', self.options.get('name'))
node = nodes.math_block(latex, latex,
classes=self.options.get('class', []),
docname=self.env.docname,
number=None,
label=label,
nowrap='nowrap' in self.options)
self.add_name(node)
self.set_source_info(node)
ret = [node]
self.add_target(ret)
return ret
def add_target(self, ret: List[Node]) -> None:
node = cast(nodes.math_block, ret[0])
if node['label'] == '' or (self.config.math_number_all and not node['label']):
seq = self.env.new_serialno('sphinx.ext.math#equations')
node['label'] = "%s:%d" % (self.env.docname, seq)
if not node['label']:
return
domain = cast(MathDomain, self.env.get_domain('math'))
domain.note_equation(self.env.docname, node['label'], location=node)
node['number'] = domain.get_equation_number_for(node['label'])
node_id = make_id('equation-%s' % node['label'])
target = nodes.target('', '', ids=[node_id])
self.state.document.note_explicit_target(target)
ret.insert(0, target)
def setup(app: "Sphinx") -> Dict[str, Any]:
directives.register_directive('figure', Figure)
directives.register_directive('meta', Meta)
directives.register_directive('csv-table', CSVTable)
directives.register_directive('code', Code)
directives.register_directive('math', MathDirective)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| true | true |
1c3cfdec1e0460ac241d2bc240200825dd339bf3 | 6,034 | py | Python | client/tests/coverage_test.py | ekmixon/pyre-check | bd2475cc0cf4bef941f0aea6df10d674e6c907ab | [
"MIT"
] | null | null | null | client/tests/coverage_test.py | ekmixon/pyre-check | bd2475cc0cf4bef941f0aea6df10d674e6c907ab | [
"MIT"
] | null | null | null | client/tests/coverage_test.py | ekmixon/pyre-check | bd2475cc0cf4bef941f0aea6df10d674e6c907ab | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import textwrap
import unittest
from typing import List
import libcst as cst
from ..commands.coverage import _collect_coverage
class CoverageTest(unittest.TestCase):
    """Unit tests for ``_collect_coverage``.

    Coverage here means "lines with a fully annotated definition": covered
    lines carry usable annotations, uncovered lines lack them.
    """

    def assert_coverage_equal(
        self,
        file_content: str,
        expected_covered: List[int],
        expected_uncovered: List[int],
    ) -> None:
        """Parse ``file_content`` and compare collected line sets (0-based)."""
        module = cst.parse_module(textwrap.dedent(file_content).strip())
        actual_coverage = _collect_coverage({"test.py": module})[0]
        self.assertEqual(
            expected_covered, actual_coverage.covered_lines, "Covered mismatch"
        )
        self.assertEqual(
            expected_uncovered, actual_coverage.uncovered_lines, "Not covered mismatch"
        )

    def assert_coverage_matches_comments(self, file_content: str) -> None:
        """Derive the expectations from trailing ``# Covered`` / ``# Not covered``
        comments (case-insensitive) and delegate to assert_coverage_equal."""
        file_content = textwrap.dedent(file_content).strip()
        lines = file_content.split("\n")
        expected_covered = []
        expected_uncovered = []
        for line_number, line in enumerate(lines):
            if line.lower().endswith("# covered"):
                expected_covered.append(line_number)
            elif line.lower().endswith("# not covered"):
                expected_uncovered.append(line_number)
        self.assert_coverage_equal(file_content, expected_covered, expected_uncovered)

    def test_coverage_covered(self) -> None:
        self.assert_coverage_equal(
            """
            def foo() -> int:
                return 5
            """,
            expected_covered=[0],
            expected_uncovered=[],
        )

    def test_coverage_uncovered(self) -> None:
        self.assert_coverage_equal(
            """
            def foo():
                return 5
            """,
            expected_covered=[],
            expected_uncovered=[0],
        )

    def test_coverage_details(self) -> None:
        self.assert_coverage_matches_comments(
            """
            def foo(x) -> int: # Covered
                pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            def bar(x: int, # Covered
                    y): # Not Covered
                pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            a = foo() # Covered
            b: int = bar() # Covered
            """
        )
        self.assert_coverage_matches_comments(
            """
            class A:
                a: int = 100 # Covered
                b = "" # Covered
            """
        )
        # For now, don't count annotations inside of functions
        self.assert_coverage_matches_comments(
            """
            def foo() -> None: # Covered
                a: int = 100
            """
        )
        self.assert_coverage_matches_comments(
            """
            def foo(): # Not covered
                a: int = 100
            """
        )
        self.assert_coverage_matches_comments(
            """
            def foo(): # Not covered
                def bar(x: int) -> int: # Covered
                    pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            class A:
                def bar(self, x: int): # Covered
                    pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            class A:
                def bar(this, x: int) -> None: # Covered
                    pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            class A:
                @classmethod
                def bar(cls, x: int): # Covered
                    pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            def bar(self, x: int): # Covered
                pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            class A:
                @staticmethod
                def bar(self, x: int) -> None: # Covered
                    pass
            """
        )
        self.assert_coverage_matches_comments(
            """
            def foo(x): # Not covered
                def bar(x): # Not covered
                    return x
                return bar

            class A:
                @foo(42)
                def baz(self): ... # Not covered
            """
        )
        self.assert_coverage_matches_comments(
            """
            def foo(x: str) -> str: # Covered
                return x
            """
        )
        self.assert_coverage_matches_comments(
            """
            class Test:
                def foo(self, input: str) -> None: # Covered
                    class Foo:
                        pass

                    pass

                def bar(self, input: str) -> None: # Covered
                    pass
            """
        )
        # Ensure globals and attributes with literal values are considered annotated.
        self.assert_coverage_matches_comments(
            """
            x: int = 1 # Covered
            y = 2 # Covered
            z = foo # Covered

            class Foo:
                x = 1 # Covered
                y = foo # Covered
            """
        )
        self.assert_coverage_matches_comments(
            """
            def a_very_long_function_name(
                parameter_1: int, # Covered
                parameter_2, # Not covered
                parameter_3: str, # Covered
            ): # Not covered
                pass
            """
        )
| 30.321608 | 87 | 0.448956 |
import textwrap
import unittest
from typing import List
import libcst as cst
from ..commands.coverage import _collect_coverage
class CoverageTest(unittest.TestCase):
def assert_coverage_equal(
self,
file_content: str,
expected_covered: List[int],
expected_uncovered: List[int],
) -> None:
module = cst.parse_module(textwrap.dedent(file_content).strip())
actual_coverage = _collect_coverage({"test.py": module})[0]
self.assertEqual(
expected_covered, actual_coverage.covered_lines, "Covered mismatch"
)
self.assertEqual(
expected_uncovered, actual_coverage.uncovered_lines, "Not covered mismatch"
)
def assert_coverage_matches_comments(self, file_content: str) -> None:
file_content = textwrap.dedent(file_content).strip()
lines = file_content.split("\n")
expected_covered = []
expected_uncovered = []
for line_number, line in enumerate(lines):
if line.lower().endswith("# covered"):
expected_covered.append(line_number)
elif line.lower().endswith("# not covered"):
expected_uncovered.append(line_number)
self.assert_coverage_equal(file_content, expected_covered, expected_uncovered)
def test_coverage_covered(self) -> None:
self.assert_coverage_equal(
"""
def foo() -> int:
return 5
""",
expected_covered=[0],
expected_uncovered=[],
)
def test_coverage_uncovered(self) -> None:
self.assert_coverage_equal(
"""
def foo():
return 5
""",
expected_covered=[],
expected_uncovered=[0],
)
def test_coverage_details(self) -> None:
self.assert_coverage_matches_comments(
"""
def foo(x) -> int: # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
def bar(x: int, # Covered
y): # Not Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
a = foo() # Covered
b: int = bar() # Covered
"""
)
self.assert_coverage_matches_comments(
"""
class A:
a: int = 100 # Covered
b = "" # Covered
"""
)
self.assert_coverage_matches_comments(
"""
def foo() -> None: # Covered
a: int = 100
"""
)
self.assert_coverage_matches_comments(
"""
def foo(): # Not covered
a: int = 100
"""
)
self.assert_coverage_matches_comments(
"""
def foo(): # Not covered
def bar(x: int) -> int: # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
class A:
def bar(self, x: int): # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
class A:
def bar(this, x: int) -> None: # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
class A:
@classmethod
def bar(cls, x: int): # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
def bar(self, x: int): # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
class A:
@staticmethod
def bar(self, x: int) -> None: # Covered
pass
"""
)
self.assert_coverage_matches_comments(
"""
def foo(x): # Not covered
def bar(x): # Not covered
return x
return bar
class A:
@foo(42)
def baz(self): ... # Not covered
"""
)
self.assert_coverage_matches_comments(
"""
def foo(x: str) -> str: # Covered
return x
"""
)
self.assert_coverage_matches_comments(
"""
class Test:
def foo(self, input: str) -> None: # Covered
class Foo:
pass
pass
def bar(self, input: str) -> None: # Covered
pass
"""
)
# Ensure globals and attributes with literal values are considered annotated.
self.assert_coverage_matches_comments(
"""
x: int = 1 # Covered
y = 2 # Covered
z = foo # Covered
class Foo:
x = 1 # Covered
y = foo # Covered
"""
)
self.assert_coverage_matches_comments(
"""
def a_very_long_function_name(
parameter_1: int, # Covered
parameter_2, # Not covered
parameter_3: str, # Covered
): # Not covered
pass
"""
)
| true | true |
1c3cfe3579a1bc1564e265b4ea7718ba150055d8 | 17,473 | py | Python | third_party/blink/renderer/build/scripts/core/css/css_properties.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/build/scripts/core/css/css_properties.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/blink/renderer/build/scripts/core/css/css_properties.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from blinkbuild.name_style_converter import NameStyleConverter
from core.css.field_alias_expander import FieldAliasExpander
import json5_generator
from make_origin_trials import OriginTrialsWriter
from name_utilities import enum_key_for_css_property, id_for_css_property
from name_utilities import enum_key_for_css_property_alias, id_for_css_property_alias
# Field types whose values are converted using CSSPrimitiveValue in the
# setter function, if applicable.
PRIMITIVE_TYPES = [
    'short', 'unsigned short', 'int', 'unsigned int', 'unsigned', 'float',
    'LineClampValue'
]
# TODO(jiameng): add more flag checks later.
def check_property_parameters(property_to_check):
    """Assert that a parsed property entry's parameters are valid.

    Raises an AssertionError naming the offending property whenever an
    invalid parameter combination is detected.
    """
    if property_to_check['longhands']:
        # Shorthands may neither be interpolable nor parse a single value.
        assert not(property_to_check['interpolable']), \
            'Shorthand property (' + property_to_check['name'] + ') ' \
            'cannot be interpolable'
        assert 'parseSingleValue' not in property_to_check['property_methods'], \
            'Shorthand property (' + property_to_check['name'] + ') ' \
            'should not implement parseSingleValue'
    else:
        assert 'parseShorthand' not in property_to_check['property_methods'], \
            'Longhand property (' + property_to_check['name'] + ') ' \
            'should not implement parseShorthand'
    assert property_to_check['is_descriptor'] or \
        property_to_check['is_property'], \
        '{} must be a property, descriptor, or both'.format(
            property_to_check['name'])
    if property_to_check['field_template'] is not None:
        assert not property_to_check['longhands'], \
            "Shorthand '{}' cannot have a field_template.".format(
                property_to_check['name'])
    if property_to_check['mutable']:
        assert property_to_check['field_template'] == 'monotonic_flag', \
            'mutable keyword only implemented for monotonic_flag'
    if property_to_check['alias_for']:
        assert not property_to_check['is_internal'], \
            'Internal aliases is not supported'
    # None of these per-context validity flags may be set on a shorthand.
    for flag_name in ('valid_for_first_letter', 'valid_for_cue',
                      'valid_for_marker'):
        if property_to_check[flag_name]:
            assert not property_to_check['longhands'], \
                'Shorthand %s should not be marked as %s' % \
                (property_to_check['name'], flag_name)
class CSSProperties(object):
    def __init__(self, file_paths):
        """Load and index all CSS property definitions.

        Args:
            file_paths: ordered list of json5 files:
                [0] css_properties.json5,
                [1] computed_style_field_aliases.json5,
                [2] runtime_enabled_features.json5,
                [3:] optional extra field definition files.
        """
        assert len(file_paths) >= 3, \
            "CSSProperties at least needs both css_properties.json5, \
            computed_style_field_aliases.json5 and \
            runtime_enabled_features.json5 to function"

        # computed_style_field_aliases.json5. Used to expand out parameters used
        # in the various generators for ComputedStyle.
        self._field_alias_expander = FieldAliasExpander(file_paths[1])

        # Alias CSSPropertyIDs start at this offset; the real properties
        # must fit below it (enforced by the assert further down).
        self._alias_offset = 1024
        # 0: CSSPropertyID::kInvalid
        # 1: CSSPropertyID::kVariable
        self._first_enum_value = 2
        self._last_used_enum_value = self._first_enum_value
        self._properties_by_id = {}
        self._properties_by_name = {}
        self._aliases = []
        self._longhands = []
        self._shorthands = []
        self._properties_including_aliases = []

        # Add default data in css_properties.json5. This must be consistent
        # across instantiations of this class.
        css_properties_file = json5_generator.Json5File.load_from_files(
            [file_paths[0]])
        self._default_parameters = css_properties_file.parameters

        # Map of feature name -> origin trial feature name
        origin_trial_features = {}
        # TODO(crbug/1031309): Refactor OriginTrialsWriter to reuse logic here.
        origin_trials_writer = OriginTrialsWriter([file_paths[2]], "")
        for feature in origin_trials_writer.origin_trial_features:
            origin_trial_features[str(feature['name'])] = True

        self.add_properties(css_properties_file.name_dictionaries,
                            origin_trial_features)

        assert self._first_enum_value + len(self._properties_by_id) < \
            self._alias_offset, \
            'Property aliasing expects fewer than %d properties.' % \
            self._alias_offset

        self._last_unresolved_property_id = max(
            property_["enum_value"] for property_ in self._aliases)

        # Process extra files passed in.
        self._extra_fields = []
        for i in range(3, len(file_paths)):
            fields = json5_generator.Json5File.load_from_files(
                [file_paths[i]], default_parameters=self._default_parameters)
            self._extra_fields.extend(fields.name_dictionaries)
        for field in self._extra_fields:
            self.expand_parameters(field)
def add_properties(self, properties, origin_trial_features):
for property_ in properties:
self._properties_by_name[property_['name'].original] = property_
for property_ in properties:
self.expand_visited(property_)
property_['in_origin_trial'] = False
self.expand_origin_trials(property_, origin_trial_features)
self.expand_surrogate(property_)
self._aliases = [
property_ for property_ in properties if property_['alias_for']
]
self._shorthands = [
property_ for property_ in properties if property_['longhands']
]
self._longhands = [
property_ for property_ in properties
if (not property_['alias_for'] and not property_['longhands'])
]
# Sort the properties by priority, then alphabetically. Ensure that
# the resulting order is deterministic.
# Sort properties by priority, then alphabetically.
for property_ in self._longhands + self._shorthands:
self.expand_parameters(property_)
check_property_parameters(property_)
# This order must match the order in CSSPropertyPriority.h.
priority_numbers = {'Animation': 0, 'High': 1, 'Low': 2}
priority = priority_numbers[property_['priority']]
name_without_leading_dash = property_['name'].original
if name_without_leading_dash.startswith('-'):
name_without_leading_dash = name_without_leading_dash[1:]
property_['sorting_key'] = (priority, name_without_leading_dash)
sorting_keys = {}
for property_ in self._longhands + self._shorthands:
key = property_['sorting_key']
assert key not in sorting_keys, \
('Collision detected - two properties have the same name and '
'priority, a potentially non-deterministic ordering can '
'occur: {}, {} and {}'.format(
key, property_['name'].original, sorting_keys[key]))
sorting_keys[key] = property_['name'].original
self._longhands.sort(key=lambda p: p['sorting_key'])
self._shorthands.sort(key=lambda p: p['sorting_key'])
# The sorted index becomes the CSSPropertyID enum value.
for property_ in self._longhands + self._shorthands:
property_['enum_value'] = self._last_used_enum_value
self._last_used_enum_value += 1
# Add the new property into the map of properties.
assert property_['property_id'] not in self._properties_by_id, \
('property with ID {} appears more than once in the '
'properties list'.format(property_['property_id']))
self._properties_by_id[property_['property_id']] = property_
self.expand_aliases()
self._properties_including_aliases = self._longhands + \
self._shorthands + self._aliases
def expand_origin_trials(self, property_, origin_trial_features):
if not property_['runtime_flag']:
return
if property_['runtime_flag'] in origin_trial_features:
property_['in_origin_trial'] = True
def expand_visited(self, property_):
if not property_['visited_property_for']:
return
visited_property_for = property_['visited_property_for']
unvisited_property = self._properties_by_name[visited_property_for]
property_['visited'] = True
# The visited property needs a link to the unvisited counterpart.
property_['unvisited_property'] = unvisited_property
# The unvisited property needs a link to the visited counterpart.
assert 'visited_property' not in unvisited_property, \
'A property may not have multiple visited properties'
unvisited_property['visited_property'] = property_
def expand_surrogate(self, property_):
if not property_['surrogate_for']:
return
assert property_['surrogate_for'] in self._properties_by_name, \
'surrogate_for must name a property'
# Upgrade 'surrogate_for' to property reference.
property_['surrogate_for'] = self._properties_by_name[
property_['surrogate_for']]
def expand_aliases(self):
for i, alias in enumerate(self._aliases):
assert not alias['runtime_flag'], \
"Property '{}' is an alias with a runtime_flag, "\
"but runtime flags do not currently work for aliases.".format(
alias['name'])
aliased_property = self._properties_by_id[id_for_css_property(
alias['alias_for'])]
aliased_property.setdefault('aliases', [])
aliased_property['aliases'].append(alias['name'].original)
updated_alias = aliased_property.copy()
updated_alias['name'] = alias['name']
updated_alias['alias_for'] = alias['alias_for']
updated_alias['aliased_property'] = aliased_property[
'name'].to_upper_camel_case()
updated_alias['property_id'] = id_for_css_property_alias(
alias['name'])
updated_alias['enum_key'] = enum_key_for_css_property_alias(
alias['name'])
updated_alias['enum_value'] = aliased_property['enum_value'] + \
self._alias_offset
updated_alias['superclass'] = 'CSSUnresolvedProperty'
updated_alias['namespace_group'] = \
'Shorthand' if aliased_property['longhands'] else 'Longhand'
self._aliases[i] = updated_alias
def expand_parameters(self, property_):
def set_if_none(property_, key, value):
if key not in property_ or property_[key] is None:
property_[key] = value
# Basic info.
name = property_['name']
property_['property_id'] = id_for_css_property(name)
property_['enum_key'] = enum_key_for_css_property(name)
property_['is_internal'] = name.original.startswith('-internal-')
method_name = property_['name_for_methods']
if not method_name:
method_name = name.to_upper_camel_case().replace('Webkit', '')
set_if_none(property_, 'inherited', False)
set_if_none(property_, 'affected_by_forced_colors', False)
# Initial function, Getters and Setters for ComputedStyle.
set_if_none(property_, 'initial', 'Initial' + method_name)
simple_type_name = str(property_['type_name']).split('::')[-1]
set_if_none(property_, 'name_for_methods', method_name)
set_if_none(property_, 'type_name', 'E' + method_name)
set_if_none(
property_, 'getter', method_name
if simple_type_name != method_name else 'Get' + method_name)
set_if_none(property_, 'setter', 'Set' + method_name)
if property_['inherited']:
property_['is_inherited_setter'] = (
'Set' + method_name + 'IsInherited')
property_['is_animation_property'] = (
property_['priority'] == 'Animation')
# Figure out whether this property should have style builders at all.
# E.g. shorthands do not get style builders.
property_['style_builder_declare'] = (property_['is_property']
and not property_['longhands'])
# Figure out whether we should generate style builder implementations.
for x in ['initial', 'inherit', 'value']:
suppressed = x in property_['style_builder_custom_functions']
declared = property_['style_builder_declare']
property_['style_builder_generate_%s' % x] = (declared
and not suppressed)
# Expand StyleBuilderConverter params where necessary.
if property_['type_name'] in PRIMITIVE_TYPES:
set_if_none(property_, 'converter', 'CSSPrimitiveValue')
else:
set_if_none(property_, 'converter', 'CSSIdentifierValue')
assert not property_['alias_for'], \
'Use expand_aliases to expand aliases'
if not property_['longhands']:
property_['superclass'] = 'Longhand'
property_['namespace_group'] = 'Longhand'
elif property_['longhands']:
property_['superclass'] = 'Shorthand'
property_['namespace_group'] = 'Shorthand'
# Expand out field templates.
if property_['field_template']:
self._field_alias_expander.expand_field_alias(property_)
type_name = property_['type_name']
if (property_['field_template'] == 'keyword'
or property_['field_template'] == 'multi_keyword'):
default_value = (type_name + '::' + NameStyleConverter(
property_['default_value']).to_enum_value())
elif (property_['field_template'] == 'external'
or property_['field_template'] == 'primitive'
or property_['field_template'] == 'pointer'):
default_value = property_['default_value']
else:
assert property_['field_template'] == 'monotonic_flag', \
"Please put a valid value for field_template; got " + \
str(property_['field_template'])
property_['type_name'] = 'bool'
default_value = 'false'
property_['default_value'] = default_value
property_['unwrapped_type_name'] = property_['type_name']
if property_['wrapper_pointer_name']:
assert property_['field_template'] in ['pointer', 'external']
if property_['field_template'] == 'external':
property_['type_name'] = '{}<{}>'.format(
property_['wrapper_pointer_name'], type_name)
# Default values for extra parameters in computed_style_extra_fields.json5.
set_if_none(property_, 'custom_copy', False)
set_if_none(property_, 'custom_compare', False)
set_if_none(property_, 'mutable', False)
if property_['direction_aware_options']:
if not property_['style_builder_template']:
property_['style_builder_template'] = 'direction_aware'
options = property_['direction_aware_options']
assert 'resolver' in options, 'resolver option is required'
assert 'physical_group' in options, 'physical_group option is required'
options['resolver_name'] = NameStyleConverter(options['resolver'])
options['physical_group_name'] = NameStyleConverter(
options['physical_group'])
@property
def default_parameters(self):
return self._default_parameters
@property
def aliases(self):
return self._aliases
@property
def shorthands(self):
return self._shorthands
@property
def shorthands_including_aliases(self):
return self._shorthands + [x for x in self._aliases if x['longhands']]
@property
def longhands(self):
return self._longhands
@property
def longhands_including_aliases(self):
return self._longhands + [
x for x in self._aliases if not x['longhands']
]
@property
def properties_by_id(self):
return self._properties_by_id
@property
def properties_including_aliases(self):
return self._properties_including_aliases
@property
def first_property_id(self):
return self._first_enum_value
@property
def last_property_id(self):
return self._first_enum_value + len(self._properties_by_id) - 1
@property
def last_unresolved_property_id(self):
return self._last_unresolved_property_id
@property
def alias_offset(self):
return self._alias_offset
@property
def extra_fields(self):
return self._extra_fields
| 44.917738 | 85 | 0.644366 |
from blinkbuild.name_style_converter import NameStyleConverter
from core.css.field_alias_expander import FieldAliasExpander
import json5_generator
from make_origin_trials import OriginTrialsWriter
from name_utilities import enum_key_for_css_property, id_for_css_property
from name_utilities import enum_key_for_css_property_alias, id_for_css_property_alias
# C++ value types whose style builder conversion defaults to
# 'CSSPrimitiveValue' rather than 'CSSIdentifierValue'
# (see CSSProperties.expand_parameters).
PRIMITIVE_TYPES = [
    'short', 'unsigned short', 'int', 'unsigned int', 'unsigned', 'float',
    'LineClampValue'
]
def check_property_parameters(property_to_check):
    """Validate invariants on a single expanded property dictionary.

    Raises AssertionError (with the original diagnostic messages) when an
    entry carries options that do not apply to its shorthand/longhand kind.
    """
    p = property_to_check
    if p['longhands']:
        # Shorthands delegate to their longhands, so they may neither be
        # interpolable nor parse a single value themselves.
        assert not p['interpolable'], \
            'Shorthand property (' + p['name'] + ') ' \
            'cannot be interpolable'
        assert 'parseSingleValue' not in p['property_methods'], \
            'Shorthand property (' + p['name'] + ') ' \
            'should not implement parseSingleValue'
    else:
        assert 'parseShorthand' not in p['property_methods'], \
            'Longhand property (' + p['name'] + ') ' \
            'should not implement parseShorthand'
    assert p['is_descriptor'] or p['is_property'], \
        '{} must be a property, descriptor, or both'.format(p['name'])
    if p['field_template'] is not None:
        assert not p['longhands'], \
            "Shorthand '{}' cannot have a field_template.".format(p['name'])
    if p['mutable']:
        assert p['field_template'] == 'monotonic_flag', \
            'mutable keyword only implemented for monotonic_flag'
    if p['alias_for']:
        assert not p['is_internal'], \
            'Internal aliases is not supported'
    # These flags only make sense on longhands.
    for flag in ('valid_for_first_letter', 'valid_for_cue',
                 'valid_for_marker'):
        if p[flag]:
            assert not p['longhands'], \
                'Shorthand %s should not be marked as %s' % (p['name'], flag)
class CSSProperties(object):
def __init__(self, file_paths):
assert len(file_paths) >= 3, \
"CSSProperties at least needs both css_properties.json5, \
computed_style_field_aliases.json5 and \
runtime_enabled_features.json5 to function"
self._field_alias_expander = FieldAliasExpander(file_paths[1])
self._alias_offset = 1024
self._first_enum_value = 2
self._last_used_enum_value = self._first_enum_value
self._properties_by_id = {}
self._properties_by_name = {}
self._aliases = []
self._longhands = []
self._shorthands = []
self._properties_including_aliases = []
css_properties_file = json5_generator.Json5File.load_from_files(
[file_paths[0]])
self._default_parameters = css_properties_file.parameters
origin_trial_features = {}
origin_trials_writer = OriginTrialsWriter([file_paths[2]], "")
for feature in origin_trials_writer.origin_trial_features:
origin_trial_features[str(feature['name'])] = True
self.add_properties(css_properties_file.name_dictionaries,
origin_trial_features)
assert self._first_enum_value + len(self._properties_by_id) < \
self._alias_offset, \
'Property aliasing expects fewer than %d properties.' % \
self._alias_offset
self._last_unresolved_property_id = max(
property_["enum_value"] for property_ in self._aliases)
self._extra_fields = []
for i in range(3, len(file_paths)):
fields = json5_generator.Json5File.load_from_files(
[file_paths[i]], default_parameters=self._default_parameters)
self._extra_fields.extend(fields.name_dictionaries)
for field in self._extra_fields:
self.expand_parameters(field)
def add_properties(self, properties, origin_trial_features):
for property_ in properties:
self._properties_by_name[property_['name'].original] = property_
for property_ in properties:
self.expand_visited(property_)
property_['in_origin_trial'] = False
self.expand_origin_trials(property_, origin_trial_features)
self.expand_surrogate(property_)
self._aliases = [
property_ for property_ in properties if property_['alias_for']
]
self._shorthands = [
property_ for property_ in properties if property_['longhands']
]
self._longhands = [
property_ for property_ in properties
if (not property_['alias_for'] and not property_['longhands'])
]
for property_ in self._longhands + self._shorthands:
self.expand_parameters(property_)
check_property_parameters(property_)
priority_numbers = {'Animation': 0, 'High': 1, 'Low': 2}
priority = priority_numbers[property_['priority']]
name_without_leading_dash = property_['name'].original
if name_without_leading_dash.startswith('-'):
name_without_leading_dash = name_without_leading_dash[1:]
property_['sorting_key'] = (priority, name_without_leading_dash)
sorting_keys = {}
for property_ in self._longhands + self._shorthands:
key = property_['sorting_key']
assert key not in sorting_keys, \
('Collision detected - two properties have the same name and '
'priority, a potentially non-deterministic ordering can '
'occur: {}, {} and {}'.format(
key, property_['name'].original, sorting_keys[key]))
sorting_keys[key] = property_['name'].original
self._longhands.sort(key=lambda p: p['sorting_key'])
self._shorthands.sort(key=lambda p: p['sorting_key'])
for property_ in self._longhands + self._shorthands:
property_['enum_value'] = self._last_used_enum_value
self._last_used_enum_value += 1
assert property_['property_id'] not in self._properties_by_id, \
('property with ID {} appears more than once in the '
'properties list'.format(property_['property_id']))
self._properties_by_id[property_['property_id']] = property_
self.expand_aliases()
self._properties_including_aliases = self._longhands + \
self._shorthands + self._aliases
def expand_origin_trials(self, property_, origin_trial_features):
if not property_['runtime_flag']:
return
if property_['runtime_flag'] in origin_trial_features:
property_['in_origin_trial'] = True
def expand_visited(self, property_):
if not property_['visited_property_for']:
return
visited_property_for = property_['visited_property_for']
unvisited_property = self._properties_by_name[visited_property_for]
property_['visited'] = True
property_['unvisited_property'] = unvisited_property
assert 'visited_property' not in unvisited_property, \
'A property may not have multiple visited properties'
unvisited_property['visited_property'] = property_
def expand_surrogate(self, property_):
if not property_['surrogate_for']:
return
assert property_['surrogate_for'] in self._properties_by_name, \
'surrogate_for must name a property'
property_['surrogate_for'] = self._properties_by_name[
property_['surrogate_for']]
def expand_aliases(self):
for i, alias in enumerate(self._aliases):
assert not alias['runtime_flag'], \
"Property '{}' is an alias with a runtime_flag, "\
"but runtime flags do not currently work for aliases.".format(
alias['name'])
aliased_property = self._properties_by_id[id_for_css_property(
alias['alias_for'])]
aliased_property.setdefault('aliases', [])
aliased_property['aliases'].append(alias['name'].original)
updated_alias = aliased_property.copy()
updated_alias['name'] = alias['name']
updated_alias['alias_for'] = alias['alias_for']
updated_alias['aliased_property'] = aliased_property[
'name'].to_upper_camel_case()
updated_alias['property_id'] = id_for_css_property_alias(
alias['name'])
updated_alias['enum_key'] = enum_key_for_css_property_alias(
alias['name'])
updated_alias['enum_value'] = aliased_property['enum_value'] + \
self._alias_offset
updated_alias['superclass'] = 'CSSUnresolvedProperty'
updated_alias['namespace_group'] = \
'Shorthand' if aliased_property['longhands'] else 'Longhand'
self._aliases[i] = updated_alias
def expand_parameters(self, property_):
def set_if_none(property_, key, value):
if key not in property_ or property_[key] is None:
property_[key] = value
name = property_['name']
property_['property_id'] = id_for_css_property(name)
property_['enum_key'] = enum_key_for_css_property(name)
property_['is_internal'] = name.original.startswith('-internal-')
method_name = property_['name_for_methods']
if not method_name:
method_name = name.to_upper_camel_case().replace('Webkit', '')
set_if_none(property_, 'inherited', False)
set_if_none(property_, 'affected_by_forced_colors', False)
set_if_none(property_, 'initial', 'Initial' + method_name)
simple_type_name = str(property_['type_name']).split('::')[-1]
set_if_none(property_, 'name_for_methods', method_name)
set_if_none(property_, 'type_name', 'E' + method_name)
set_if_none(
property_, 'getter', method_name
if simple_type_name != method_name else 'Get' + method_name)
set_if_none(property_, 'setter', 'Set' + method_name)
if property_['inherited']:
property_['is_inherited_setter'] = (
'Set' + method_name + 'IsInherited')
property_['is_animation_property'] = (
property_['priority'] == 'Animation')
property_['style_builder_declare'] = (property_['is_property']
and not property_['longhands'])
for x in ['initial', 'inherit', 'value']:
suppressed = x in property_['style_builder_custom_functions']
declared = property_['style_builder_declare']
property_['style_builder_generate_%s' % x] = (declared
and not suppressed)
if property_['type_name'] in PRIMITIVE_TYPES:
set_if_none(property_, 'converter', 'CSSPrimitiveValue')
else:
set_if_none(property_, 'converter', 'CSSIdentifierValue')
assert not property_['alias_for'], \
'Use expand_aliases to expand aliases'
if not property_['longhands']:
property_['superclass'] = 'Longhand'
property_['namespace_group'] = 'Longhand'
elif property_['longhands']:
property_['superclass'] = 'Shorthand'
property_['namespace_group'] = 'Shorthand'
if property_['field_template']:
self._field_alias_expander.expand_field_alias(property_)
type_name = property_['type_name']
if (property_['field_template'] == 'keyword'
or property_['field_template'] == 'multi_keyword'):
default_value = (type_name + '::' + NameStyleConverter(
property_['default_value']).to_enum_value())
elif (property_['field_template'] == 'external'
or property_['field_template'] == 'primitive'
or property_['field_template'] == 'pointer'):
default_value = property_['default_value']
else:
assert property_['field_template'] == 'monotonic_flag', \
"Please put a valid value for field_template; got " + \
str(property_['field_template'])
property_['type_name'] = 'bool'
default_value = 'false'
property_['default_value'] = default_value
property_['unwrapped_type_name'] = property_['type_name']
if property_['wrapper_pointer_name']:
assert property_['field_template'] in ['pointer', 'external']
if property_['field_template'] == 'external':
property_['type_name'] = '{}<{}>'.format(
property_['wrapper_pointer_name'], type_name)
set_if_none(property_, 'custom_copy', False)
set_if_none(property_, 'custom_compare', False)
set_if_none(property_, 'mutable', False)
if property_['direction_aware_options']:
if not property_['style_builder_template']:
property_['style_builder_template'] = 'direction_aware'
options = property_['direction_aware_options']
assert 'resolver' in options, 'resolver option is required'
assert 'physical_group' in options, 'physical_group option is required'
options['resolver_name'] = NameStyleConverter(options['resolver'])
options['physical_group_name'] = NameStyleConverter(
options['physical_group'])
@property
def default_parameters(self):
return self._default_parameters
@property
def aliases(self):
return self._aliases
@property
def shorthands(self):
return self._shorthands
@property
def shorthands_including_aliases(self):
return self._shorthands + [x for x in self._aliases if x['longhands']]
@property
def longhands(self):
return self._longhands
@property
def longhands_including_aliases(self):
return self._longhands + [
x for x in self._aliases if not x['longhands']
]
@property
def properties_by_id(self):
return self._properties_by_id
@property
def properties_including_aliases(self):
return self._properties_including_aliases
@property
def first_property_id(self):
return self._first_enum_value
@property
def last_property_id(self):
return self._first_enum_value + len(self._properties_by_id) - 1
@property
def last_unresolved_property_id(self):
return self._last_unresolved_property_id
@property
def alias_offset(self):
return self._alias_offset
@property
def extra_fields(self):
return self._extra_fields
| true | true |
1c3cfe4d91f50f541d857bc7fa981149bea1a637 | 35,461 | py | Python | c7n/resources/account.py | rentacenter/cloud-custodian | 8d581193f851be3e9d988f2ace2cb4061a13417a | [
"Apache-2.0"
] | null | null | null | c7n/resources/account.py | rentacenter/cloud-custodian | 8d581193f851be3e9d988f2ace2cb4061a13417a | [
"Apache-2.0"
] | null | null | null | c7n/resources/account.py | rentacenter/cloud-custodian | 8d581193f851be3e9d988f2ace2cb4061a13417a | [
"Apache-2.0"
] | 1 | 2021-02-16T21:24:09.000Z | 2021-02-16T21:24:09.000Z | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS Account as a custodian resource.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, ValueFilter, FilterValidationError
from c7n.manager import ResourceManager, resources
from c7n.utils import local_session, type_schema
from c7n.resources.iam import CredentialReport
# Registries the account resource draws its filters and actions from.
# Fix: the registry identifier strings were swapped ('aws.account.actions'
# was attached to the filter registry and vice versa), which mislabels
# them in any registry-keyed output.
filters = FilterRegistry('aws.account.filters')
actions = ActionRegistry('aws.account.actions')
def get_account(session_factory, config):
    """Build the synthetic account resource record.

    Returns a dict with the account id from config and the first IAM
    account alias (empty string when no alias is set).
    """
    client = local_session(session_factory).client('iam')
    aliases = client.list_account_aliases().get('AccountAliases', ('',))
    # Guard against an empty alias list; fall back to an empty name.
    if aliases:
        name = aliases[0]
    else:
        name = ""
    return {
        'account_id': config.account_id,
        'account_name': name,
    }
@resources.register('account')
class Account(ResourceManager):
    """The AWS account itself, modeled as a single custodian resource.

    Filters annotate (or drop) the one synthetic record produced by
    get_account() rather than selecting among many resources.
    """
    filter_registry = filters
    action_registry = actions
    class resource_type(object):
        # Keys of the synthetic record built by get_account().
        id = 'account_id'
        name = 'account_name'
        # There is no describe/list API filter for the account itself.
        filter_name = None
    @classmethod
    def get_permissions(cls):
        # get_account() resolves the account alias via IAM.
        return ('iam:ListAccountAliases',)
    def get_model(self):
        return self.resource_type
    def resources(self):
        # Exactly one account record; apply the policy's filters to it.
        return self.filter_resources([get_account(self.session_factory, self.config)])
    def get_resources(self, resource_ids):
        return [get_account(self.session_factory, self.config)]
@filters.register('credential')
class AccountCredentialReport(CredentialReport):
    """Match the account against its IAM credential report.

    Reuses CredentialReport from c7n.resources.iam, but matches only the
    '<root_account>' row of the report; on a match the row is attached to
    the account resource under 'c7n:credential-report'.
    """
    def process(self, resources, event=None):
        # Base class fetches and caches the credential report.
        super(AccountCredentialReport, self).process(resources, event)
        report = self.get_credential_report()
        if report is None:
            return []
        results = []
        # Only the root account entry is considered for the account filter.
        info = report.get('<root_account>')
        for r in resources:
            if self.match(info):
                r['c7n:credential-report'] = info
                results.append(r)
        return results
@filters.register('check-cloudtrail')
class CloudTrailEnabled(Filter):
    """Verify cloud trail enabled for this account per specifications.

    Returns an annotated account resource if trail is not enabled.

    Of particular note, the current-region option will evaluate whether
    cloudtrail is available in the current region, either as a multi
    region trail or as a trail with it as the home region.

    :example:

    .. code-block:: yaml

        policies:
          - name: account-cloudtrail-enabled
            resource: account
            region: us-east-1
            filters:
              - type: check-cloudtrail
                global-events: true
                multi-region: true
                running: true
    """
    schema = type_schema(
        'check-cloudtrail',
        **{'multi-region': {'type': 'boolean'},
           'global-events': {'type': 'boolean'},
           'current-region': {'type': 'boolean'},
           'running': {'type': 'boolean'},
           'notifies': {'type': 'boolean'},
           'file-digest': {'type': 'boolean'},
           'kms': {'type': 'boolean'},
           'kms-key': {'type': 'string'}})
    permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')
    def process(self, resources, event=None):
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        trails = client.describe_trails()['trailList']
        # Annotate raw trail data for downstream actions/reports.
        resources[0]['c7n:cloudtrails'] = trails
        # Each configured option narrows the candidate trail set; if any
        # trail survives every check, the account is compliant (no match).
        if self.data.get('global-events'):
            trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
        if self.data.get('current-region'):
            current_region = session.region_name
            trails = [t for t in trails if t.get(
                'HomeRegion') == current_region or t.get('IsMultiRegionTrail')]
        if self.data.get('kms'):
            trails = [t for t in trails if t.get('KmsKeyId')]
        if self.data.get('kms-key'):
            trails = [t for t in trails
                      if t.get('KmsKeyId', '') == self.data['kms-key']]
        if self.data.get('file-digest'):
            trails = [t for t in trails
                      if t.get('LogFileValidationEnabled')]
        if self.data.get('multi-region'):
            trails = [t for t in trails if t.get('IsMultiRegionTrail')]
        if self.data.get('notifies'):
            trails = [t for t in trails if t.get('SNSTopicArn')]
        # 'running' defaults to True: trails must be logging without a
        # recent delivery error.
        if self.data.get('running', True):
            running = []
            for t in list(trails):
                t['Status'] = status = client.get_trail_status(
                    Name=t['TrailARN'])
                if status['IsLogging'] and not status.get(
                        'LatestDeliveryError'):
                    running.append(t)
            trails = running
        if trails:
            return []
        return resources
@filters.register('check-config')
class ConfigEnabled(Filter):
    """Is config service enabled for this account.

    Matches (returns the account) when AWS Config is NOT set up per the
    given options, i.e. when no delivery channel or no qualifying
    recorder exists.

    :example:

    .. code-block:: yaml

        policies:
          - name: account-check-config-services
            resource: account
            region: us-east-1
            filters:
              - type: check-config
                all-resources: true
                global-resources: true
                running: true
    """
    schema = type_schema(
        'check-config', **{
            'all-resources': {'type': 'boolean'},
            'running': {'type': 'boolean'},
            'global-resources': {'type': 'boolean'}})
    permissions = ('config:DescribeDeliveryChannels',
                   'config:DescribeConfigurationRecorders',
                   'config:DescribeConfigurationRecorderStatus')
    def process(self, resources, event=None):
        client = local_session(
            self.manager.session_factory).client('config')
        channels = client.describe_delivery_channels()[
            'DeliveryChannels']
        recorders = client.describe_configuration_recorders()[
            'ConfigurationRecorders']
        # Annotate raw state for downstream actions/reports.
        resources[0]['c7n:config_recorders'] = recorders
        resources[0]['c7n:config_channels'] = channels
        # Each option narrows the set of qualifying recorders.
        if self.data.get('global-resources'):
            recorders = [
                r for r in recorders
                if r['recordingGroup'].get('includeGlobalResourceTypes')]
        if self.data.get('all-resources'):
            recorders = [r for r in recorders
                         if r['recordingGroup'].get('allSupported')]
        # 'running' defaults to True: the recorder must be actively
        # recording with a pending/successful last status.
        if self.data.get('running', True) and recorders:
            status = {s['name']: s for
                      s in client.describe_configuration_recorder_status(
                      )['ConfigurationRecordersStatus']}
            resources[0]['c7n:config_status'] = status
            recorders = [r for r in recorders if status[r['name']]['recording'] and
                         status[r['name']]['lastStatus'].lower() in ('pending', 'success')]
        # Compliant (channel present and a recorder survived) -> no match.
        if channels and recorders:
            return []
        return resources
@filters.register('iam-summary')
class IAMSummary(ValueFilter):
    """Return annotated account resource if iam summary filter matches.

    Some use cases include, detecting root api keys or mfa usage.

    Example iam summary wrt to matchable fields::

      {
            "AccessKeysPerUserQuota": 2,
            "AccountAccessKeysPresent": 0,
            "AccountMFAEnabled": 1,
            "AccountSigningCertificatesPresent": 0,
            "AssumeRolePolicySizeQuota": 2048,
            "AttachedPoliciesPerGroupQuota": 10,
            "AttachedPoliciesPerRoleQuota": 10,
            "AttachedPoliciesPerUserQuota": 10,
            "GroupPolicySizeQuota": 5120,
            "Groups": 1,
            "GroupsPerUserQuota": 10,
            "GroupsQuota": 100,
            "InstanceProfiles": 0,
            "InstanceProfilesQuota": 100,
            "MFADevices": 3,
            "MFADevicesInUse": 2,
            "Policies": 3,
            "PoliciesQuota": 1000,
            "PolicySizeQuota": 5120,
            "PolicyVersionsInUse": 5,
            "PolicyVersionsInUseQuota": 10000,
            "Providers": 0,
            "RolePolicySizeQuota": 10240,
            "Roles": 4,
            "RolesQuota": 250,
            "ServerCertificates": 0,
            "ServerCertificatesQuota": 20,
            "SigningCertificatesPerUserQuota": 2,
            "UserPolicySizeQuota": 2048,
            "Users": 5,
            "UsersQuota": 5000,
            "VersionsPerPolicyQuota": 5,
      }

    For example to determine if an account has either not been
    enabled with root mfa or has root api keys.

    .. code-block:: yaml

        policies:
          - name: root-keys-or-no-mfa
            resource: account
            filters:
              - type: iam-summary
                key: AccountMFAEnabled
                value: true
                op: eq
                value_type: swap
    """
    schema = type_schema('iam-summary', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountSummary',)
    def process(self, resources, event=None):
        # Fetch and cache the summary on the account resource once.
        if not resources[0].get('c7n:iam_summary'):
            client = local_session(
                self.manager.session_factory).client('iam')
            resources[0]['c7n:iam_summary'] = client.get_account_summary(
            )['SummaryMap']
        # Apply the configured ValueFilter expression to the summary map.
        if self.match(resources[0]['c7n:iam_summary']):
            return resources
        return []
@filters.register('password-policy')
class AccountPasswordPolicy(ValueFilter):
    """Check an account's password policy.

    Note that on top of the default password policy fields, we also add an
    extra key, PasswordPolicyConfigured which will be set to true or false
    to signify if the given account has attempted to set a policy at all.

    :example:

    .. code-block:: yaml

        policies:
          - name: password-policy-check
            resource: account
            region: us-east-1
            filters:
              - type: password-policy
                key: MinimumPasswordLength
                value: 10
                op: ge
              - type: password-policy
                key: RequireSymbols
                value: true
    """
    schema = type_schema('password-policy', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountPasswordPolicy',)
    def process(self, resources, event=None):
        account = resources[0]
        # Fetch and cache the policy on the account resource once.
        if not account.get('c7n:password_policy'):
            client = local_session(self.manager.session_factory).client('iam')
            policy = {}
            try:
                policy = client.get_account_password_policy().get('PasswordPolicy', {})
                policy['PasswordPolicyConfigured'] = True
            except ClientError as e:
                # NoSuchEntity means no policy was ever set; surface that
                # as PasswordPolicyConfigured=False instead of failing.
                if e.response['Error']['Code'] == 'NoSuchEntity':
                    policy['PasswordPolicyConfigured'] = False
                else:
                    raise
            account['c7n:password_policy'] = policy
        # Apply the configured ValueFilter expression to the policy dict.
        if self.match(account['c7n:password_policy']):
            return resources
        return []
@filters.register('service-limit')
class ServiceLimit(Filter):
    """Check if account's service limits are past a given threshold.

    Supported limits are per trusted advisor, which is variable based
    on usage in the account and support level enabled on the account.

    - service: AutoScaling limit: Auto Scaling groups
    - service: AutoScaling limit: Launch configurations
    - service: EBS limit: Active snapshots
    - service: EBS limit: Active volumes
    - service: EBS limit: General Purpose (SSD) volume storage (GiB)
    - service: EBS limit: Magnetic volume storage (GiB)
    - service: EBS limit: Provisioned IOPS
    - service: EBS limit: Provisioned IOPS (SSD) storage (GiB)
    - service: EC2 limit: Elastic IP addresses (EIPs)

      # Note this is extant for each active instance type in the account
      # however the total value is against sum of all instance types.
      # see issue https://github.com/capitalone/cloud-custodian/issues/516

    - service: EC2 limit: On-Demand instances - m3.medium
    - service: EC2 limit: Reserved Instances - purchase limit (monthly)
    - service: ELB limit: Active load balancers
    - service: IAM limit: Groups
    - service: IAM limit: Instance profiles
    - service: IAM limit: Roles
    - service: IAM limit: Server certificates
    - service: IAM limit: Users
    - service: RDS limit: DB instances
    - service: RDS limit: DB parameter groups
    - service: RDS limit: DB security groups
    - service: RDS limit: DB snapshots per user
    - service: RDS limit: Storage quota (GB)
    - service: RDS limit: Internet gateways
    - service: SES limit: Daily sending quota
    - service: VPC limit: VPCs
    - service: VPC limit: VPC Elastic IP addresses (EIPs)

    :example:

    .. code-block: yaml

            policies:
              - name: account-service-limits
                resource: account
                filters:
                  - type: service-limit
                    services:
                      - EC2
                    threshold: 1.0
              - name: specify-region-for-global-service
                region: us-east-1
                resource: account
                filters:
                  - type: service-limit
                    services:
                      - IAM
                    limits:
                      - Roles
    """

    schema = type_schema(
        'service-limit',
        threshold={'type': 'number'},
        refresh_period={'type': 'integer'},
        limits={'type': 'array', 'items': {'type': 'string'}},
        services={'type': 'array', 'items': {
            'enum': ['EC2', 'ELB', 'VPC', 'AutoScaling',
                     'RDS', 'EBS', 'SES', 'IAM']}})

    permissions = ('support:DescribeTrustedAdvisorCheckResult',)
    # Fixed Trusted Advisor identifier for the 'Service Limits' check.
    check_id = 'eW7HH0l7J9'
    # Field order of each flagged resource's metadata list, used to
    # build the per-limit dict below.
    check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')
    # Services whose limits are only reported against us-east-1.
    global_services = set(['IAM'])

    def validate(self):
        """Reject policies that target a global service outside us-east-1."""
        region = self.manager.data.get('region', '')
        if len(self.global_services.intersection(self.data.get('services', []))):
            if region != 'us-east-1':
                raise FilterValidationError(
                    "Global services: %s must be targeted in us-east-1 on the policy"
                    % ', '.join(self.global_services))
        return self

    def process(self, resources, event=None):
        """Return the account resource when any selected limit exceeds
        the threshold (or is flagged non-ok when no threshold given)."""
        # The support API is only available in us-east-1.
        client = local_session(self.manager.session_factory).client(
            'support', region_name='us-east-1')
        checks = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        region = self.manager.config.region
        # Keep only rows for the current region; '-' marks global rows,
        # which are surfaced only when running against us-east-1.
        checks['flaggedResources'] = [r for r in checks['flaggedResources']
            if r['metadata'][0] == region or (r['metadata'][0] == '-' and region == 'us-east-1')]
        resources[0]['c7n:ServiceLimits'] = checks
        # Kick off a background refresh when the cached check result is
        # older than refresh_period days; the stale data is still used below.
        delta = timedelta(self.data.get('refresh_period', 1))
        check_date = parse_date(checks['timestamp'])
        if datetime.now(tz=tzutc()) - delta > check_date:
            client.refresh_trusted_advisor_check(checkId=self.check_id)
        threshold = self.data.get('threshold')
        services = self.data.get('services')
        limits = self.data.get('limits')
        exceeded = []

        for resource in checks['flaggedResources']:
            # Without an explicit threshold, defer to trusted advisor's
            # own ok/warning/error status.
            if threshold is None and resource['status'] == 'ok':
                continue
            limit = dict(zip(self.check_limit, resource['metadata']))
            if services and limit['service'] not in services:
                continue
            if limits and limit['check'] not in limits:
                continue
            limit['status'] = resource['status']
            # Percentage of the limit currently in use ('extant' may be None).
            limit['percentage'] = float(limit['extant'] or 0) / float(
                limit['limit']) * 100
            if threshold and limit['percentage'] < threshold:
                continue
            exceeded.append(limit)
        if exceeded:
            # Annotation consumed by the request-limit-increase action.
            resources[0]['c7n:ServiceLimitsExceeded'] = exceeded
            return resources
        return []
@actions.register('request-limit-increase')
class RequestLimitIncrease(BaseAction):
    r"""File support ticket to raise limit.

    :Example:

    .. code-block: yaml

        policies:
          - name: account-service-limits
            resource: account
            filters:
              - type: service-limit
                services:
                  - EBS
                limits:
                  - Provisioned IOPS (SSD) storage (GiB)
                threshold: 60.5
            actions:
              - type: request-limit-increase
                notify: [email, email2]
                ## You can use one of either percent-increase or an amount-increase.
                percent-increase: 50
                message: "Please raise the below account limit(s); \n {limits}"
    """

    # Fix: 'notify' was previously declared at the schema root, where a
    # JSON-Schema validator silently ignores it; it must live under
    # 'properties' to be validated as part of the action configuration.
    schema = {
        'type': 'object',
        'properties': {
            'type': {'enum': ['request-limit-increase']},
            'notify': {'type': 'array'},
            'percent-increase': {'type': 'number', 'minimum': 1},
            'amount-increase': {'type': 'number', 'minimum': 1},
            'subject': {'type': 'string'},
            'message': {'type': 'string'},
            'severity': {'type': 'string', 'enum': ['urgent', 'high', 'normal', 'low']}
        },
        'oneOf': [
            {'required': ['type', 'percent-increase']},
            {'required': ['type', 'amount-increase']}
        ]
    }

    permissions = ('support:CreateCase',)

    default_subject = '[Account:{account}]Raise the following limit(s) of {service} in {region}'
    default_template = 'Please raise the below account limit(s); \n {limits}'
    default_severity = 'normal'

    # Support-case service codes per trusted-advisor service name.
    service_code_mapping = {
        'AutoScaling': 'auto-scaling',
        'ELB': 'elastic-load-balancing',
        'EBS': 'amazon-elastic-block-store',
        'EC2': 'amazon-elastic-compute-cloud-linux',
        'RDS': 'amazon-relational-database-service-aurora',
        'VPC': 'amazon-virtual-private-cloud',
    }

    def process(self, resources):
        """Open one support case per service whose limits were exceeded.

        Relies on the 'c7n:ServiceLimitsExceeded' annotation left on the
        account resource by the service-limit filter.
        """
        session = local_session(self.manager.session_factory)
        # The support API is only available in us-east-1.
        client = session.client('support', region_name='us-east-1')
        account_id = self.manager.config.account_id
        service_map = {}
        region_map = {}
        limit_exceeded = resources[0].get('c7n:ServiceLimitsExceeded', [])
        percent_increase = self.data.get('percent-increase')
        amount_increase = self.data.get('amount-increase')

        for s in limit_exceeded:
            current_limit = int(s['limit'])
            if percent_increase:
                # Raise by a percentage of the current limit, at least 1.
                increase_by = current_limit * float(percent_increase) / 100
                increase_by = max(increase_by, 1)
            else:
                increase_by = amount_increase
            increase_by = round(increase_by)
            msg = '\nIncrease %s by %d in %s \n\t Current Limit: %s\n\t Current Usage: %s\n\t ' \
                  'Set New Limit to: %d' % (
                      s['check'], increase_by, s['region'], s['limit'], s['extant'],
                      (current_limit + increase_by))
            service_map.setdefault(s['service'], []).append(msg)
            region_map.setdefault(s['service'], s['region'])

        for service in service_map:
            subject = self.data.get('subject', self.default_subject).format(
                service=service, region=region_map[service], account=account_id)
            service_code = self.service_code_mapping.get(service)
            body = self.data.get('message', self.default_template)
            body = body.format(**{
                'service': service,
                'limits': '\n\t'.join(service_map[service]),
            })
            client.create_case(
                subject=subject,
                communicationBody=body,
                serviceCode=service_code,
                categoryCode='general-guidance',
                severityCode=self.data.get('severity', self.default_severity),
                ccEmailAddresses=self.data.get('notify', []))
def cloudtrail_policy(original, bucket_name, account_id):
    '''add CloudTrail permissions to an S3 policy, preserving existing'''
    required = [
        {
            'Action': 's3:GetBucketAcl',
            'Effect': 'Allow',
            'Principal': {'Service': 'cloudtrail.amazonaws.com'},
            'Resource': 'arn:aws:s3:::' + bucket_name,
            'Sid': 'AWSCloudTrailAclCheck20150319',
        },
        {
            'Action': 's3:PutObject',
            'Condition': {
                'StringEquals':
                {'s3:x-amz-acl': 'bucket-owner-full-control'},
            },
            'Effect': 'Allow',
            'Principal': {'Service': 'cloudtrail.amazonaws.com'},
            'Resource': 'arn:aws:s3:::%s/AWSLogs/%s/*' % (
                bucket_name, account_id
            ),
            'Sid': 'AWSCloudTrailWrite20150319',
        },
    ]

    # Start from the existing policy when one was supplied, otherwise
    # from an empty statement list.
    if original is None:
        policy = {'Statement': [], 'Version': '2012-10-17'}
    else:
        policy = json.loads(original['Policy'])

    # Only append statements whose Action is not already granted.
    present = [stmt.get('Action') for stmt in policy['Statement']]
    policy['Statement'].extend(
        stmt for stmt in required if stmt['Action'] not in present)
    return json.dumps(policy)
@actions.register('enable-cloudtrail')
class EnableTrail(BaseAction):
    """Enables logging on the trail(s) named in the policy

    :Example:

    .. code-block: yaml

        policies:
          - name: trail-test
            description: Ensure CloudTrail logging is enabled
            resource: account
            actions:
              - type: enable-cloudtrail
                trail: mytrail
                bucket: trails
    """

    permissions = (
        'cloudtrail:CreateTrail',
        'cloudtrail:DescribeTrails',
        'cloudtrail:GetTrailStatus',
        'cloudtrail:StartLogging',
        'cloudtrail:UpdateTrail',
        's3:CreateBucket',
        's3:GetBucketPolicy',
        's3:PutBucketPolicy',
    )

    schema = type_schema(
        'enable-cloudtrail',
        **{
            'trail': {'type': 'string'},
            'bucket': {'type': 'string'},
            'bucket-region': {'type': 'string'},
            'multi-region': {'type': 'boolean'},
            'global-events': {'type': 'boolean'},
            'notify': {'type': 'string'},
            'file-digest': {'type': 'boolean'},
            'kms': {'type': 'boolean'},
            'kms-key': {'type': 'string'},
            'required': ('bucket',),
        }
    )

    def process(self, accounts):
        """Create or enable CloudTrail"""
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        bucket_name = self.data['bucket']
        bucket_region = self.data.get('bucket-region', 'us-east-1')
        trail_name = self.data.get('trail', 'default-trail')
        multi_region = self.data.get('multi-region', True)
        global_events = self.data.get('global-events', True)
        notify = self.data.get('notify', '')
        file_digest = self.data.get('file-digest', False)
        kms = self.data.get('kms', False)
        kms_key = self.data.get('kms-key', '')

        s3client = session.client('s3')
        # Ensure the target bucket exists; tolerate it already being ours.
        try:
            s3client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': bucket_region}
            )
        except ClientError as ce:
            if not ('Error' in ce.response and
                    ce.response['Error']['Code'] == 'BucketAlreadyOwnedByYou'):
                raise ce

        try:
            current_policy = s3client.get_bucket_policy(Bucket=bucket_name)
        except ClientError:
            # No policy yet; cloudtrail_policy builds one from scratch.
            current_policy = None

        # Grant CloudTrail write access on the bucket, preserving any
        # pre-existing statements.
        policy_json = cloudtrail_policy(
            current_policy, bucket_name, self.manager.config.account_id)

        s3client.put_bucket_policy(Bucket=bucket_name, Policy=policy_json)
        trails = client.describe_trails().get('trailList', ())
        if trail_name not in [t.get('Name') for t in trails]:
            new_trail = client.create_trail(
                Name=trail_name,
                S3BucketName=bucket_name,
            )
            if new_trail:
                trails.append(new_trail)
                # the loop below will configure the new trail
        for trail in trails:
            if trail.get('Name') != trail_name:
                continue
            # enable
            arn = trail['TrailARN']
            status = client.get_trail_status(Name=arn)
            if not status['IsLogging']:
                client.start_logging(Name=arn)

            # apply configuration changes (if any)
            update_args = {}
            if multi_region != trail.get('IsMultiRegionTrail'):
                update_args['IsMultiRegionTrail'] = multi_region
            if global_events != trail.get('IncludeGlobalServiceEvents'):
                update_args['IncludeGlobalServiceEvents'] = global_events
            if notify != trail.get('SNSTopicArn'):
                update_args['SnsTopicName'] = notify
            if file_digest != trail.get('LogFileValidationEnabled'):
                update_args['EnableLogFileValidation'] = file_digest
            if kms_key != trail.get('KmsKeyId'):
                # clear the key when kms was turned off for the policy
                if not kms and 'KmsKeyId' in trail:
                    kms_key = ''
                update_args['KmsKeyId'] = kms_key

            if update_args:
                update_args['Name'] = trail_name
                client.update_trail(**update_args)
@filters.register('has-virtual-mfa')
class HasVirtualMFA(Filter):
    """Is the account configured with a virtual MFA device?

    :example:

    .. code-block: yaml

            policies:
                - name: account-with-virtual-mfa
                  resource: account
                  region: us-east-1
                  filters:
                    - type: has-virtual-mfa
                      value: true
    """

    schema = type_schema('has-virtual-mfa', **{'value': {'type': 'boolean'}})

    permissions = ('iam:ListVirtualMFADevices',)

    def mfa_belongs_to_root_account(self, mfa):
        # Root virtual MFA devices carry a fixed serial-number suffix.
        return mfa['SerialNumber'].endswith(':mfa/root-account-mfa-device')

    def account_has_virtual_mfa(self, account):
        devices = account.get('c7n:VirtualMFADevices')
        if not devices:
            client = local_session(self.manager.session_factory).client('iam')
            pages = client.get_paginator('list_virtual_mfa_devices').paginate()
            listed = pages.build_full_result()['VirtualMFADevices']
            devices = [d for d in listed
                       if self.mfa_belongs_to_root_account(d)]
            account['c7n:VirtualMFADevices'] = devices
        return self.data.get('value', True) == any(devices)

    def process(self, resources, event=None):
        return [r for r in resources if self.account_has_virtual_mfa(r)]
@actions.register('enable-data-events')
class EnableDataEvents(BaseAction):
    """Ensure all buckets in account are setup to log data events.

    Note this works via a single trail for data events per
    (https://goo.gl/1ux7RG).

    This trail should NOT be used for api management events, the
    configuration here is soley for data events. If directed to create
    a trail this will do so without management events.

    :example:

    .. code-block: yaml

            policies:
              - name: s3-enable-data-events
                resource: account
                actions:
                 - type: enable-data-events
                   data-trail:
                     name: s3-events
                     multi-region: us-east-1
    """

    schema = type_schema(
        'enable-data-events', required=['data-trail'], **{
            'data-trail': {
                'type': 'object',
                'additionalProperties': False,
                'required': ['name'],
                'properties': {
                    'create': {
                        'title': 'Should we create trail if needed for events?',
                        'type': 'boolean'},
                    'type': {'enum': ['ReadOnly', 'WriteOnly', 'All']},
                    'name': {
                        'title': 'The name of the event trail',
                        'type': 'string'},
                    'topic': {
                        'title': 'If creating, the sns topic for the trail to send updates',
                        'type': 'string'},
                    's3-bucket': {
                        'title': 'If creating, the bucket to store trail event data',
                        'type': 'string'},
                    's3-prefix': {'type': 'string'},
                    'key-id': {
                        'title': 'If creating, Enable kms on the trail',
                        'type': 'string'},
                    # region that we're aggregating via trails.
                    'multi-region': {
                        'title': 'If creating, use this region for all data trails',
                        'type': 'string'}}}})

    def validate(self):
        """Creating a trail requires knowing where its data should land."""
        if self.data['data-trail'].get('create'):
            if 's3-bucket' not in self.data['data-trail']:
                raise FilterValidationError(
                    "If creating data trails, an s3-bucket is required")
        return self

    def get_permissions(self):
        # Creation permissions are only needed when create was requested.
        perms = [
            'cloudtrail:DescribeTrails',
            'cloudtrail:GetEventSelectors',
            'cloudtrail:PutEventSelectors']
        if self.data.get('data-trail', {}).get('create'):
            perms.extend([
                'cloudtrail:CreateTrail', 'cloudtrail:StartLogging'])
        return perms

    def add_data_trail(self, client, trail_cfg):
        """Create the dedicated data-event trail from the policy config."""
        if not trail_cfg.get('create'):
            raise ValueError(
                "s3 data event trail missing and not configured to create")
        params = dict(
            Name=trail_cfg['name'],
            S3BucketName=trail_cfg['s3-bucket'],
            EnableLogFileValidation=True)
        if 'key-id' in trail_cfg:
            params['KmsKeyId'] = trail_cfg['key-id']
        if 's3-prefix' in trail_cfg:
            params['S3KeyPrefix'] = trail_cfg['s3-prefix']
        if 'topic' in trail_cfg:
            params['SnsTopicName'] = trail_cfg['topic']
        if 'multi-region' in trail_cfg:
            params['IsMultiRegionTrail'] = True
        client.create_trail(**params)
        return {'Name': trail_cfg['name']}

    def process(self, resources):
        session = local_session(self.manager.session_factory)
        region = self.data['data-trail'].get('multi-region')

        if region:
            client = session.client('cloudtrail', region_name=region)
        else:
            client = session.client('cloudtrail')

        added = False
        tconfig = self.data['data-trail']
        trails = client.describe_trails(
            trailNameList=[tconfig['name']]).get('trailList', ())
        if not trails:
            trail = self.add_data_trail(client, tconfig)
            added = True
        else:
            trail = trails[0]

        events = client.get_event_selectors(
            TrailName=trail['Name']).get('EventSelectors', [])

        for e in events:
            found = False
            if not e.get('DataResources'):
                continue
            for data_events in e['DataResources']:
                if data_events['Type'] != 'AWS::S3::Object':
                    continue
                for b in data_events['Values']:
                    # an 'arn:aws:s3:::' value with no bucket suffix means
                    # the selector already covers every bucket.
                    if b.rsplit(':')[-1].strip('/') == '':
                        found = True
                        break
            if found:
                # already configured; just annotate and stop.
                resources[0]['c7n_data_trail'] = trail
                return

        # Opinionated choice, separate api and data events.
        event_count = len(events)
        events = [e for e in events if not e.get('IncludeManagementEvents')]
        if len(events) != event_count:
            self.log.warning("removing api trail from data trail")

        # future proof'd for other data events, for s3 this trail
        # encompasses all the buckets in the account.
        events.append({
            'IncludeManagementEvents': False,
            'ReadWriteType': tconfig.get('type', 'All'),
            'DataResources': [{
                'Type': 'AWS::S3::Object',
                'Values': ['arn:aws:s3:::']}]})

        client.put_event_selectors(
            TrailName=trail['Name'],
            EventSelectors=events)

        if added:
            # Freshly created trails start stopped; turn logging on.
            client.start_logging(Name=tconfig['name'])

        resources[0]['c7n_data_trail'] = trail
@filters.register('shield-enabled')
class ShieldEnabled(Filter):
    """Match the account on whether a Shield Advanced subscription exists.

    With state: true the account matches when a subscription is present;
    with state: false (the default) it matches when none is present.
    """

    permissions = ('shield:DescribeSubscription',)

    schema = type_schema(
        'shield-enabled',
        state={'type': 'boolean'})

    def process(self, resources, event=None):
        wanted = self.data.get('state', False)
        client = self.manager.session_factory().client('shield')
        subscription = None
        try:
            subscription = client.describe_subscription().get(
                'Subscription', None)
        except ClientError as e:
            # No subscription at all surfaces as ResourceNotFoundException.
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise
        resources[0]['c7n:ShieldSubscription'] = subscription
        if wanted == bool(subscription):
            return resources
        return []
@actions.register('set-shield-advanced')
class SetShieldAdvanced(BaseAction):
    """Enable/disable Shield Advanced on an account."""

    permissions = (
        'shield:CreateSubscription', 'shield:DeleteSubscription')

    schema = type_schema(
        'set-shield-advanced',
        state={'type': 'boolean'})

    def process(self, resources):
        client = self.manager.session_factory().client('shield')
        if self.data.get('state', True):
            client.create_subscription()
            return
        try:
            client.delete_subscription()
        except ClientError as e:
            # Already unsubscribed; nothing to do.
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise
| 36.671148 | 99 | 0.567666 |
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, ValueFilter, FilterValidationError
from c7n.manager import ResourceManager, resources
from c7n.utils import local_session, type_schema
from c7n.resources.iam import CredentialReport
# Registries for filters/actions attached to the account pseudo-resource.
# NOTE(review): the registry labels appear swapped ('aws.account.actions'
# on the filter registry and vice versa); likely harmless if the name is
# informational, but confirm before changing.
filters = FilterRegistry('aws.account.actions')
actions = ActionRegistry('aws.account.filters')
def get_account(session_factory, config):
    """Synthesize the single account resource record.

    Resolves the first account alias (empty string when none is set)
    and pairs it with the configured account id.
    """
    iam = local_session(session_factory).client('iam')
    aliases = iam.list_account_aliases().get('AccountAliases', ('',))
    if aliases:
        alias = aliases[0] or ""
    else:
        alias = ""
    return {'account_id': config.account_id,
            'account_name': alias}
@resources.register('account')
class Account(ResourceManager):
    """Pseudo resource representing the AWS account itself.

    Exactly one resource instance is produced (via get_account), so
    registered filters/actions operate account-wide.
    """

    filter_registry = filters
    action_registry = actions

    class resource_type(object):
        # Metadata keys of the synthetic account record.
        id = 'account_id'
        name = 'account_name'
        filter_name = None

    @classmethod
    def get_permissions(cls):
        # Only permission needed to materialize the account resource.
        return ('iam:ListAccountAliases',)

    def get_model(self):
        return self.resource_type

    def resources(self):
        return self.filter_resources([get_account(self.session_factory, self.config)])

    def get_resources(self, resource_ids):
        # resource_ids are ignored; there is only one account resource.
        return [get_account(self.session_factory, self.config)]
@filters.register('credential')
class AccountCredentialReport(CredentialReport):
    """Apply the IAM credential-report value filter to the account's
    <root_account> entry."""

    def process(self, resources, event=None):
        super(AccountCredentialReport, self).process(resources, event)
        report = self.get_credential_report()
        if report is None:
            return []
        root_info = report.get('<root_account>')
        matched = []
        for account in resources:
            if not self.match(root_info):
                continue
            account['c7n:credential-report'] = root_info
            matched.append(account)
        return matched
@filters.register('check-cloudtrail')
class CloudTrailEnabled(Filter):
    """Match the account when no CloudTrail trail satisfies all of the
    requested criteria.

    Each enabled option prunes the trail list; if any trail survives
    every check the account is compliant and an empty result is
    returned, otherwise the account resource is returned.
    """

    schema = type_schema(
        'check-cloudtrail',
        **{'multi-region': {'type': 'boolean'},
           'global-events': {'type': 'boolean'},
           'current-region': {'type': 'boolean'},
           'running': {'type': 'boolean'},
           'notifies': {'type': 'boolean'},
           'file-digest': {'type': 'boolean'},
           'kms': {'type': 'boolean'},
           'kms-key': {'type': 'string'}})

    permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')

    def process(self, resources, event=None):
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        trails = client.describe_trails()['trailList']
        # Stash the raw trail list on the account resource for reporting.
        resources[0]['c7n:cloudtrails'] = trails
        if self.data.get('global-events'):
            trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
        if self.data.get('current-region'):
            current_region = session.region_name
            # Multi-region trails cover the current region regardless of home.
            trails = [t for t in trails if t.get(
                'HomeRegion') == current_region or t.get('IsMultiRegionTrail')]
        if self.data.get('kms'):
            trails = [t for t in trails if t.get('KmsKeyId')]
        if self.data.get('kms-key'):
            trails = [t for t in trails
                      if t.get('KmsKeyId', '') == self.data['kms-key']]
        if self.data.get('file-digest'):
            trails = [t for t in trails
                      if t.get('LogFileValidationEnabled')]
        if self.data.get('multi-region'):
            trails = [t for t in trails if t.get('IsMultiRegionTrail')]
        if self.data.get('notifies'):
            trails = [t for t in trails if t.get('SNSTopicArn')]
        if self.data.get('running', True):
            # Default behavior: keep trails that are logging and whose
            # last delivery attempt did not error.
            running = []
            for t in list(trails):
                t['Status'] = status = client.get_trail_status(
                    Name=t['TrailARN'])
                if status['IsLogging'] and not status.get(
                        'LatestDeliveryError'):
                    running.append(t)
            trails = running
        if trails:
            return []
        return resources
@filters.register('check-config')
class ConfigEnabled(Filter):
    """Match the account when AWS Config is not set up to the requested
    standard (delivery channel plus a conforming recorder); returns
    nothing when compliant.
    """

    schema = type_schema(
        'check-config', **{
            'all-resources': {'type': 'boolean'},
            'running': {'type': 'boolean'},
            'global-resources': {'type': 'boolean'}})

    permissions = ('config:DescribeDeliveryChannels',
                   'config:DescribeConfigurationRecorders',
                   'config:DescribeConfigurationRecorderStatus')

    def process(self, resources, event=None):
        client = local_session(
            self.manager.session_factory).client('config')
        channels = client.describe_delivery_channels()[
            'DeliveryChannels']
        recorders = client.describe_configuration_recorders()[
            'ConfigurationRecorders']
        # Annotate raw results on the account resource for reporting.
        resources[0]['c7n:config_recorders'] = recorders
        resources[0]['c7n:config_channels'] = channels
        if self.data.get('global-resources'):
            recorders = [
                r for r in recorders
                if r['recordingGroup'].get('includeGlobalResourceTypes')]
        if self.data.get('all-resources'):
            recorders = [r for r in recorders
                         if r['recordingGroup'].get('allSupported')]
        if self.data.get('running', True) and recorders:
            # 'running' defaults on: the recorder must be recording and
            # its last status must be pending or success.
            status = {s['name']: s for
                      s in client.describe_configuration_recorder_status(
                      )['ConfigurationRecordersStatus']}
            resources[0]['c7n:config_status'] = status
            recorders = [r for r in recorders if status[r['name']]['recording'] and
                         status[r['name']]['lastStatus'].lower() in ('pending', 'success')]
        if channels and recorders:
            return []
        return resources
@filters.register('iam-summary')
class IAMSummary(ValueFilter):
    """Value filter over the IAM account summary map, e.g. to check
    whether root MFA is enabled or root access keys exist."""

    schema = type_schema('iam-summary', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountSummary',)

    def process(self, resources, event=None):
        account = resources[0]
        if not account.get('c7n:iam_summary'):
            # Fetch once and cache on the resource.
            iam = local_session(self.manager.session_factory).client('iam')
            account['c7n:iam_summary'] = iam.get_account_summary()['SummaryMap']
        return resources if self.match(account['c7n:iam_summary']) else []
@filters.register('password-policy')
class AccountPasswordPolicy(ValueFilter):
    """Match the account against its IAM password-policy settings.

    A synthetic PasswordPolicyConfigured key reports whether any
    policy has been configured at all.
    """

    schema = type_schema('password-policy', rinherit=ValueFilter.schema)
    permissions = ('iam:GetAccountPasswordPolicy',)

    def process(self, resources, event=None):
        account = resources[0]
        cached = account.get('c7n:password_policy')
        if not cached:
            client = local_session(self.manager.session_factory).client('iam')
            try:
                cached = client.get_account_password_policy().get(
                    'PasswordPolicy', {})
                cached['PasswordPolicyConfigured'] = True
            except ClientError as e:
                if e.response['Error']['Code'] != 'NoSuchEntity':
                    raise
                cached = {'PasswordPolicyConfigured': False}
            account['c7n:password_policy'] = cached
        return resources if self.match(cached) else []
@filters.register('service-limit')
class ServiceLimit(Filter):
    """Check if the account's trusted-advisor service limits are past a
    given utilization threshold, or flagged non-ok by trusted advisor
    when no threshold is given.
    """

    schema = type_schema(
        'service-limit',
        threshold={'type': 'number'},
        refresh_period={'type': 'integer'},
        limits={'type': 'array', 'items': {'type': 'string'}},
        services={'type': 'array', 'items': {
            'enum': ['EC2', 'ELB', 'VPC', 'AutoScaling',
                     'RDS', 'EBS', 'SES', 'IAM']}})

    permissions = ('support:DescribeTrustedAdvisorCheckResult',)
    # Fixed Trusted Advisor identifier for the 'Service Limits' check.
    check_id = 'eW7HH0l7J9'
    # Field order of each flagged resource's metadata list.
    check_limit = ('region', 'service', 'check', 'limit', 'extant', 'color')
    # Services whose limits are only reported against us-east-1.
    global_services = set(['IAM'])

    def validate(self):
        """Reject policies that target a global service outside us-east-1."""
        region = self.manager.data.get('region', '')
        if len(self.global_services.intersection(self.data.get('services', []))):
            if region != 'us-east-1':
                raise FilterValidationError(
                    "Global services: %s must be targeted in us-east-1 on the policy"
                    % ', '.join(self.global_services))
        return self

    def process(self, resources, event=None):
        # The support API is only available in us-east-1.
        client = local_session(self.manager.session_factory).client(
            'support', region_name='us-east-1')
        checks = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        region = self.manager.config.region
        # Keep rows for this region; '-' marks global rows, surfaced
        # only when running against us-east-1.
        checks['flaggedResources'] = [r for r in checks['flaggedResources']
            if r['metadata'][0] == region or (r['metadata'][0] == '-' and region == 'us-east-1')]
        resources[0]['c7n:ServiceLimits'] = checks
        # Kick off a refresh if the cached result is older than
        # refresh_period days; stale data is still used below.
        delta = timedelta(self.data.get('refresh_period', 1))
        check_date = parse_date(checks['timestamp'])
        if datetime.now(tz=tzutc()) - delta > check_date:
            client.refresh_trusted_advisor_check(checkId=self.check_id)
        threshold = self.data.get('threshold')
        services = self.data.get('services')
        limits = self.data.get('limits')
        exceeded = []
        for resource in checks['flaggedResources']:
            # Without a threshold, defer to trusted advisor's own status.
            if threshold is None and resource['status'] == 'ok':
                continue
            limit = dict(zip(self.check_limit, resource['metadata']))
            if services and limit['service'] not in services:
                continue
            if limits and limit['check'] not in limits:
                continue
            limit['status'] = resource['status']
            limit['percentage'] = float(limit['extant'] or 0) / float(
                limit['limit']) * 100
            if threshold and limit['percentage'] < threshold:
                continue
            exceeded.append(limit)
        if exceeded:
            # Annotation consumed by the request-limit-increase action.
            resources[0]['c7n:ServiceLimitsExceeded'] = exceeded
            return resources
        return []
@actions.register('request-limit-increase')
class RequestLimitIncrease(BaseAction):
    """File a support ticket to raise exceeded service limits, one case
    per service flagged by the service-limit filter."""

    # Fix: 'notify' was previously declared at the top level of the
    # schema, where JSON-Schema validators silently ignore it; it must
    # live under 'properties' to be validated.
    schema = {
        'type': 'object',
        'properties': {
            'type': {'enum': ['request-limit-increase']},
            'notify': {'type': 'array'},
            'percent-increase': {'type': 'number', 'minimum': 1},
            'amount-increase': {'type': 'number', 'minimum': 1},
            'subject': {'type': 'string'},
            'message': {'type': 'string'},
            'severity': {'type': 'string', 'enum': ['urgent', 'high', 'normal', 'low']}
        },
        'oneOf': [
            {'required': ['type', 'percent-increase']},
            {'required': ['type', 'amount-increase']}
        ]
    }

    permissions = ('support:CreateCase',)

    default_subject = '[Account:{account}]Raise the following limit(s) of {service} in {region}'
    default_template = 'Please raise the below account limit(s); \n {limits}'
    default_severity = 'normal'

    # Support-case service codes per trusted-advisor service name.
    service_code_mapping = {
        'AutoScaling': 'auto-scaling',
        'ELB': 'elastic-load-balancing',
        'EBS': 'amazon-elastic-block-store',
        'EC2': 'amazon-elastic-compute-cloud-linux',
        'RDS': 'amazon-relational-database-service-aurora',
        'VPC': 'amazon-virtual-private-cloud',
    }

    def process(self, resources):
        """Open one support case per service with exceeded limits.

        Relies on the 'c7n:ServiceLimitsExceeded' annotation left on
        the account resource by the service-limit filter.
        """
        session = local_session(self.manager.session_factory)
        # The support API is only available in us-east-1.
        client = session.client('support', region_name='us-east-1')
        account_id = self.manager.config.account_id
        service_map = {}
        region_map = {}
        limit_exceeded = resources[0].get('c7n:ServiceLimitsExceeded', [])
        percent_increase = self.data.get('percent-increase')
        amount_increase = self.data.get('amount-increase')
        for s in limit_exceeded:
            current_limit = int(s['limit'])
            if percent_increase:
                # Raise by a percentage of the current limit, at least 1.
                increase_by = current_limit * float(percent_increase) / 100
                increase_by = max(increase_by, 1)
            else:
                increase_by = amount_increase
            increase_by = round(increase_by)
            msg = '\nIncrease %s by %d in %s \n\t Current Limit: %s\n\t Current Usage: %s\n\t ' \
                  'Set New Limit to: %d' % (
                      s['check'], increase_by, s['region'], s['limit'], s['extant'],
                      (current_limit + increase_by))
            service_map.setdefault(s['service'], []).append(msg)
            region_map.setdefault(s['service'], s['region'])
        for service in service_map:
            subject = self.data.get('subject', self.default_subject).format(
                service=service, region=region_map[service], account=account_id)
            service_code = self.service_code_mapping.get(service)
            body = self.data.get('message', self.default_template)
            body = body.format(**{
                'service': service,
                'limits': '\n\t'.join(service_map[service]),
            })
            client.create_case(
                subject=subject,
                communicationBody=body,
                serviceCode=service_code,
                categoryCode='general-guidance',
                severityCode=self.data.get('severity', self.default_severity),
                ccEmailAddresses=self.data.get('notify', []))
def cloudtrail_policy(original, bucket_name, account_id):
    '''add CloudTrail permissions to an S3 policy, preserving existing'''
    bucket_arn = 'arn:aws:s3:::' + bucket_name
    ct_statements = [
        {
            'Action': 's3:GetBucketAcl',
            'Effect': 'Allow',
            'Principal': {'Service': 'cloudtrail.amazonaws.com'},
            'Resource': bucket_arn,
            'Sid': 'AWSCloudTrailAclCheck20150319',
        },
        {
            'Action': 's3:PutObject',
            'Condition': {
                'StringEquals':
                {'s3:x-amz-acl': 'bucket-owner-full-control'},
            },
            'Effect': 'Allow',
            'Principal': {'Service': 'cloudtrail.amazonaws.com'},
            'Resource': '%s/AWSLogs/%s/*' % (bucket_arn, account_id),
            'Sid': 'AWSCloudTrailWrite20150319',
        },
    ]
    # Start from the existing policy when supplied, otherwise from an
    # empty statement list.
    if original is None:
        policy = {'Statement': [], 'Version': '2012-10-17'}
    else:
        policy = json.loads(original['Policy'])
    # Only append statements whose Action is not already granted.
    existing = [stmt.get('Action') for stmt in policy['Statement']]
    for stmt in ct_statements:
        if stmt['Action'] in existing:
            continue
        policy['Statement'].append(stmt)
    return json.dumps(policy)
@actions.register('enable-cloudtrail')
class EnableTrail(BaseAction):
    """Create (if needed), configure, and start logging on the
    CloudTrail trail named in the policy, including the destination
    S3 bucket and its bucket policy."""

    permissions = (
        'cloudtrail:CreateTrail',
        'cloudtrail:DescribeTrails',
        'cloudtrail:GetTrailStatus',
        'cloudtrail:StartLogging',
        'cloudtrail:UpdateTrail',
        's3:CreateBucket',
        's3:GetBucketPolicy',
        's3:PutBucketPolicy',
    )

    schema = type_schema(
        'enable-cloudtrail',
        **{
            'trail': {'type': 'string'},
            'bucket': {'type': 'string'},
            'bucket-region': {'type': 'string'},
            'multi-region': {'type': 'boolean'},
            'global-events': {'type': 'boolean'},
            'notify': {'type': 'string'},
            'file-digest': {'type': 'boolean'},
            'kms': {'type': 'boolean'},
            'kms-key': {'type': 'string'},
            'required': ('bucket',),
        }
    )

    def process(self, accounts):
        """Create or enable CloudTrail per the policy configuration."""
        session = local_session(self.manager.session_factory)
        client = session.client('cloudtrail')
        bucket_name = self.data['bucket']
        bucket_region = self.data.get('bucket-region', 'us-east-1')
        trail_name = self.data.get('trail', 'default-trail')
        multi_region = self.data.get('multi-region', True)
        global_events = self.data.get('global-events', True)
        notify = self.data.get('notify', '')
        file_digest = self.data.get('file-digest', False)
        kms = self.data.get('kms', False)
        kms_key = self.data.get('kms-key', '')
        s3client = session.client('s3')
        # Ensure the destination bucket exists; tolerate it already
        # being owned by this account.
        try:
            s3client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': bucket_region}
            )
        except ClientError as ce:
            if not ('Error' in ce.response and
                    ce.response['Error']['Code'] == 'BucketAlreadyOwnedByYou'):
                raise ce
        try:
            current_policy = s3client.get_bucket_policy(Bucket=bucket_name)
        except ClientError:
            # No policy yet; one is built from scratch below.
            current_policy = None
        # Grant CloudTrail write access, preserving existing statements.
        policy_json = cloudtrail_policy(
            current_policy, bucket_name, self.manager.config.account_id)
        s3client.put_bucket_policy(Bucket=bucket_name, Policy=policy_json)
        trails = client.describe_trails().get('trailList', ())
        if trail_name not in [t.get('Name') for t in trails]:
            new_trail = client.create_trail(
                Name=trail_name,
                S3BucketName=bucket_name,
            )
            if new_trail:
                # the configuration loop below also covers the new trail
                trails.append(new_trail)
        for trail in trails:
            if trail.get('Name') != trail_name:
                continue
            # Start logging if the trail is currently stopped.
            arn = trail['TrailARN']
            status = client.get_trail_status(Name=arn)
            if not status['IsLogging']:
                client.start_logging(Name=arn)
            # Apply configuration changes, if any differ from the policy.
            update_args = {}
            if multi_region != trail.get('IsMultiRegionTrail'):
                update_args['IsMultiRegionTrail'] = multi_region
            if global_events != trail.get('IncludeGlobalServiceEvents'):
                update_args['IncludeGlobalServiceEvents'] = global_events
            if notify != trail.get('SNSTopicArn'):
                update_args['SnsTopicName'] = notify
            if file_digest != trail.get('LogFileValidationEnabled'):
                update_args['EnableLogFileValidation'] = file_digest
            if kms_key != trail.get('KmsKeyId'):
                # Clear the key when kms was turned off for the policy.
                if not kms and 'KmsKeyId' in trail:
                    kms_key = ''
                update_args['KmsKeyId'] = kms_key
            if update_args:
                update_args['Name'] = trail_name
                client.update_trail(**update_args)
@filters.register('has-virtual-mfa')
class HasVirtualMFA(Filter):
    """Filter accounts by the presence (or absence) of a root virtual MFA device.

    The device listing is cached on the account resource under
    ``c7n:VirtualMFADevices`` so repeated evaluation avoids extra API calls.
    """

    schema = type_schema('has-virtual-mfa', **{'value': {'type': 'boolean'}})
    permissions = ('iam:ListVirtualMFADevices',)

    def mfa_belongs_to_root_account(self, mfa):
        # Root-account devices carry a well-known serial number suffix.
        return mfa['SerialNumber'].endswith(':mfa/root-account-mfa-device')

    def account_has_virtual_mfa(self, account):
        # Populate the cache lazily on first evaluation of this account.
        if not account.get('c7n:VirtualMFADevices'):
            iam = local_session(self.manager.session_factory).client('iam')
            pages = iam.get_paginator('list_virtual_mfa_devices').paginate()
            devices = pages.build_full_result()['VirtualMFADevices']
            account['c7n:VirtualMFADevices'] = [
                d for d in devices if self.mfa_belongs_to_root_account(d)]
        expected = self.data.get('value', True)
        return expected == any(account['c7n:VirtualMFADevices'])

    def process(self, resources, event=None):
        return [r for r in resources if self.account_has_virtual_mfa(r)]
@actions.register('enable-data-events')
class EnableDataEvents(BaseAction):
    """Ensure a CloudTrail trail records S3 data (object-level) events.

    Looks up the trail named in ``data-trail`` (optionally creating it when
    ``create`` is set) and appends an event selector covering all S3 objects
    unless an equivalent catch-all selector is already present. The resolved
    trail record is stashed on the account resource as ``c7n_data_trail``.
    """

    schema = type_schema(
        'enable-data-events', required=['data-trail'], **{
            'data-trail': {
                'type': 'object',
                'additionalProperties': False,
                'required': ['name'],
                'properties': {
                    'create': {
                        'title': 'Should we create trail if needed for events?',
                        'type': 'boolean'},
                    'type': {'enum': ['ReadOnly', 'WriteOnly', 'All']},
                    'name': {
                        'title': 'The name of the event trail',
                        'type': 'string'},
                    'topic': {
                        'title': 'If creating, the sns topic for the trail to send updates',
                        'type': 'string'},
                    's3-bucket': {
                        'title': 'If creating, the bucket to store trail event data',
                        'type': 'string'},
                    's3-prefix': {'type': 'string'},
                    'key-id': {
                        'title': 'If creating, Enable kms on the trail',
                        'type': 'string'},
                    'multi-region': {
                        'title': 'If creating, use this region for all data trails',
                        'type': 'string'}}}})

    def validate(self):
        # Fail policy load early: creating a trail needs a destination bucket.
        if self.data['data-trail'].get('create'):
            if 's3-bucket' not in self.data['data-trail']:
                raise FilterValidationError(
                    "If creating data trails, an s3-bucket is required")
        return self

    def get_permissions(self):
        perms = [
            'cloudtrail:DescribeTrails',
            'cloudtrail:GetEventSelectors',
            'cloudtrail:PutEventSelectors']
        # Creation requires additional permissions beyond read/update.
        if self.data.get('data-trail', {}).get('create'):
            perms.extend([
                'cloudtrail:CreateTrail', 'cloudtrail:StartLogging'])
        return perms

    def add_data_trail(self, client, trail_cfg):
        """Create the data-event trail described by *trail_cfg*.

        :raises ValueError: when the trail is missing and ``create`` was not
            requested in the policy.
        """
        if not trail_cfg.get('create'):
            raise ValueError(
                "s3 data event trail missing and not configured to create")
        params = dict(
            Name=trail_cfg['name'],
            S3BucketName=trail_cfg['s3-bucket'],
            EnableLogFileValidation=True)
        if 'key-id' in trail_cfg:
            params['KmsKeyId'] = trail_cfg['key-id']
        if 's3-prefix' in trail_cfg:
            params['S3KeyPrefix'] = trail_cfg['s3-prefix']
        if 'topic' in trail_cfg:
            params['SnsTopicName'] = trail_cfg['topic']
        if 'multi-region' in trail_cfg:
            params['IsMultiRegionTrail'] = True
        client.create_trail(**params)
        return {'Name': trail_cfg['name']}

    def process(self, resources):
        session = local_session(self.manager.session_factory)
        # A multi-region data trail lives in a single designated region.
        region = self.data['data-trail'].get('multi-region')
        if region:
            client = session.client('cloudtrail', region_name=region)
        else:
            client = session.client('cloudtrail')
        added = False
        tconfig = self.data['data-trail']
        trails = client.describe_trails(
            trailNameList=[tconfig['name']]).get('trailList', ())
        if not trails:
            trail = self.add_data_trail(client, tconfig)
            added = True
        else:
            trail = trails[0]
        events = client.get_event_selectors(
            TrailName=trail['Name']).get('EventSelectors', [])
        for e in events:
            found = False
            if not e.get('DataResources'):
                continue
            for data_events in e['DataResources']:
                if data_events['Type'] != 'AWS::S3::Object':
                    continue
                for b in data_events['Values']:
                    # The catch-all value 'arn:aws:s3:::' splits to an empty
                    # last segment; its presence means we're already done.
                    if b.rsplit(':')[-1].strip('/') == '':
                        found = True
                        break
            if found:
                resources[0]['c7n_data_trail'] = trail
                return
        # Opinionated choice, separate api and data events.
        event_count = len(events)
        events = [e for e in events if not e.get('IncludeManagementEvents')]
        if len(events) != event_count:
            self.log.warning("removing api trail from data trail")
        # future proof'd for other data events, for s3 this trail
        events.append({
            'IncludeManagementEvents': False,
            'ReadWriteType': tconfig.get('type', 'All'),
            'DataResources': [{
                'Type': 'AWS::S3::Object',
                'Values': ['arn:aws:s3:::']}]})
        client.put_event_selectors(
            TrailName=trail['Name'],
            EventSelectors=events)
        # A freshly-created trail must be explicitly started.
        if added:
            client.start_logging(Name=tconfig['name'])
        resources[0]['c7n_data_trail'] = trail
@filters.register('shield-enabled')
class ShieldEnabled(Filter):
    """Filter the account by whether a Shield Advanced subscription exists.

    With ``state: true`` the account matches only when a subscription is
    present; with ``state: false`` (the default) only when it is absent.
    The subscription record is stashed on the account resource under
    ``c7n:ShieldSubscription``.
    """

    permissions = ('shield:DescribeSubscription',)

    schema = type_schema(
        'shield-enabled',
        state={'type': 'boolean'})

    def process(self, resources, event=None):
        desired = self.data.get('state', False)
        shield = self.manager.session_factory().client('shield')
        try:
            subscription = shield.describe_subscription().get(
                'Subscription', None)
        except ClientError as e:
            # The API reports an absent subscription as an error.
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise
            subscription = None
        resources[0]['c7n:ShieldSubscription'] = subscription
        # Match when the subscription's presence agrees with the desired state.
        return resources if bool(subscription) == bool(desired) else []
@actions.register('set-shield-advanced')
class SetShieldAdvanced(BaseAction):
    """Enable (``state: true``, the default) or disable the account's
    Shield Advanced subscription.
    """

    permissions = (
        'shield:CreateSubscription', 'shield:DeleteSubscription')

    schema = type_schema(
        'set-shield-advanced',
        state={'type': 'boolean'})

    def process(self, resources):
        shield = self.manager.session_factory().client('shield')
        if self.data.get('state', True):
            shield.create_subscription()
            return
        try:
            shield.delete_subscription()
        except ClientError as e:
            # Deleting a subscription that does not exist counts as done.
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                return
            raise
| true | true |
1c3cfe6ec01ec9abd995a703e40d53328df8d86a | 10,503 | py | Python | coapthon/client/helperclient.py | 8749236/CoAPthon3 | 995425c8a895408535261e1838824efa06d0adb1 | [
"MIT"
] | null | null | null | coapthon/client/helperclient.py | 8749236/CoAPthon3 | 995425c8a895408535261e1838824efa06d0adb1 | [
"MIT"
] | null | null | null | coapthon/client/helperclient.py | 8749236/CoAPthon3 | 995425c8a895408535261e1838824efa06d0adb1 | [
"MIT"
] | null | null | null | import random
from multiprocessing import Queue
from queue import Empty
import threading
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
__author__ = 'Giacomo Tanganelli'
class HelperClient(object):
    """
    Helper Client class to perform requests to remote servers in a simplified way.
    """
    def __init__(self, server, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
        """
        Initialize a client to perform request to a server.

        :param server: the remote CoAP server
        :param sock: if a socket has been created externally, it can be used directly
        :param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
        :param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
        """
        self.server = server
        self.protocol = CoAP(self.server, random.randint(1, 65535), self._wait_response, sock=sock,
                             cb_ignore_read_exception=cb_ignore_read_exception, cb_ignore_write_exception=cb_ignore_write_exception)
        self.queue = Queue()
        self.observe_token = None
        self.observe_path = None

    def _wait_response(self, message):
        """
        Private function to get responses from the server.

        :param message: the received message
        """
        # Block-transfer CONTINUE responses are intermediate; only final
        # responses are handed to the waiting caller.
        if message is None or message.code != defines.Codes.CONTINUE.number:
            self.queue.put(message)

    def stop(self):
        """
        Stop the client.
        """
        self.protocol.close()
        # Unblock any thread waiting on the response queue.
        self.queue.put(None)

    def close(self):
        """
        Close the client.
        """
        self.stop()

    def _thread_body(self, request, callback):
        """
        Private function. Send a request, wait for response and call the callback function.

        :param request: the request to send
        :param callback: the callback function
        """
        self.protocol.send_message(request)
        while not self.protocol.stopped.isSet():
            response = self.queue.get(block=True)
            callback(response)

    def cancel_observe_token(self, token=None, explicit=False, timeout=None):  # pragma: no cover
        """
        Delete observing on the remote server.

        :param token: the observe token
        :param explicit: if explicitly cancel
        :param timeout: the timeout for the explicit cancel request
        """
        if token is None:
            token = self.observe_token
        if self.observe_token is None:
            return None
        self.protocol.end_observation(token)
        if not explicit:
            self.observe_path = None
            return
        request = self.mk_request(defines.Codes.GET, self.observe_path)
        # RFC7641 explicit cancel is by sending OBSERVE=1 with the same token,
        # not by an unsolicited RST (which would be ignored)
        request.token = token
        request.observe = 1
        resp = self.send_request(request, callback=None, timeout=timeout)
        if resp:
            self.observe_token = None
            self.observe_path = None
        return resp

    def cancel_observing(self, response, explicit):  # pragma: no cover
        """
        Delete observing on the remote server.

        :param response: the last received response
        :param explicit: if explicitly cancel using token
        """
        # Fixed: this previously called
        # ``self.cancel_observe_token(self, response.token, explicit)``,
        # passing ``self`` as the token and shifting every argument one
        # position to the right.
        self.cancel_observe_token(response.token, explicit)

    def get(self, path, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a GET on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.GET, path)
        request.token = generate_random_token(2)

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout)

    def get_non(self, path, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a non-confirmable GET on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request_non(defines.Codes.GET, path)
        request.token = generate_random_token(2)

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout)

    def observe(self, path, callback, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a GET with observe on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon notifications
        :param timeout: the timeout of the request
        :return: the token identifying the observe relation
        """
        if self.observe_token is not None:
            raise RuntimeError("Only one active observation is allowed per client")
        self.observe_token = generate_random_token(2)
        self.observe_path = path

        request = self.mk_request(defines.Codes.GET, path)
        request.token = self.observe_token
        request.observe = 0

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        self.send_request(request, callback, timeout)
        return self.observe_token

    def delete(self, path, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a DELETE on a certain path.

        :param path: the path
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.DELETE, path)

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout)

    def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):  # pragma: no cover
        """
        Perform a POST on a certain path.

        :param path: the path
        :param payload: the request payload
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :param no_response: whether to suppress the response (sent as NON)
        :return: the response
        """
        request = self.mk_request(defines.Codes.POST, path)
        request.token = generate_random_token(2)
        request.payload = payload

        if no_response:
            request.add_no_response()
            request.type = defines.Types["NON"]

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout, no_response=no_response)

    def put(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):  # pragma: no cover
        """
        Perform a PUT on a certain path.

        :param path: the path
        :param payload: the request payload
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :param no_response: whether to suppress the response (sent as NON)
        :return: the response
        """
        request = self.mk_request(defines.Codes.PUT, path)
        request.token = generate_random_token(2)
        request.payload = payload

        if no_response:
            request.add_no_response()
            request.type = defines.Types["NON"]

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout, no_response=no_response)

    def discover(self, callback=None, timeout=None, **kwargs):  # pragma: no cover
        """
        Perform a Discover request on the server.

        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response
        """
        request = self.mk_request(defines.Codes.GET, defines.DISCOVERY_URL)

        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)

        return self.send_request(request, callback, timeout)

    def send_request(self, request, callback=None, timeout=None, no_response=False):  # pragma: no cover
        """
        Send a request to the remote server.

        :param request: the request to send
        :param callback: the callback function to invoke upon response
        :param timeout: the timeout of the request
        :return: the response (synchronous), or None on timeout / async dispatch
        """
        if callback is not None:
            thread = threading.Thread(target=self._thread_body, args=(request, callback))
            thread.start()
        else:
            self.protocol.send_message(request)
            if no_response:
                return
            try:
                response = self.queue.get(block=True, timeout=timeout)
            except Empty:
                # Timed out waiting for a response.
                response = None
            return response

    def send_empty(self, empty):  # pragma: no cover
        """
        Send empty message.

        :param empty: the empty message
        """
        self.protocol.send_message(empty)

    def mk_request(self, method, path):
        """
        Create a confirmable request.

        :param method: the CoAP method
        :param path: the path of the request
        :return: the request
        """
        request = Request()
        request.destination = self.server
        request.code = method.number
        request.uri_path = path
        return request

    def mk_request_non(self, method, path):
        """
        Create a non-confirmable request.

        :param method: the CoAP method
        :param path: the path of the request
        :return: the request
        """
        request = Request()
        request.destination = self.server
        request.code = method.number
        request.uri_path = path
        request.type = defines.Types["NON"]
        return request
| 34.100649 | 132 | 0.618776 | import random
from multiprocessing import Queue
from queue import Empty
import threading
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
__author__ = 'Giacomo Tanganelli'
class HelperClient(object):
    """Simplified synchronous/asynchronous client for a remote CoAP server."""

    def __init__(self, server, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
        """Create a client for *server*; *sock* may supply an external socket."""
        self.server = server
        self.protocol = CoAP(self.server, random.randint(1, 65535), self._wait_response, sock=sock,
                             cb_ignore_read_exception=cb_ignore_read_exception, cb_ignore_write_exception=cb_ignore_write_exception)
        self.queue = Queue()
        self.observe_token = None
        self.observe_path = None

    def _wait_response(self, message):
        """Queue final responses (block-transfer CONTINUE codes are skipped)."""
        if message is None or message.code != defines.Codes.CONTINUE.number:
            self.queue.put(message)

    def stop(self):
        """Stop the client and unblock any waiting thread."""
        self.protocol.close()
        self.queue.put(None)

    def close(self):
        """Close the client (alias for stop)."""
        self.stop()

    def _thread_body(self, request, callback):
        """Send *request* and feed every response to *callback* until stopped."""
        self.protocol.send_message(request)
        while not self.protocol.stopped.isSet():
            response = self.queue.get(block=True)
            callback(response)

    def cancel_observe_token(self, token=None, explicit=False, timeout=None):
        """Cancel an observation; if *explicit*, a GET with OBSERVE=1 is sent."""
        if token is None:
            token = self.observe_token
        if self.observe_token is None:
            return None
        self.protocol.end_observation(token)
        if not explicit:
            self.observe_path = None
            return
        request = self.mk_request(defines.Codes.GET, self.observe_path)
        request.token = token
        request.observe = 1
        resp = self.send_request(request, callback=None, timeout=timeout)
        if resp:
            self.observe_token = None
            self.observe_path = None
        return resp

    def cancel_observing(self, response, explicit):
        """Cancel the observation identified by *response*'s token."""
        # Fixed: ``self`` was previously passed as the first positional
        # argument, shifting token/explicit/timeout one place to the right.
        self.cancel_observe_token(response.token, explicit)

    def get(self, path, callback=None, timeout=None, **kwargs):
        """Perform a confirmable GET on *path*."""
        request = self.mk_request(defines.Codes.GET, path)
        request.token = generate_random_token(2)
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout)

    def get_non(self, path, callback=None, timeout=None, **kwargs):
        """Perform a non-confirmable GET on *path*."""
        request = self.mk_request_non(defines.Codes.GET, path)
        request.token = generate_random_token(2)
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout)

    def observe(self, path, callback, timeout=None, **kwargs):
        """Register an observation on *path*; returns the observe token."""
        if self.observe_token is not None:
            raise RuntimeError("Only one active observation is allowed per client")
        self.observe_token = generate_random_token(2)
        self.observe_path = path
        request = self.mk_request(defines.Codes.GET, path)
        request.token = self.observe_token
        request.observe = 0
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        self.send_request(request, callback, timeout)
        return self.observe_token

    def delete(self, path, callback=None, timeout=None, **kwargs):
        """Perform a DELETE on *path*."""
        request = self.mk_request(defines.Codes.DELETE, path)
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout)

    def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):
        """Perform a POST of *payload* on *path*."""
        request = self.mk_request(defines.Codes.POST, path)
        request.token = generate_random_token(2)
        request.payload = payload
        if no_response:
            request.add_no_response()
            request.type = defines.Types["NON"]
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout, no_response=no_response)

    def put(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):
        """Perform a PUT of *payload* on *path*."""
        request = self.mk_request(defines.Codes.PUT, path)
        request.token = generate_random_token(2)
        request.payload = payload
        if no_response:
            request.add_no_response()
            request.type = defines.Types["NON"]
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout, no_response=no_response)

    def discover(self, callback=None, timeout=None, **kwargs):
        """Perform resource discovery (GET on the well-known discovery URL)."""
        request = self.mk_request(defines.Codes.GET, defines.DISCOVERY_URL)
        for k, v in kwargs.items():
            if hasattr(request, k):
                setattr(request, k, v)
        return self.send_request(request, callback, timeout)

    def send_request(self, request, callback=None, timeout=None, no_response=False):
        """Send *request*; synchronous without a callback, threaded otherwise."""
        if callback is not None:
            thread = threading.Thread(target=self._thread_body, args=(request, callback))
            thread.start()
        else:
            self.protocol.send_message(request)
            if no_response:
                return
            try:
                response = self.queue.get(block=True, timeout=timeout)
            except Empty:
                # Timed out waiting for a response.
                response = None
            return response

    def send_empty(self, empty):
        """Send an empty CoAP message."""
        self.protocol.send_message(empty)

    def mk_request(self, method, path):
        """Build a confirmable request for *method* on *path*."""
        request = Request()
        request.destination = self.server
        request.code = method.number
        request.uri_path = path
        return request

    def mk_request_non(self, method, path):
        """Build a non-confirmable request for *method* on *path*."""
        request = Request()
        request.destination = self.server
        request.code = method.number
        request.uri_path = path
        request.type = defines.Types["NON"]
        return request
| true | true |
1c3cfed7abfd74cc40d5ab87af96c1b18c682ab1 | 4,934 | py | Python | research/object_detection/models/embedded_ssd_mobilenet_v2_feature_extractor.py | alexaway/object_detection_tf | b564b0a0b4e2bbfa82daf0b88becbd271296aff4 | [
"Apache-2.0"
] | null | null | null | research/object_detection/models/embedded_ssd_mobilenet_v2_feature_extractor.py | alexaway/object_detection_tf | b564b0a0b4e2bbfa82daf0b88becbd271296aff4 | [
"Apache-2.0"
] | null | null | null | research/object_detection/models/embedded_ssd_mobilenet_v2_feature_extractor.py | alexaway/object_detection_tf | b564b0a0b4e2bbfa82daf0b88becbd271296aff4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedded-friendly SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.models import feature_map_generators
from object_detection.models import ssd_mobilenet_v1_feature_extractor
from object_detection.utils import ops
from nets import mobilenet_v1
slim = tf.contrib.slim
class EmbeddedSSDMobileNetV2FeatureExtractor(
    ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor):
  """Embedded-friendly SSD Feature Extractor using MobilenetV1 features.

  NOTE(review): despite the V2 class name, this extractor is built on the
  MobilenetV1 base network (see mobilenet_v1_base below) -- confirm intent.

  This feature extractor is similar to SSD MobileNetV1 feature extractor, and
  it fixes input resolution to be 256x256, reduces the number of feature maps
  used for box prediction and ensures convolution kernel to be no larger
  than input tensor in spatial dimensions.

  This feature extractor requires support of the following ops if used in
  embedded devices:
  - Conv
  - DepthwiseConv
  - Relu6

  All conv/depthwiseconv use SAME padding, and no additional spatial padding is
  needed.
  """

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None):
    """MobileNetV1 Feature Extractor for Embedded-friendly SSD Models.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to. For EmbeddedSSD it must be set to 1.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a small batch size
        (e.g. 1), it is desirable to disable batch norm update and use
        pretrained batch norm params.
      reuse_weights: Whether to reuse variables. Default is None.

    Raises:
      ValueError: upon invalid `pad_to_multiple` values.
    """
    if pad_to_multiple != 1:
      raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
                       'of 1.')
    super(EmbeddedSSDMobileNetV2FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, batch_norm_trainable, reuse_weights)

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs.get_shape().assert_has_rank(4)
    shape_assert = tf.Assert(
        tf.logical_and(
            tf.equal(tf.shape(preprocessed_inputs)[1], 256),
            tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
        # Fixed: the message previously claimed 224, but the assert above
        # checks for a 256x256 input.
        ['image size must be 256 in both height and width.'])

    # First two maps come from backbone endpoints; the remaining entries
    # ('' layers) are extra convolutions with the given depth/kernel size.
    feature_map_layout = {
        'from_layer': [
            'Conv2d_9_pointwise', 'Conv2d_11_pointwise', '', '', ''
        ],
        'layer_depth': [-1, -1, 512, 512, 256],
        'conv_kernel_size': [-1, -1, 3, 3, 2],
    }

    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_11_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
| 39.472 | 80 | 0.689704 |
import tensorflow as tf
from object_detection.models import feature_map_generators
from object_detection.models import ssd_mobilenet_v1_feature_extractor
from object_detection.utils import ops
from nets import mobilenet_v1
slim = tf.contrib.slim
class EmbeddedSSDMobileNetV2FeatureExtractor(
    ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor):
  """Embedded-friendly SSD feature extractor with a fixed 256x256 input.

  NOTE(review): despite the V2 class name, this is built on
  mobilenet_v1_base -- confirm the naming is intentional.
  """

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None):
    """Builds the extractor; `pad_to_multiple` must be 1 for embedded SSD.

    Raises:
      ValueError: if `pad_to_multiple` is not 1.
    """
    if pad_to_multiple != 1:
      raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
                       'of 1.')
    super(EmbeddedSSDMobileNetV2FeatureExtractor, self).__init__(
        is_training, depth_multiplier, min_depth, pad_to_multiple,
        conv_hyperparams, batch_norm_trainable, reuse_weights)

  def extract_features(self, preprocessed_inputs):
    """Extracts multi-resolution SSD feature maps from a [batch, 256, 256, C] input."""
    preprocessed_inputs.get_shape().assert_has_rank(4)
    # Runtime check that the input really is 256x256.
    # NOTE(review): the assert checks 256 but the message says 224 --
    # the message text looks stale.
    shape_assert = tf.Assert(
        tf.logical_and(
            tf.equal(tf.shape(preprocessed_inputs)[1], 256),
            tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
        ['image size must be 224 in both height and width.'])
    # First two maps come from backbone endpoints; the remaining '' entries
    # are extra convolutions with the given depth/kernel size.
    feature_map_layout = {
        'from_layer': [
            'Conv2d_9_pointwise', 'Conv2d_11_pointwise', '', '', ''
        ],
        'layer_depth': [-1, -1, 512, 512, 256],
        'conv_kernel_size': [-1, -1, 3, 3, 2],
    }
    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_11_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)
    return feature_maps.values()
| true | true |
1c3cfee585e6e9b770943f5d2db88201edc2498c | 487 | py | Python | 0582 Kill Process.py | MdAbedin/leetcode | e835f2e716ea5fe87f30b84801ede9bc023749e7 | [
"MIT"
] | 4 | 2020-09-11T02:36:11.000Z | 2021-09-29T20:47:11.000Z | 0582 Kill Process.py | MdAbedin/leetcode | e835f2e716ea5fe87f30b84801ede9bc023749e7 | [
"MIT"
] | 3 | 2020-09-10T03:51:42.000Z | 2021-09-25T01:41:57.000Z | 0582 Kill Process.py | MdAbedin/leetcode | e835f2e716ea5fe87f30b84801ede9bc023749e7 | [
"MIT"
] | 6 | 2020-09-10T03:46:15.000Z | 2021-09-25T01:24:48.000Z | class Solution:
def killProcess(self, pid: List[int], ppid: List[int], kill: int) -> List[int]:
    """Return the ids of process *kill* and all of its descendants.

    Builds a parent -> children adjacency map, then walks it breadth-first
    starting from *kill*.
    """
    tree = defaultdict(set)
    for child, parent in zip(pid, ppid):
        tree[parent].add(child)
    killed = []
    frontier = deque([kill])
    while frontier:
        proc = frontier.popleft()
        killed.append(proc)
        frontier.extend(tree[proc])
    return killed
| 25.631579 | 83 | 0.447639 | class Solution:
def killProcess(self, pid: List[int], ppid: List[int], kill: int) -> List[int]:
    """Collect process *kill* and every descendant, in BFS order."""
    children = defaultdict(set)
    for i, parent in enumerate(ppid):
        children[parent].add(pid[i])
    # The output list doubles as the BFS queue; idx is the read cursor.
    order = [kill]
    idx = 0
    while idx < len(order):
        current = order[idx]
        idx += 1
        order.extend(children[current])
    return order
| true | true |
1c3cfffd2b5bd2349a0342b066c4b18a649ef76a | 632 | py | Python | orca/topology/alerts/properties.py | grzechukol/orca | 374f0b1a0f43c723aef702d8cc39d318020583a0 | [
"Apache-2.0"
] | 74 | 2019-12-01T21:15:37.000Z | 2022-03-22T09:33:12.000Z | orca/topology/alerts/properties.py | grzechukol/orca | 374f0b1a0f43c723aef702d8cc39d318020583a0 | [
"Apache-2.0"
] | 80 | 2020-03-25T08:56:52.000Z | 2021-07-11T09:53:27.000Z | orca/topology/alerts/properties.py | grzechukol/orca | 374f0b1a0f43c723aef702d8cc39d318020583a0 | [
"Apache-2.0"
] | 16 | 2020-02-24T06:13:04.000Z | 2021-05-30T03:36:33.000Z | # Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AlertStatus:
    """String constants for alert status values.

    NOTE(review): presumably UP means the alert is raised/active and DOWN
    that it has cleared -- confirm against consumers.
    """

    UP = 'up'
    DOWN = 'down'
| 33.263158 | 74 | 0.745253 |
class AlertStatus:
    """String constants for alert status values.

    NOTE(review): presumably UP means the alert is raised/active and DOWN
    that it has cleared -- confirm against consumers.
    """

    UP = 'up'
    DOWN = 'down'
| true | true |
1c3d0020a906b798dea8dbc1ac98c15bbe83e11a | 3,250 | py | Python | geometry/geometry.py | nulano/osm-map-viewer | 1a4a4f3473cb83ce714fe3de370c7a0e904a5ea9 | [
"Apache-2.0"
] | null | null | null | geometry/geometry.py | nulano/osm-map-viewer | 1a4a4f3473cb83ce714fe3de370c7a0e904a5ea9 | [
"Apache-2.0"
] | null | null | null | geometry/geometry.py | nulano/osm-map-viewer | 1a4a4f3473cb83ce714fe3de370c7a0e904a5ea9 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from typing import List, Tuple
import numpy as np
# A 2-D point as an (x, y) pair.
_Point = Tuple[float, float]
# A polygon as an ordered list of vertices; the closing vertex is optional.
_Polygon = List[_Point]
def _norm_polygon(polygon: _Polygon):
if len(polygon) == 0:
raise ValueError('Empty polygon')
if polygon[0] != polygon[-1]:
polygon = polygon + [polygon[0]]
return np.array(polygon)
def distance(a: _Point, b: _Point):
    """Euclidean distance between points *a* and *b*."""
    delta = np.array(b) - np.array(a)
    return np.linalg.norm(delta)
def _polygon_raw_area(polygon: np.ndarray):
return np.dot(polygon[:-1, 0], polygon[1:, 1]) - np.dot(polygon[:-1, 1], polygon[1:, 0])
def polygon_area(polygon: List[_Point]):
    """Absolute area enclosed by *polygon*, independent of winding order.

    The ring is shifted so its first vertex sits at the origin, which keeps
    the shoelace products small and limits floating-point error.
    """
    shifted = _norm_polygon(polygon) - polygon[0]
    return 0.5 * np.abs(_polygon_raw_area(shifted))
def polygon_centroid(polygon: List[_Point]):
    """Centroid (center of mass) of the region enclosed by *polygon*.

    The standard centroid formula is very sensitive to rounding, so the
    polygon is first shifted to put its first vertex at the origin and the
    offset is added back to the result.
    """
    closed = _norm_polygon(polygon)
    offset = closed[0]
    shifted = closed - offset
    heads = shifted[:-1]
    tails = shifted[1:]
    cross = (heads[:, 0] * tails[:, 1]) - (tails[:, 0] * heads[:, 1])
    weighted = np.sum((heads + tails) * cross[:, None], axis=0)
    return weighted / 3 / _polygon_raw_area(shifted) + offset
def _ray_trace(point: np.ndarray, polygon: np.ndarray):
poly = polygon - point
hits = []
for i, (a, b) in enumerate(zip(poly[:-1], poly[1:])):
# ensure Ay <= By
if a[1] > b[1]:
b, a = a, b
# 0 <= Ax - Ay * (Bx - Ax) / (By - Ay) <==> 0 <= Ax * (By - Ay) - Ay * (Bx - Ax)
x_over_dy = np.cross(a, b - a)
if a[1] <= 0 < b[1] and 0 <= x_over_dy:
hit = np.array([x_over_dy / (b - a)[1], 0]) + point
hits.append(namedtuple('Hit', ['point', 'a', 'b'])(hit, i, i + 1))
return hits
def point_in_polygon(point: _Point, polygon: List[_Point]):
    """True iff *point* lies inside *polygon* (odd-crossings ray test)."""
    crossings = _ray_trace(np.array(point), _norm_polygon(polygon))
    return len(crossings) % 2 == 1
def polygons_to_wsps(multipolygon: List[List[_Point]]):
    """Merge a multipolygon into weakly simple polygons.

    Rings that lie inside an already-emitted ring are treated as holes and
    spliced into the enclosing polygon along a horizontal bridge found by
    ray tracing, so the result needs no explicit hole lists.
    """
    polygons = []
    for polygon in multipolygon:
        polygon = _norm_polygon(polygon)
        # ensure points are CCW:
        if _polygon_raw_area(polygon) < 0:
            polygon = np.flip(polygon, axis=0)
        # ensure right-most point is first:
        polygon = np.roll(polygon[:-1], -polygon[:-1, 0].argmax(), axis=0)
        polygons.append(np.concatenate([polygon, [polygon[0]]]))
    out = []
    # Sort by right-most x, descending: an enclosing ring's right-most point
    # is at least as far right as any ring inside it, so it is emitted first.
    for polygon in sorted(polygons, key=lambda p: p[0, 0], reverse=True):
        # Find the nearest edge hit when shooting a +x ray from this ring's
        # right-most vertex through the already-emitted polygons.
        best_hit, best_hit_poly = None, None
        for poly_out_index, poly_out in enumerate(out):
            for hit in _ray_trace(polygon[0], poly_out):
                if best_hit is None or hit.point[0] < best_hit.point[0]:
                    best_hit, best_hit_poly = hit, poly_out_index
        # NOTE(review): the y-ascending edge test presumably distinguishes a
        # ring that is inside the hit polygon from one merely left of it --
        # confirm against the algorithm's derivation.
        if best_hit is not None and out[best_hit_poly][best_hit.a][1] < out[best_hit_poly][best_hit.b][1]:
            # Splice this ring (reversed, so it winds CW as a hole) into the
            # enclosing polygon along a doubled bridge to the hit point.
            out[best_hit_poly] = np.concatenate([
                out[best_hit_poly][:best_hit.a + 1, ],
                [best_hit.point],
                polygon[::-1, ],
                [best_hit.point],
                out[best_hit_poly][best_hit.b:, ]
            ], axis=0)
        else:
            out.append(polygon)
    return [[(x, y) for x, y in polygon] for polygon in out]
| 36.516854 | 114 | 0.585538 | from collections import namedtuple
from typing import List, Tuple
import numpy as np
_Point = Tuple[float, float]
_Polygon = List[_Point]
def _norm_polygon(polygon: _Polygon):
if len(polygon) == 0:
raise ValueError('Empty polygon')
if polygon[0] != polygon[-1]:
polygon = polygon + [polygon[0]]
return np.array(polygon)
def distance(a: _Point, b: _Point):
a, b = np.array(a), np.array(b)
return np.linalg.norm(b - a)
def _polygon_raw_area(polygon: np.ndarray):
return np.dot(polygon[:-1, 0], polygon[1:, 1]) - np.dot(polygon[:-1, 1], polygon[1:, 0])
def polygon_area(polygon: List[_Point]):
polygon = _norm_polygon(polygon) - polygon[0]
return 0.5 * np.abs(_polygon_raw_area(polygon))
def polygon_centroid(polygon: List[_Point]):
polygon = _norm_polygon(polygon)
offset, polygon = polygon[0], polygon - polygon[0]
return np.sum((polygon[:-1] + polygon[1:]) *
((polygon[:-1, 0] * polygon[1:, 1]) - (polygon[1:, 0] * polygon[:-1, 1]))[:, None], axis=0) \
/ 3 / _polygon_raw_area(polygon) + offset
def _ray_trace(point: np.ndarray, polygon: np.ndarray):
poly = polygon - point
hits = []
for i, (a, b) in enumerate(zip(poly[:-1], poly[1:])):
if a[1] > b[1]:
b, a = a, b
x_over_dy = np.cross(a, b - a)
if a[1] <= 0 < b[1] and 0 <= x_over_dy:
hit = np.array([x_over_dy / (b - a)[1], 0]) + point
hits.append(namedtuple('Hit', ['point', 'a', 'b'])(hit, i, i + 1))
return hits
def point_in_polygon(point: _Point, polygon: List[_Point]):
return len(_ray_trace(np.array(point), _norm_polygon(polygon))) % 2 == 1
def polygons_to_wsps(multipolygon: List[List[_Point]]):
polygons = []
for polygon in multipolygon:
polygon = _norm_polygon(polygon)
if _polygon_raw_area(polygon) < 0:
polygon = np.flip(polygon, axis=0)
polygon = np.roll(polygon[:-1], -polygon[:-1, 0].argmax(), axis=0)
polygons.append(np.concatenate([polygon, [polygon[0]]]))
out = []
for polygon in sorted(polygons, key=lambda p: p[0, 0], reverse=True):
best_hit, best_hit_poly = None, None
for poly_out_index, poly_out in enumerate(out):
for hit in _ray_trace(polygon[0], poly_out):
if best_hit is None or hit.point[0] < best_hit.point[0]:
best_hit, best_hit_poly = hit, poly_out_index
if best_hit is not None and out[best_hit_poly][best_hit.a][1] < out[best_hit_poly][best_hit.b][1]:
out[best_hit_poly] = np.concatenate([
out[best_hit_poly][:best_hit.a + 1, ],
[best_hit.point],
polygon[::-1, ],
[best_hit.point],
out[best_hit_poly][best_hit.b:, ]
], axis=0)
else:
out.append(polygon)
return [[(x, y) for x, y in polygon] for polygon in out]
| true | true |
1c3d00deadda82cbcaac774421c60a2ba9c80f49 | 2,242 | py | Python | py_wake/deflection_models/jimenez.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 30 | 2019-03-18T14:10:27.000Z | 2022-03-13T17:39:04.000Z | py_wake/deflection_models/jimenez.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 1 | 2020-11-12T06:13:00.000Z | 2020-11-12T06:43:26.000Z | py_wake/deflection_models/jimenez.py | aemoser/PyWake | 889a2c10882195af21339e9bcf2ede0db9b58319 | [
"MIT"
] | 20 | 2019-01-11T14:45:13.000Z | 2021-12-13T19:55:29.000Z | from numpy import newaxis as na
import numpy as np
from py_wake.deflection_models import DeflectionModel
class JimenezWakeDeflection(DeflectionModel):
    """Wake-deflection model for yawed/tilted turbines.

    Implemented according to
        Jiménez, Á., Crespo, A. and Migoya, E. (2010), Application of a LES technique to characterize
        the wake deflection of a wind turbine in yaw. Wind Energ., 13: 559-572. doi:10.1002/we.380

    Index convention (hedged from the suffixes; confirm against py_wake docs):
    i = source turbine, j = destination point, l = wind direction, k = wind
    speed, x = integration sample along the downwind distance.
    """
    args4deflection = ['D_src_il', 'yaw_ilk', 'ct_ilk', 'tilt_ilk']
    def __init__(self, N=20, beta=.1):
        # N: number of integration samples along the downwind axis
        # beta: wake-expansion coefficient of the Jiménez model
        self.beta = beta
        self.N = N
    def calc_deflection(self, dw_ijl, hcw_ijl, dh_ijl, D_src_il, yaw_ilk, tilt_ilk, ct_ilk, **kwargs):
        """Return (downwind, horizontal-crosswind, vertical) distances with deflection applied.

        Also stores ``self.hcw_ijlk`` / ``self.dh_ijlk`` as side effects.
        """
        # Log-spaced samples in [0, 1], denser near the rotor where the skew
        # angle changes fastest; scaled by each downwind distance below.
        dw_lst = (np.logspace(0, 1.1, self.N) - 1) / (10**1.1 - 1)
        dw_ijxl = dw_ijl[:, :, na] * dw_lst[na, na, :, na]
        theta_yaw_ilk, theta_tilt_ilk = np.deg2rad(yaw_ilk), np.deg2rad(-tilt_ilk)
        # Combined misalignment magnitude and the direction of the deflection
        # in the crossflow plane.
        theta_ilk = np.sqrt(theta_yaw_ilk**2 + theta_tilt_ilk**2)
        theta_deflection_ilk = np.arctan2(theta_tilt_ilk, theta_yaw_ilk)
        # NOTE(review): despite the names, `denominator_ilk` is used as the
        # numerator of `alpha` and `nominator_ijxl` as its denominator —
        # the names appear swapped; verify before relying on them.
        denominator_ilk = np.cos(theta_ilk)**2 * np.sin(theta_ilk) * (ct_ilk / 2)
        nominator_ijxl = (1 + (self.beta / D_src_il)[:, na, na, :] * np.maximum(dw_ijxl, 0))**2
        alpha = denominator_ilk[:, na, na] / nominator_ijxl[..., na]
        # Integrate the skew angle over downwind distance to get the lateral
        # displacement of the wake center.
        deflection_ijlk = np.trapz(np.sin(alpha), dw_ijxl[..., na], axis=2)
        # Project the displacement onto the horizontal and vertical axes.
        self.hcw_ijlk = hcw_ijl[..., na] + deflection_ijlk * np.cos(theta_deflection_ilk[:, na])
        self.dh_ijlk = dh_ijl[..., na] + deflection_ijlk * np.sin(theta_deflection_ilk[:, na])
        return dw_ijl[..., na], self.hcw_ijlk, self.dh_ijlk
def main():
    """Demo: run a 3-turbine Fuga simulation with Jiménez deflection and plot the wake map.

    Guarded by ``__name__`` inside the function so that importing this module
    (which calls ``main()`` at the bottom) does nothing.
    """
    if __name__ == '__main__':
        from py_wake import Fuga
        from py_wake.examples.data.iea37._iea37 import IEA37Site, IEA37_WindTurbines
        site = IEA37Site(16)
        x, y = [0, 600, 1200], [0, 0, 0] # site.initial_position[:2].T
        windTurbines = IEA37_WindTurbines()
        from py_wake.tests.test_files import tfp
        # Pre-computed Fuga look-up tables for a 2 MW turbine.
        path = tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/'
        noj = Fuga(path, site, windTurbines, deflectionModel=JimenezWakeDeflection())
        # Opposite yaw on the two upstream turbines steers their wakes apart.
        yaw = [-30, 30, 0]
        noj(x, y, yaw=yaw, wd=270, ws=10).flow_map().plot_wake_map()
        import matplotlib.pyplot as plt
        plt.show()
main()
| 44.84 | 102 | 0.641838 | from numpy import newaxis as na
import numpy as np
from py_wake.deflection_models import DeflectionModel
class JimenezWakeDeflection(DeflectionModel):
args4deflection = ['D_src_il', 'yaw_ilk', 'ct_ilk', 'tilt_ilk']
def __init__(self, N=20, beta=.1):
self.beta = beta
self.N = N
def calc_deflection(self, dw_ijl, hcw_ijl, dh_ijl, D_src_il, yaw_ilk, tilt_ilk, ct_ilk, **kwargs):
dw_lst = (np.logspace(0, 1.1, self.N) - 1) / (10**1.1 - 1)
dw_ijxl = dw_ijl[:, :, na] * dw_lst[na, na, :, na]
theta_yaw_ilk, theta_tilt_ilk = np.deg2rad(yaw_ilk), np.deg2rad(-tilt_ilk)
theta_ilk = np.sqrt(theta_yaw_ilk**2 + theta_tilt_ilk**2)
theta_deflection_ilk = np.arctan2(theta_tilt_ilk, theta_yaw_ilk)
denominator_ilk = np.cos(theta_ilk)**2 * np.sin(theta_ilk) * (ct_ilk / 2)
nominator_ijxl = (1 + (self.beta / D_src_il)[:, na, na, :] * np.maximum(dw_ijxl, 0))**2
alpha = denominator_ilk[:, na, na] / nominator_ijxl[..., na]
deflection_ijlk = np.trapz(np.sin(alpha), dw_ijxl[..., na], axis=2)
self.hcw_ijlk = hcw_ijl[..., na] + deflection_ijlk * np.cos(theta_deflection_ilk[:, na])
self.dh_ijlk = dh_ijl[..., na] + deflection_ijlk * np.sin(theta_deflection_ilk[:, na])
return dw_ijl[..., na], self.hcw_ijlk, self.dh_ijlk
def main():
if __name__ == '__main__':
from py_wake import Fuga
from py_wake.examples.data.iea37._iea37 import IEA37Site, IEA37_WindTurbines
site = IEA37Site(16)
x, y = [0, 600, 1200], [0, 0, 0]
windTurbines = IEA37_WindTurbines()
from py_wake.tests.test_files import tfp
path = tfp + 'fuga/2MW/Z0=0.03000000Zi=00401Zeta0=0.00E+00/'
noj = Fuga(path, site, windTurbines, deflectionModel=JimenezWakeDeflection())
yaw = [-30, 30, 0]
noj(x, y, yaw=yaw, wd=270, ws=10).flow_map().plot_wake_map()
import matplotlib.pyplot as plt
plt.show()
main()
| true | true |
1c3d014598670507432398bcd06b1750ca4b0e5f | 2,337 | py | Python | src/encoded/types/antibody.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 11 | 2016-11-23T02:33:13.000Z | 2021-06-18T14:21:20.000Z | src/encoded/types/antibody.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 1,159 | 2016-11-21T15:40:24.000Z | 2022-03-29T03:18:38.000Z | src/encoded/types/antibody.py | 4dn-dcic/fourfron | 29601961706d2371b982e57ae085e8ebec3b2714 | [
"MIT"
] | 5 | 2017-01-27T16:36:15.000Z | 2019-06-14T14:39:54.000Z | """The type file for the collection Antibody.
logic for autopopulating 'antibody_id' unique key upon update or create
"""
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
get_item_or_none,
lab_award_attribution_embed_list
)
from .dependencies import DependencyEmbedder
import string
import re
def _build_antibody_embedded_list():
    """Build the embedded list for the Antibody item type.

    Combines the base Item embeds, the lab/award attribution embeds, the
    default embeds for the ``antibody_target`` bio_feature linkTo, and the
    vendor title.  All types should implement a function like this going
    forward.
    """
    antibody_target_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='antibody_target', t='bio_feature')
    return Item.embedded_list + lab_award_attribution_embed_list + antibody_target_embeds + [
        # Vendor linkTo
        'antibody_vendor.title'
    ]
@collection(
    name='antibodys',
    unique_key='antibody:antibody_id',
    properties={
        'title': 'Antibodies',
        'description': 'Listing of antibodies',
    })
class Antibody(Item):
    """Antibody item; its unique ``antibody_id`` key is derived on save."""
    item_type = 'antibody'
    schema = load_schema('encoded:schemas/antibody.json')
    name_key = 'antibody_id'
    embedded_list = _build_antibody_embedded_list()
    def _update(self, properties, sheets=None):
        # Auto-populate antibody_id from antibody_name + antibody_product_no,
        # collapsing runs of punctuation/whitespace into single hyphens so the
        # result is URL-safe.
        exclude = re.escape(string.punctuation)
        regex = r"[" + exclude + r"\s]+"
        abid = properties['antibody_name'] + '-' + properties['antibody_product_no']
        abid = re.sub(regex, "-", abid)
        properties['antibody_id'] = abid
        super(Antibody, self)._update(properties, sheets)
    @calculated_property(schema={
        "title": "Display Title",
        "description": "A calculated title for every object in 4DN",
        "type": "string"
    })
    def display_title(self, request, antibody_name, antibody_product_no=None, antibody_vendor=None):
        """Return "name (vendor title, product no)" with the parenthetical parts optional."""
        antibody_details = []
        if antibody_vendor:
            # NOTE(review): assumes the vendor lookup succeeds; get_item_or_none
            # returning None would raise AttributeError here.
            antibody_details.append(get_item_or_none(request, antibody_vendor, 'vendor').get('display_title'))
        if antibody_product_no:
            antibody_details.append(antibody_product_no)
        if antibody_details:
            antibody_name += ' ({})'.format(', '.join(antibody_details))
        return antibody_name
| 33.869565 | 117 | 0.685066 | from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
get_item_or_none,
lab_award_attribution_embed_list
)
from .dependencies import DependencyEmbedder
import string
import re
def _build_antibody_embedded_list():
antibody_target_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='antibody_target', t='bio_feature')
return Item.embedded_list + lab_award_attribution_embed_list + antibody_target_embeds + [
'antibody_vendor.title'
]
@collection(
name='antibodys',
unique_key='antibody:antibody_id',
properties={
'title': 'Antibodies',
'description': 'Listing of antibodies',
})
class Antibody(Item):
item_type = 'antibody'
schema = load_schema('encoded:schemas/antibody.json')
name_key = 'antibody_id'
embedded_list = _build_antibody_embedded_list()
def _update(self, properties, sheets=None):
exclude = re.escape(string.punctuation)
regex = r"[" + exclude + r"\s]+"
abid = properties['antibody_name'] + '-' + properties['antibody_product_no']
abid = re.sub(regex, "-", abid)
properties['antibody_id'] = abid
super(Antibody, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, request, antibody_name, antibody_product_no=None, antibody_vendor=None):
antibody_details = []
if antibody_vendor:
antibody_details.append(get_item_or_none(request, antibody_vendor, 'vendor').get('display_title'))
if antibody_product_no:
antibody_details.append(antibody_product_no)
if antibody_details:
antibody_name += ' ({})'.format(', '.join(antibody_details))
return antibody_name
| true | true |
1c3d02c39b937cff1418c4a345c46b0064bb3a26 | 10,965 | py | Python | models/2-Gleb/train/src/sampler.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | models/2-Gleb/train/src/sampler.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | models/2-Gleb/train/src/sampler.py | navekshasood/HuBMAP---Hacking-the-Kidney | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | [
"MIT"
] | null | null | null | import random
import numpy as np
from PIL import Image
from typing import List, Tuple
from functools import partial
import rasterio
from shapely import geometry
from rasterio.windows import Window
from tf_reader import TFReader
from utils import jread, get_basics_rasterio, json_record_to_poly, flatten_2dlist, get_cortex_polygons, gen_pt_in_poly
class GdalSampler:
    """Iterates over an annotated image, yielding one (img, mask) crop per
    annotated polygon, each of size ``img_wh`` centered on the polygon centroid.
    Yields (img, mask, border) when ``border_path`` is given.
    """
    def __init__(self, img_path: str,
                 mask_path: str,
                 img_polygons_path: str,
                 img_wh: Tuple[int, int],
                 border_path=None,
                 rand_shift_range: Tuple[int, int] = (0, 0)) -> Tuple[np.ndarray, np.ndarray]:
        """If rand_shift_range ~ (0,0), then centroid of glomerulus corresponds
        centroid of output sample.  (NOTE(review): ``_rand_shift_range`` is
        stored but never applied in ``__getitem__``.)
        """
        self._records_json = jread(img_polygons_path)
        self._mask = TFReader(mask_path)
        self._img = TFReader(img_path)
        self._border = TFReader(border_path) if border_path is not None else None
        self._wh = img_wh
        self._count = -1
        self._rand_shift_range = rand_shift_range
        # Get 1d list of polygons (each JSON record may contain several rings).
        polygons = flatten_2dlist([json_record_to_poly(record) for record in self._records_json])
        self._polygons_centroid = [np.round(polygon.centroid) for polygon in polygons]
    def __iter__(self):
        return self
    def __len__(self):
        return len(self._records_json)
    def __next__(self):
        self._count += 1
        if self._count < len(self._records_json):
            return self.__getitem__(self._count)
        else:
            # Reset so the sampler can be iterated again.
            self._count = -1
            raise StopIteration("Failed to proceed to the next step")
    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        y,x = self._polygons_centroid[idx]
        w,h = self._wh
        y,x = y-h//2, x-w//2 # align center of crop with poly
        # Window as ((row_start, row_stop), (col_start, col_stop));
        # boundless=True pads reads that fall outside the raster.
        window = ((x, x+w),(y, y+h))
        img = self._img.read(window=window, boundless=True)
        mask = self._mask.read(window=window, boundless=True)
        if self._border is not None:
            return img, mask, self._border.read(window=window, boundless=True)
        return img, mask
    def __del__(self):
        # Explicitly drop the readers so their file handles are released.
        del self._mask
        del self._img
class BackgroundSampler:
    """Generates ``num_samples`` (img, mask) crops that contain (almost) no
    glomeruli, by rejection-sampling crop centers against the mask.
    """
    def __init__(self,
                 img_path: str,
                 mask_path: str,
                 polygons: List[geometry.Polygon],
                 img_wh: Tuple[int, int],
                 num_samples: int,
                 step: int = 25,
                 max_trials: int = 25,
                 mask_glom_val: int = 255,
                 buffer_dist: int = 0,
                 border_path=None,
                 strict_mode=True
                 ) -> Tuple[np.ndarray, np.ndarray]:
        """
        max_trials: max number of trials per one iteration
        step: num of glomeruli between iterations
        mask_glom_value: mask pixel value containing glomerulus
        buffer_dist: dilation distance applied to each sampling polygon
        strict_mode: when False, accept the first sampled point unconditionally
        Example:
            # Get list of cortex polygons
            polygons = utils.get_cortex_polygons(utils.jread(img_anot_struct_path))
        """
        self._mask = TFReader(mask_path)
        self.mask_path = mask_path
        self._img = TFReader(img_path)
        self._border = rasterio.open(border_path) if border_path is not None else None
        self._polygons = [poly.buffer(buffer_dist) for poly in polygons] if polygons else None # Dilate if any
        self._w, self._h = img_wh
        self._num_samples = num_samples
        self._mask_glom_val = mask_glom_val
        self._boundless = True
        self._count = -1
        self._step = step
        self._max_trials = max_trials
        self._strict_mode = strict_mode
        # Get list of centroids up front (this is where all the sampling work
        # happens; iteration afterwards only reads windows).
        self._centroids = [self.gen_backgr_pt() for _ in range(num_samples)]
    def gen_pt_in_img(self):
        """Fallback used when no polygons were given: a random point in the image.

        NOTE(review): adding ``self._w`` / ``self._h`` can push the point past
        the right/bottom edge — looks off; boundless reads mask the issue.
        """
        W, H = self._img.shape
        pt = np.random.random() * W + self._w, np.random.random() * H + self._h # lazy
        return pt
    def gen_backgr_pt(self) -> Tuple[int, int]:
        """Generates background point.
        Idea is to take only <self._max_trials> trials, if point has not been found, then increment permissible
        num of glomeruli inside background by <self._step>.
        """
        glom_presence_in_backgr, trial = 0, 0
        # Sample inside a random polygon when available, otherwise anywhere.
        gen = partial(gen_pt_in_poly, polygon=random.choice(self._polygons), max_num_attempts=200) \
            if self._polygons is not None else self.gen_pt_in_img
        while True:
            rand_pt = gen()
            x_cent, y_cent = np.array(rand_pt).astype(int)
            x_off, y_off = x_cent - self._w // 2, y_cent - self._h // 2
            # Reverse x and y, because gdal return C H W
            window = Window(x_off, y_off, self._w, self._h)
            sample_mask = self._mask.read(window=window, boundless=self._boundless)
            trial += 1
            if self._strict_mode:
                # Accept when the crop's total mask value is within the current
                # tolerance; otherwise relax the tolerance every max_trials tries.
                if np.sum(sample_mask) <= glom_presence_in_backgr * self._mask_glom_val:
                    return x_cent, y_cent
                elif trial == self._max_trials:
                    trial, glom_presence_in_backgr = 0, glom_presence_in_backgr + self._step
            else:
                return x_cent, y_cent
    def __iter__(self):
        return self
    def __len__(self):
        return self._num_samples
    def __next__(self):
        self._count += 1
        if self._count < self._num_samples:
            return self.__getitem__(self._count)
        else:
            # Reset so the sampler can be iterated again.
            self._count = -1
            raise StopIteration("Failed to proceed to the next step")
    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        x_off = self._centroids[idx][0] - self._w // 2
        y_off = self._centroids[idx][1] - self._h // 2
        window = Window(x_off, y_off, self._w, self._h)
        img = self._img.read(window=window, boundless=self._boundless)
        mask = self._mask.read(window=window, boundless=self._boundless)
        if self._border is not None:
            return img, mask, self._border.read(window=window, boundless=True)
        return img, mask
    def __del__(self):
        # Explicitly drop the readers so their file handles are released.
        del self._mask
        del self._img
class PolySampler:
    """Generates ``num_samples`` image crops centered at random points inside
    the given polygons (no mask is returned).
    """
    def __init__(self,
                 img_path: str,
                 polygons: List[geometry.Polygon],
                 img_wh: Tuple[int, int],
                 num_samples: int,
                 ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Example:
            # Get list of cortex polygons
            polygons = utils.get_cortex_polygons(utils.jread(img_anot_struct_path))
        """
        buffer_dist = 0  # no dilation; kept for parity with BackgroundSampler
        self._img = rasterio.open(img_path)
        self._polygons = [poly.buffer(buffer_dist) for poly in polygons]
        self._w, self._h = img_wh
        self._num_samples = num_samples
        self._boundless = True
        self._count = -1
    def gen_pt(self) -> Tuple[int, int]:
        """Return an integer point sampled inside a randomly chosen polygon."""
        # TODO refact
        gen = partial(gen_pt_in_poly, random.choice(self._polygons))
        rand_pt = gen()
        x_cent, y_cent = np.array(rand_pt).astype(int)
        return x_cent, y_cent
    def __next__(self):
        self._count += 1
        if self._count < self._num_samples:
            return self.__getitem__(self._count)
        else:
            # Reset so the sampler can be iterated again.
            self._count = -1
            raise StopIteration("Failed to proceed to the next step")
    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        # idx is ignored: every access draws a fresh random point.
        x_cent, y_cent = self.gen_pt()
        x_off = x_cent - self._w // 2
        y_off = y_cent - self._h // 2
        window= Window(x_off, y_off, self._w, self._h)
        img = self._img.read(window=window, boundless=self._boundless)
        return img
    def __iter__(self): return self
    def __len__(self): return self._num_samples
    def __del__(self): del self._img
class GridSampler:
    """Iterates over an image/mask pair in a regular grid of ``img_wh`` tiles,
    yielding one (img, mask) pair per tile (edge tiles are read boundlessly).
    """
    def __init__(self,
                 img_path: str,
                 mask_path: str,
                 img_wh: Tuple[int, int],
                 ) -> Tuple[np.ndarray, np.ndarray]:
        self._mask = TFReader(mask_path)
        self._img = TFReader(img_path)
        self._w, self._h = img_wh
        self._boundless = True
        self._count = -1
        # Precompute all tile offsets covering the raster.
        _, dims, *_ = get_basics_rasterio(img_path)
        self.block_cds = list(generate_block_coords(dims[0], dims[1], img_wh))
        self._num_samples = len(self.block_cds)
    def __iter__(self): return self
    def __len__(self): return self._num_samples
    def __next__(self):
        self._count += 1
        if self._count < self._num_samples:
            return self.__getitem__(self._count)
        else:
            # Reset so the sampler can be iterated again.
            self._count = -1
            raise StopIteration("Failed to proceed to the next step")
    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        y_off, x_off, _, _ = self.block_cds[idx]
        window = Window(x_off, y_off, self._w, self._h)
        img = self._img.read(window=window, boundless=self._boundless)
        mask = self._mask.read(window=window, boundless=self._boundless)
        return img, mask
    def __del__(self):
        # Explicitly drop the readers so their file handles are released.
        del self._mask
        del self._img
def _write_block(block, name):
    """Save one (x, y, CHW-array) block as ``output/{name}_{x}_{y}.png``.

    Assumes the ``output/`` directory already exists — TODO confirm.
    """
    x, y, block_data = block
    #print(name, x,y,block_data.shape, block_data.dtype)
    # CHW -> HWC for PIL.
    t = Image.fromarray(block_data.transpose((1,2,0)))
    t.save(f'output/{name}_{x}_{y}.png')
def tif_block_read(name, block_size=None):
    """Yield (X, Y, block) for every tile of raster *name*.

    Blocks are read for bands 1-3 only; the last row/column of tiles is
    truncated to the raster edge rather than padded.
    """
    if block_size is None: block_size = (256, 256)
    input_file, (W,H), _ = get_basics_rasterio(name)
    nXBlocks, nYBlocks = _count_blocks(name, block_size=block_size)
    nXValid, nYValid = block_size[0], block_size[1]
    for X in range(nXBlocks):
        # Shrink the final column of tiles to the raster boundary.
        if X == nXBlocks - 1: nXValid = W - X * block_size[0]
        myX = X * block_size[0]
        nYValid = block_size[1]
        for Y in range(nYBlocks):
            # Shrink the final row of tiles to the raster boundary.
            if Y == nYBlocks - 1: nYValid = H - Y * block_size[1]
            myY = Y * block_size[1]
            window = Window(myY, myX, nYValid, nXValid)
            block = input_file.read([1,2,3], window=window)
            #print(myX, myY, nXValid, nYValid, W, H, block.shape)
            yield X, Y, block
    del input_file
def _count_blocks(name, block_size=(256, 256)):
    """Return (nXBlocks, nYBlocks): how many tiles of *block_size* are needed
    to cover raster *name* along each axis.

    Uses integer ceiling division; the previous ``int((a + b - 1) / b)`` went
    through float division, which can lose precision for very large rasters.
    """
    _, dims, *_ = get_basics_rasterio(name)
    nXBlocks = (dims[0] + block_size[0] - 1) // block_size[0]
    nYBlocks = (dims[1] + block_size[1] - 1) // block_size[1]
    return nXBlocks, nYBlocks
def generate_block_coords(H, W, block_size):
    """Yield (y_offset, x_offset, block_height, block_width) tiles covering an
    H x W grid, column-major (all rows of column 0, then column 1, ...).

    Edge tiles keep the full block size; callers read them with boundless
    windows.  Fixes a stride/count mismatch in the original: the number of
    tiles along an axis was computed from one block dimension while the
    offset stride used the other (``cx = X * h``, ``cy = Y * w``) — identical
    for square tiles, wrong for rectangular ones.
    """
    h, w = block_size
    # Integer ceiling division: tiles needed along each axis.
    nYBlocks = (H + h - 1) // h
    nXBlocks = (W + w - 1) // w
    for X in range(nXBlocks):
        cx = X * w  # stride along the W axis by the tile width
        for Y in range(nYBlocks):
            cy = Y * h  # stride along the H axis by the tile height
            yield cy, cx, h, w
| 34.158879 | 118 | 0.599088 | import random
import numpy as np
from PIL import Image
from typing import List, Tuple
from functools import partial
import rasterio
from shapely import geometry
from rasterio.windows import Window
from tf_reader import TFReader
from utils import jread, get_basics_rasterio, json_record_to_poly, flatten_2dlist, get_cortex_polygons, gen_pt_in_poly
class GdalSampler:
def __init__(self, img_path: str,
mask_path: str,
img_polygons_path: str,
img_wh: Tuple[int, int],
border_path=None,
rand_shift_range: Tuple[int, int] = (0, 0)) -> Tuple[np.ndarray, np.ndarray]:
self._records_json = jread(img_polygons_path)
self._mask = TFReader(mask_path)
self._img = TFReader(img_path)
self._border = TFReader(border_path) if border_path is not None else None
self._wh = img_wh
self._count = -1
self._rand_shift_range = rand_shift_range
polygons = flatten_2dlist([json_record_to_poly(record) for record in self._records_json])
self._polygons_centroid = [np.round(polygon.centroid) for polygon in polygons]
def __iter__(self):
return self
def __len__(self):
return len(self._records_json)
def __next__(self):
self._count += 1
if self._count < len(self._records_json):
return self.__getitem__(self._count)
else:
self._count = -1
raise StopIteration("Failed to proceed to the next step")
def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
y,x = self._polygons_centroid[idx]
w,h = self._wh
y,x = y-h//2, x-w//2
window = ((x, x+w),(y, y+h))
img = self._img.read(window=window, boundless=True)
mask = self._mask.read(window=window, boundless=True)
if self._border is not None:
return img, mask, self._border.read(window=window, boundless=True)
return img, mask
def __del__(self):
del self._mask
del self._img
class BackgroundSampler:
def __init__(self,
img_path: str,
mask_path: str,
polygons: List[geometry.Polygon],
img_wh: Tuple[int, int],
num_samples: int,
step: int = 25,
max_trials: int = 25,
mask_glom_val: int = 255,
buffer_dist: int = 0,
border_path=None,
strict_mode=True
) -> Tuple[np.ndarray, np.ndarray]:
self._mask = TFReader(mask_path)
self.mask_path = mask_path
self._img = TFReader(img_path)
self._border = rasterio.open(border_path) if border_path is not None else None
self._polygons = [poly.buffer(buffer_dist) for poly in polygons] if polygons else None
self._w, self._h = img_wh
self._num_samples = num_samples
self._mask_glom_val = mask_glom_val
self._boundless = True
self._count = -1
self._step = step
self._max_trials = max_trials
self._strict_mode = strict_mode
self._centroids = [self.gen_backgr_pt() for _ in range(num_samples)]
def gen_pt_in_img(self):
W, H = self._img.shape
pt = np.random.random() * W + self._w, np.random.random() * H + self._h
return pt
def gen_backgr_pt(self) -> Tuple[int, int]:
glom_presence_in_backgr, trial = 0, 0
gen = partial(gen_pt_in_poly, polygon=random.choice(self._polygons), max_num_attempts=200) \
if self._polygons is not None else self.gen_pt_in_img
while True:
rand_pt = gen()
x_cent, y_cent = np.array(rand_pt).astype(int)
x_off, y_off = x_cent - self._w // 2, y_cent - self._h // 2
window = Window(x_off, y_off, self._w, self._h)
sample_mask = self._mask.read(window=window, boundless=self._boundless)
trial += 1
if self._strict_mode:
if np.sum(sample_mask) <= glom_presence_in_backgr * self._mask_glom_val:
return x_cent, y_cent
elif trial == self._max_trials:
trial, glom_presence_in_backgr = 0, glom_presence_in_backgr + self._step
else:
return x_cent, y_cent
def __iter__(self):
return self
def __len__(self):
return self._num_samples
def __next__(self):
self._count += 1
if self._count < self._num_samples:
return self.__getitem__(self._count)
else:
self._count = -1
raise StopIteration("Failed to proceed to the next step")
def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
x_off = self._centroids[idx][0] - self._w // 2
y_off = self._centroids[idx][1] - self._h // 2
window = Window(x_off, y_off, self._w, self._h)
img = self._img.read(window=window, boundless=self._boundless)
mask = self._mask.read(window=window, boundless=self._boundless)
if self._border is not None:
return img, mask, self._border.read(window=window, boundless=True)
return img, mask
def __del__(self):
del self._mask
del self._img
class PolySampler:
def __init__(self,
img_path: str,
polygons: List[geometry.Polygon],
img_wh: Tuple[int, int],
num_samples: int,
) -> Tuple[np.ndarray, np.ndarray]:
buffer_dist = 0
self._img = rasterio.open(img_path)
self._polygons = [poly.buffer(buffer_dist) for poly in polygons]
self._w, self._h = img_wh
self._num_samples = num_samples
self._boundless = True
self._count = -1
def gen_pt(self) -> Tuple[int, int]:
gen = partial(gen_pt_in_poly, random.choice(self._polygons))
rand_pt = gen()
x_cent, y_cent = np.array(rand_pt).astype(int)
return x_cent, y_cent
def __next__(self):
self._count += 1
if self._count < self._num_samples:
return self.__getitem__(self._count)
else:
self._count = -1
raise StopIteration("Failed to proceed to the next step")
def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
x_cent, y_cent = self.gen_pt()
x_off = x_cent - self._w // 2
y_off = y_cent - self._h // 2
window= Window(x_off, y_off, self._w, self._h)
img = self._img.read(window=window, boundless=self._boundless)
return img
def __iter__(self): return self
def __len__(self): return self._num_samples
def __del__(self): del self._img
class GridSampler:
def __init__(self,
img_path: str,
mask_path: str,
img_wh: Tuple[int, int],
) -> Tuple[np.ndarray, np.ndarray]:
self._mask = TFReader(mask_path)
self._img = TFReader(img_path)
self._w, self._h = img_wh
self._boundless = True
self._count = -1
_, dims, *_ = get_basics_rasterio(img_path)
self.block_cds = list(generate_block_coords(dims[0], dims[1], img_wh))
self._num_samples = len(self.block_cds)
def __iter__(self): return self
def __len__(self): return self._num_samples
def __next__(self):
self._count += 1
if self._count < self._num_samples:
return self.__getitem__(self._count)
else:
self._count = -1
raise StopIteration("Failed to proceed to the next step")
def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
y_off, x_off, _, _ = self.block_cds[idx]
window = Window(x_off, y_off, self._w, self._h)
img = self._img.read(window=window, boundless=self._boundless)
mask = self._mask.read(window=window, boundless=self._boundless)
return img, mask
def __del__(self):
del self._mask
del self._img
def _write_block(block, name):
x, y, block_data = block
t = Image.fromarray(block_data.transpose((1,2,0)))
t.save(f'output/{name}_{x}_{y}.png')
def tif_block_read(name, block_size=None):
if block_size is None: block_size = (256, 256)
input_file, (W,H), _ = get_basics_rasterio(name)
nXBlocks, nYBlocks = _count_blocks(name, block_size=block_size)
nXValid, nYValid = block_size[0], block_size[1]
for X in range(nXBlocks):
if X == nXBlocks - 1: nXValid = W - X * block_size[0]
myX = X * block_size[0]
nYValid = block_size[1]
for Y in range(nYBlocks):
if Y == nYBlocks - 1: nYValid = H - Y * block_size[1]
myY = Y * block_size[1]
window = Window(myY, myX, nYValid, nXValid)
block = input_file.read([1,2,3], window=window)
yield X, Y, block
del input_file
def _count_blocks(name, block_size=(256, 256)):
_, dims, *_ = get_basics_rasterio(name)
nXBlocks = (int)((dims[0] + block_size[0] - 1) / block_size[0])
nYBlocks = (int)((dims[1] + block_size[1] - 1) / block_size[1])
return nXBlocks, nYBlocks
def generate_block_coords(H, W, block_size):
h,w = block_size
nYBlocks = (int)((H + h - 1) / h)
nXBlocks = (int)((W + w - 1) / w)
for X in range(nXBlocks):
cx = X * h
for Y in range(nYBlocks):
cy = Y * w
yield cy, cx, h, w
| true | true |
1c3d03151846589500c26ac25d22bdf4a186706b | 449 | py | Python | mayan/apps/views/utils.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/views/utils.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/views/utils.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | import logging
from django.urls import resolve as django_resolve
from django.urls.base import get_script_prefix
from django.utils.encoding import force_text
logger = logging.getLogger(name=__name__)
def convert_to_id_list(items):
    """Return *items* as a comma-separated string, coercing each entry to text."""
    return ','.join(map(force_text, items))
def resolve(path, urlconf=None):
    """Resolve *path* to a view match, stripping the WSGI script prefix first.

    Roots the path at '/' relative to the deployment's script prefix so
    Django's resolver matches it regardless of where the app is mounted.
    """
    path = '/{}'.format(path.replace(get_script_prefix(), '', 1))
    return django_resolve(path=path, urlconf=urlconf)
| 26.411765 | 66 | 0.734967 | import logging
from django.urls import resolve as django_resolve
from django.urls.base import get_script_prefix
from django.utils.encoding import force_text
logger = logging.getLogger(name=__name__)
def convert_to_id_list(items):
return ','.join(map(force_text, items))
def resolve(path, urlconf=None):
path = '/{}'.format(path.replace(get_script_prefix(), '', 1))
return django_resolve(path=path, urlconf=urlconf)
| true | true |
1c3d0388362e683a680943e188572f9095ce0cb5 | 409 | py | Python | search.py | defigueredor/biorobotics_codes | 11b121edee86906eed2da729424172ac64f14c12 | [
"MIT"
] | null | null | null | search.py | defigueredor/biorobotics_codes | 11b121edee86906eed2da729424172ac64f14c12 | [
"MIT"
] | null | null | null | search.py | defigueredor/biorobotics_codes | 11b121edee86906eed2da729424172ac64f14c12 | [
"MIT"
] | null | null | null | import pyrosim
import matplotlib.pyplot as plt
from robot import ROBOT
import random
# Run 10 simulations, each with a robot whose parameter is drawn uniformly
# from [-1, 1).  Note: nothing blocks on the simulation finishing — the
# wait/plot code below is commented out (and the helper name had a typo:
# "wait_to_fnish").
for i in range(0,10):
    sim=pyrosim.Simulator(eval_time=10000)
    robot=ROBOT(sim,random.random()*2-1)
    sim.start()
# Disabled sensor-data plotting, kept for reference:
#sim.wait_to_fnish()
#sensorData = sim.get_sensor_data( sensor_id = P2)
#print(sensorData)
#f=plt.figure()
#panel= f.add_subplot(111)
#plt.plot(sensorData)
#panel.set_ylim(0,+1.5)
#plt.show()
| 21.526316 | 54 | 0.718826 | import pyrosim
import matplotlib.pyplot as plt
from robot import ROBOT
import random
for i in range(0,10):
sim=pyrosim.Simulator(eval_time=10000)
robot=ROBOT(sim,random.random()*2-1)
sim.start()
| true | true |
1c3d03e6ad3a3d490aac5455ed5ac59ca9bf751a | 5,154 | py | Python | tests/test_primary_key_retrieval.py | edupo/sqlathanor | a5cfd349d092b25a3ffb3950b996b13878e1db17 | [
"MIT"
] | 101 | 2018-07-21T00:20:59.000Z | 2022-02-09T21:33:09.000Z | tests/test_primary_key_retrieval.py | edupo/sqlathanor | a5cfd349d092b25a3ffb3950b996b13878e1db17 | [
"MIT"
] | 85 | 2018-06-16T02:15:08.000Z | 2022-02-24T14:57:24.000Z | tests/test_primary_key_retrieval.py | edupo/sqlathanor | a5cfd349d092b25a3ffb3950b996b13878e1db17 | [
"MIT"
] | 6 | 2018-07-25T09:51:02.000Z | 2022-02-24T14:04:27.000Z | # -*- coding: utf-8 -*-
"""
***********************************
tests.test_primary_key_retrieval
***********************************
Tests for the ability of :class:`BaseModel` to retrieve primary key data.
"""
import pytest
from sqlathanor import Column
from tests.fixtures import db_engine, tables, base_model, db_session, \
model_single_pk, model_composite_pk, instance_single_pk, instance_composite_pk
@pytest.mark.parametrize('expected_count', [
    (1),
    (3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_columns_classmethod(request,
                                             model_single_pk,
                                             model_composite_pk,
                                             expected_count):
    """The model class should report the expected number of PK Column objects."""
    fixture = model_single_pk if expected_count == 1 else model_composite_pk
    target = fixture[0]
    pk_columns = target.get_primary_key_columns()
    assert len(pk_columns) == expected_count
    for pk_column in pk_columns:
        assert isinstance(pk_column, Column)
@pytest.mark.parametrize('expected_count', [
(1),
(3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_columns_instance(request,
instance_single_pk,
instance_composite_pk,
expected_count):
if expected_count == 1:
instances = instance_single_pk
elif expected_count > 1:
instances = instance_composite_pk
target = instances[0]
instance_values = instances[1]
primary_key_columns = target.get_primary_key_columns()
assert len(primary_key_columns) == expected_count
for column in primary_key_columns:
assert isinstance(column, Column)
@pytest.mark.parametrize('expected_count, expected_names', [
(1, ['id']),
(3, ['id', 'id2', 'id3']),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_column_names_classmethod(request,
model_single_pk,
model_composite_pk,
expected_count,
expected_names):
if expected_count == 1:
target = model_single_pk[0]
elif expected_count > 1:
target = model_composite_pk[0]
pk_column_names = target.get_primary_key_column_names()
assert len(pk_column_names) == expected_count
for column in pk_column_names:
assert isinstance(column, str)
assert column in expected_names
@pytest.mark.parametrize('expected_count, expected_names', [
(1, ['id']),
(3, ['id', 'id2', 'id3']),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_column_names_instance(request,
instance_single_pk,
instance_composite_pk,
expected_count,
expected_names):
if expected_count == 1:
target = instance_single_pk[0]
instance_values = instance_single_pk[1]
elif expected_count > 1:
target = instance_composite_pk[0]
instance_values = instance_composite_pk[1]
pk_column_names = target.get_primary_key_column_names()
assert len(pk_column_names) == expected_count
for column in pk_column_names:
assert isinstance(column, str)
assert column in expected_names
@pytest.mark.parametrize('is_composite, expected_count', [
(False, None),
(True, None),
(False, 1),
(True, 3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_primary_key_value(request,
db_session,
instance_single_pk,
instance_composite_pk,
is_composite,
expected_count):
if is_composite or (expected_count is not None and expected_count > 1):
target = instance_composite_pk[0]
instance_values = instance_composite_pk[1]
else:
target = instance_single_pk[0]
instance_values = instance_single_pk[1]
if expected_count:
db_session.add(target)
db_session.commit()
pk_values = target.primary_key_value
if expected_count is None:
assert pk_values is None
elif expected_count == 1:
assert not isinstance(pk_values, tuple)
assert pk_values == instance_values['id']
elif expected_count > 1:
assert isinstance(pk_values, tuple)
assert len(pk_values) == expected_count
id_values = [instance_values[key] for key in instance_values
if 'id' in key]
for value in pk_values:
assert value in id_values
| 32.415094 | 84 | 0.59934 |
import pytest
from sqlathanor import Column
from tests.fixtures import db_engine, tables, base_model, db_session, \
model_single_pk, model_composite_pk, instance_single_pk, instance_composite_pk
@pytest.mark.parametrize('expected_count', [
(1),
(3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_columns_classmethod(request,
model_single_pk,
model_composite_pk,
expected_count):
if expected_count == 1:
target = model_single_pk[0]
elif expected_count > 1:
target = model_composite_pk[0]
primary_key_columns = target.get_primary_key_columns()
assert len(primary_key_columns) == expected_count
for column in primary_key_columns:
assert isinstance(column, Column)
@pytest.mark.parametrize('expected_count', [
(1),
(3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_columns_instance(request,
instance_single_pk,
instance_composite_pk,
expected_count):
if expected_count == 1:
instances = instance_single_pk
elif expected_count > 1:
instances = instance_composite_pk
target = instances[0]
instance_values = instances[1]
primary_key_columns = target.get_primary_key_columns()
assert len(primary_key_columns) == expected_count
for column in primary_key_columns:
assert isinstance(column, Column)
@pytest.mark.parametrize('expected_count, expected_names', [
(1, ['id']),
(3, ['id', 'id2', 'id3']),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_column_names_classmethod(request,
model_single_pk,
model_composite_pk,
expected_count,
expected_names):
if expected_count == 1:
target = model_single_pk[0]
elif expected_count > 1:
target = model_composite_pk[0]
pk_column_names = target.get_primary_key_column_names()
assert len(pk_column_names) == expected_count
for column in pk_column_names:
assert isinstance(column, str)
assert column in expected_names
@pytest.mark.parametrize('expected_count, expected_names', [
(1, ['id']),
(3, ['id', 'id2', 'id3']),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_get_primary_key_column_names_instance(request,
instance_single_pk,
instance_composite_pk,
expected_count,
expected_names):
if expected_count == 1:
target = instance_single_pk[0]
instance_values = instance_single_pk[1]
elif expected_count > 1:
target = instance_composite_pk[0]
instance_values = instance_composite_pk[1]
pk_column_names = target.get_primary_key_column_names()
assert len(pk_column_names) == expected_count
for column in pk_column_names:
assert isinstance(column, str)
assert column in expected_names
@pytest.mark.parametrize('is_composite, expected_count', [
(False, None),
(True, None),
(False, 1),
(True, 3),
])
@pytest.mark.filterwarnings('ignore:This declarative base already contains a class')
def test_primary_key_value(request,
db_session,
instance_single_pk,
instance_composite_pk,
is_composite,
expected_count):
if is_composite or (expected_count is not None and expected_count > 1):
target = instance_composite_pk[0]
instance_values = instance_composite_pk[1]
else:
target = instance_single_pk[0]
instance_values = instance_single_pk[1]
if expected_count:
db_session.add(target)
db_session.commit()
pk_values = target.primary_key_value
if expected_count is None:
assert pk_values is None
elif expected_count == 1:
assert not isinstance(pk_values, tuple)
assert pk_values == instance_values['id']
elif expected_count > 1:
assert isinstance(pk_values, tuple)
assert len(pk_values) == expected_count
id_values = [instance_values[key] for key in instance_values
if 'id' in key]
for value in pk_values:
assert value in id_values
| true | true |
1c3d040f4315201e67a78d0383fc6b4002959c8a | 5,843 | py | Python | neuralnetwork/prepareData.py | Jpe230/DDatingApp | b515d35e63ac137ed5b3eefecf992d67f3c28eee | [
"MIT"
] | 10 | 2021-08-14T23:16:10.000Z | 2021-09-04T13:29:42.000Z | neuralnetwork/prepareData.py | JaimeTR/DDatingApp | b515d35e63ac137ed5b3eefecf992d67f3c28eee | [
"MIT"
] | null | null | null | neuralnetwork/prepareData.py | JaimeTR/DDatingApp | b515d35e63ac137ed5b3eefecf992d67f3c28eee | [
"MIT"
] | 1 | 2021-08-15T00:59:01.000Z | 2021-08-15T00:59:01.000Z | # System lib
import os
# Libraries for manipulating Dataset
import cv2
import pickle
import numpy as np
import numpy
from PIL import ImageEnhance
# Libraries for downloading Dataset
import zipfile
import gdown
import random
from numpy.core.fromnumeric import resize
# User-defined const
import helpers
import const
def extract_zipfile():
with zipfile.ZipFile(const.ZFILE) as zip_file:
zip_file.extractall(os.path.join(const.CURRENT_PATH, "dataset"))
def download_data():
# Download Dataset
if os.path.isfile(const.ZFILE) or os.path.isfile(os.path.join(const.DATASET_PATH, "All_Ratings.xlsx")):
print('data already downloaded')
else:
print("data does not exist. downloading it.")
gdown.download(const.DATA_URL, const.ZFILE, quiet=False)
# Extract ZipFile
if os.path.isfile(os.path.join(const.DATASET_PATH, "All_Ratings.xlsx")):
print("data already extracted.")
else:
print("extracting data.")
if not os.path.exists(const.DATA_PATH):
os.mkdir(os.path.join(const.CURRENT_PATH, "dataset"))
extract_zipfile()
# Remove ZipFile
os.remove(const.ZFILE)
# Download and extract Data
download_data()
# Load NN to detect face
face_cascade = cv2.CascadeClassifier(const.MODEL_PATH)
def getFace(detector, imgPath, imgName):
imgFullPath = os.path.join(imgPath, imgName)
img = cv2.imread(imgFullPath)
# Convert img to grayscale to remove colour skin discrimination
if img.ndim == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
w = img.shape[1]
faces = detector.detectMultiScale(gray, 1.1, 5, 0, (w//2, w//2))
resized_img = 0
# Discard imgs with several faces
if len(faces) == 1:
face = faces[0]
croped_img = img[face[1]:face[1]+face[3], face[0]:face[0]+face[2], :]
resized_img = cv2.resize(croped_img, (224, 224))
if resized_img.shape[0] != 224 or resized_img.shape[1] != 224:
print("Invalid WxH")
else:
# Try resizing still, since our data is kinda normalized
resized_img = cv2.resize(img, (224, 224))
#print("Error detecting faces, file:" + imgName)
return resized_img
def randomizeImage(img):
img = helpers.toimage(img)
# Rotate Image
image_rotated = img.rotate(random.random() * 30 - 30)
# Brightness
image_brigth = ImageEnhance.Brightness(
image_rotated).enhance(random.random() * .8 + .6)
# Contrast
image_contrast = ImageEnhance.Contrast(
image_brigth).enhance(random.random() * .6 + .7)
# Color
image_color = ImageEnhance.Color(
image_contrast).enhance(random.random() * .6 + .7)
randomImg = np.asarray_chkfinite(image_color)
return randomImg
label_dist = []
rating_files = [ const.RATING_PATH,
const.URATING_PATH]
# Normalized values in 5 cat.
prVoteImgName = ''
prVoteImgScr1 = 0
prVoteImgScr2 = 0
prVoteImgScr3 = 0
prVoteImgScr4 = 0
prVoteImgScr5 = 0
for file in rating_files:
# Read Labels
ratingFile = open(file, 'r')
lines = ratingFile.readlines()
currentIndex = 0
for line in lines:
line = line.replace('\n', '').split(' ')
currentIndex += 1
imgFileName = line[0]
imgScore = int(float(line[1]))
# Everybody needs love
imgScore = 1 if imgScore == 0 else imgScore
# print("Reading Img: " + imgFileName + " Score: " +
# str(imgScore) + " CIndex: " + str(currentIndex) + "/" + str(lines.__len__()))
if prVoteImgName == '':
prVoteImgName = imgFileName
if (imgFileName != prVoteImgName) or (currentIndex == lines.__len__()):
totalVotes = prVoteImgScr1 + prVoteImgScr2 + \
prVoteImgScr3 + prVoteImgScr4 + prVoteImgScr5
score1 = prVoteImgScr1 / totalVotes
score2 = prVoteImgScr2 / totalVotes
score3 = prVoteImgScr3 / totalVotes
score4 = prVoteImgScr4 / totalVotes
score5 = prVoteImgScr5 / totalVotes
im = getFace(face_cascade, const.DATA_PATH, prVoteImgName)
if isinstance(im, numpy.ndarray):
normed_img = (im - 127.5) / 127.5
ld = []
ld.append(score1)
ld.append(score2)
ld.append(score3)
ld.append(score4)
ld.append(score5)
label_dist.append([prVoteImgName, normed_img, ld])
else:
print("Error getting face or reading img")
prVoteImgName = imgFileName
prVoteImgScr1 = 0
prVoteImgScr2 = 0
prVoteImgScr3 = 0
prVoteImgScr4 = 0
prVoteImgScr5 = 0
if imgScore == 1:
prVoteImgScr1 += 1
elif imgScore == 2:
prVoteImgScr2 += 1
elif imgScore == 3:
prVoteImgScr3 += 1
elif imgScore == 4:
prVoteImgScr4 += 1
elif imgScore == 5:
prVoteImgScr5 += 1
ratingFile.close()
# Split data for training + testing
dataSplitIndex = int(label_dist.__len__() - label_dist.__len__()*0.1)
# Shuffle Array
random.shuffle(label_dist)
testLabelDist = label_dist[dataSplitIndex:]
trainLabelDist = label_dist[:dataSplitIndex]
trainDataLen = trainLabelDist.__len__()
# Randomize training data
for i in range(0, trainDataLen):
img = trainLabelDist[i][1]
rndImg = randomizeImage(img)
normedRndImg = (rndImg - 127.5) / 127.5
trainLabelDist.append([prVoteImgName, normed_img, ld])
# Shuffle and dump data for NN
random.shuffle(trainLabelDist)
pickle.dump(trainLabelDist, open(const.TRAINING_FILE, 'wb'))
random.shuffle(testLabelDist)
pickle.dump(testLabelDist, open(const.TESTING_FILE, 'wb'))
| 27.176744 | 107 | 0.6293 |
import os
import cv2
import pickle
import numpy as np
import numpy
from PIL import ImageEnhance
import zipfile
import gdown
import random
from numpy.core.fromnumeric import resize
import helpers
import const
def extract_zipfile():
with zipfile.ZipFile(const.ZFILE) as zip_file:
zip_file.extractall(os.path.join(const.CURRENT_PATH, "dataset"))
def download_data():
if os.path.isfile(const.ZFILE) or os.path.isfile(os.path.join(const.DATASET_PATH, "All_Ratings.xlsx")):
print('data already downloaded')
else:
print("data does not exist. downloading it.")
gdown.download(const.DATA_URL, const.ZFILE, quiet=False)
if os.path.isfile(os.path.join(const.DATASET_PATH, "All_Ratings.xlsx")):
print("data already extracted.")
else:
print("extracting data.")
if not os.path.exists(const.DATA_PATH):
os.mkdir(os.path.join(const.CURRENT_PATH, "dataset"))
extract_zipfile()
os.remove(const.ZFILE)
download_data()
face_cascade = cv2.CascadeClassifier(const.MODEL_PATH)
def getFace(detector, imgPath, imgName):
imgFullPath = os.path.join(imgPath, imgName)
img = cv2.imread(imgFullPath)
if img.ndim == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
w = img.shape[1]
faces = detector.detectMultiScale(gray, 1.1, 5, 0, (w//2, w//2))
resized_img = 0
if len(faces) == 1:
face = faces[0]
croped_img = img[face[1]:face[1]+face[3], face[0]:face[0]+face[2], :]
resized_img = cv2.resize(croped_img, (224, 224))
if resized_img.shape[0] != 224 or resized_img.shape[1] != 224:
print("Invalid WxH")
else:
resized_img = cv2.resize(img, (224, 224))
return resized_img
def randomizeImage(img):
img = helpers.toimage(img)
image_rotated = img.rotate(random.random() * 30 - 30)
image_brigth = ImageEnhance.Brightness(
image_rotated).enhance(random.random() * .8 + .6)
image_contrast = ImageEnhance.Contrast(
image_brigth).enhance(random.random() * .6 + .7)
image_color = ImageEnhance.Color(
image_contrast).enhance(random.random() * .6 + .7)
randomImg = np.asarray_chkfinite(image_color)
return randomImg
label_dist = []
rating_files = [ const.RATING_PATH,
const.URATING_PATH]
prVoteImgName = ''
prVoteImgScr1 = 0
prVoteImgScr2 = 0
prVoteImgScr3 = 0
prVoteImgScr4 = 0
prVoteImgScr5 = 0
for file in rating_files:
ratingFile = open(file, 'r')
lines = ratingFile.readlines()
currentIndex = 0
for line in lines:
line = line.replace('\n', '').split(' ')
currentIndex += 1
imgFileName = line[0]
imgScore = int(float(line[1]))
imgScore = 1 if imgScore == 0 else imgScore
if prVoteImgName == '':
prVoteImgName = imgFileName
if (imgFileName != prVoteImgName) or (currentIndex == lines.__len__()):
totalVotes = prVoteImgScr1 + prVoteImgScr2 + \
prVoteImgScr3 + prVoteImgScr4 + prVoteImgScr5
score1 = prVoteImgScr1 / totalVotes
score2 = prVoteImgScr2 / totalVotes
score3 = prVoteImgScr3 / totalVotes
score4 = prVoteImgScr4 / totalVotes
score5 = prVoteImgScr5 / totalVotes
im = getFace(face_cascade, const.DATA_PATH, prVoteImgName)
if isinstance(im, numpy.ndarray):
normed_img = (im - 127.5) / 127.5
ld = []
ld.append(score1)
ld.append(score2)
ld.append(score3)
ld.append(score4)
ld.append(score5)
label_dist.append([prVoteImgName, normed_img, ld])
else:
print("Error getting face or reading img")
prVoteImgName = imgFileName
prVoteImgScr1 = 0
prVoteImgScr2 = 0
prVoteImgScr3 = 0
prVoteImgScr4 = 0
prVoteImgScr5 = 0
if imgScore == 1:
prVoteImgScr1 += 1
elif imgScore == 2:
prVoteImgScr2 += 1
elif imgScore == 3:
prVoteImgScr3 += 1
elif imgScore == 4:
prVoteImgScr4 += 1
elif imgScore == 5:
prVoteImgScr5 += 1
ratingFile.close()
dataSplitIndex = int(label_dist.__len__() - label_dist.__len__()*0.1)
random.shuffle(label_dist)
testLabelDist = label_dist[dataSplitIndex:]
trainLabelDist = label_dist[:dataSplitIndex]
trainDataLen = trainLabelDist.__len__()
for i in range(0, trainDataLen):
img = trainLabelDist[i][1]
rndImg = randomizeImage(img)
normedRndImg = (rndImg - 127.5) / 127.5
trainLabelDist.append([prVoteImgName, normed_img, ld])
random.shuffle(trainLabelDist)
pickle.dump(trainLabelDist, open(const.TRAINING_FILE, 'wb'))
random.shuffle(testLabelDist)
pickle.dump(testLabelDist, open(const.TESTING_FILE, 'wb'))
| true | true |
1c3d0517292c37ca593463a29b3878b0db396f4d | 810 | py | Python | activatable_model/tests/migrations/0002_activatablemodelwrelandcascade.py | ambitioninc/django-activatable-model | e1967e44d97a03b1a6f1723aa3241bc56ab23eb7 | [
"MIT"
] | 16 | 2015-02-15T18:41:17.000Z | 2021-04-13T15:53:45.000Z | activatable_model/tests/migrations/0002_activatablemodelwrelandcascade.py | ambitioninc/django-activatable-model | e1967e44d97a03b1a6f1723aa3241bc56ab23eb7 | [
"MIT"
] | 5 | 2015-03-30T17:40:10.000Z | 2021-12-18T12:55:30.000Z | activatable_model/tests/migrations/0002_activatablemodelwrelandcascade.py | ambitioninc/django-activatable-model | e1967e44d97a03b1a6f1723aa3241bc56ab23eb7 | [
"MIT"
] | 9 | 2015-03-30T16:21:20.000Z | 2018-10-08T14:38:33.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-06-28 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tests', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ActivatableModelWRelAndCascade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=False)),
('rel_field', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tests.Rel')),
],
options={
'abstract': False,
},
),
]
| 28.928571 | 114 | 0.595062 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tests', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ActivatableModelWRelAndCascade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=False)),
('rel_field', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tests.Rel')),
],
options={
'abstract': False,
},
),
]
| true | true |
1c3d05300ac651d2f2b1fc6dfee427ed62c3c89f | 4,160 | py | Python | mistune/plugins/footnotes.py | hroncok/mistune | e8fc67cc73bc8afcb0a620d2d4b762efff182516 | [
"BSD-3-Clause"
] | 1,957 | 2015-01-01T02:47:09.000Z | 2022-03-26T04:09:23.000Z | mistune/plugins/footnotes.py | hroncok/mistune | e8fc67cc73bc8afcb0a620d2d4b762efff182516 | [
"BSD-3-Clause"
] | 872 | 2015-01-02T01:43:52.000Z | 2019-04-02T20:30:10.000Z | mistune/plugins/footnotes.py | hroncok/mistune | e8fc67cc73bc8afcb0a620d2d4b762efff182516 | [
"BSD-3-Clause"
] | 278 | 2015-01-03T12:50:20.000Z | 2022-02-14T18:58:37.000Z | import re
from ..inline_parser import LINK_LABEL
from ..util import unikey
__all__ = ['plugin_footnotes']
#: inline footnote syntax looks like::
#:
#: [^key]
INLINE_FOOTNOTE_PATTERN = r'\[\^(' + LINK_LABEL + r')\]'
#: define a footnote item like::
#:
#: [^key]: paragraph text to describe the note
DEF_FOOTNOTE = re.compile(
r'( {0,3})\[\^(' + LINK_LABEL + r')\]:[ \t]*('
r'[^\n]*\n+'
r'(?:\1 {1,3}(?! )[^\n]*\n+)*'
r')'
)
def parse_inline_footnote(inline, m, state):
key = unikey(m.group(1))
def_footnotes = state.get('def_footnotes')
if not def_footnotes or key not in def_footnotes:
return 'text', m.group(0)
index = state.get('footnote_index', 0)
index += 1
state['footnote_index'] = index
state['footnotes'].append(key)
return 'footnote_ref', key, index
def parse_def_footnote(block, m, state):
key = unikey(m.group(2))
if key not in state['def_footnotes']:
state['def_footnotes'][key] = m.group(3)
def parse_footnote_item(block, k, i, state):
def_footnotes = state['def_footnotes']
text = def_footnotes[k]
stripped_text = text.strip()
if '\n' not in stripped_text:
children = [{'type': 'paragraph', 'text': stripped_text}]
else:
lines = text.splitlines()
for second_line in lines[1:]:
if second_line:
break
spaces = len(second_line) - len(second_line.lstrip())
pattern = re.compile(r'^ {' + str(spaces) + r',}', flags=re.M)
text = pattern.sub('', text)
children = block.parse_text(text, state)
if not isinstance(children, list):
children = [children]
return {
'type': 'footnote_item',
'children': children,
'params': (k, i)
}
def md_footnotes_hook(md, result, state):
footnotes = state.get('footnotes')
if not footnotes:
return result
children = [
parse_footnote_item(md.block, k, i + 1, state)
for i, k in enumerate(footnotes)
]
tokens = [{'type': 'footnotes', 'children': children}]
output = md.block.render(tokens, md.inline, state)
return result + output
def render_ast_footnote_ref(key, index):
return {'type': 'footnote_ref', 'key': key, 'index': index}
def render_ast_footnote_item(children, key, index):
return {
'type': 'footnote_item',
'children': children,
'key': key,
'index': index,
}
def render_html_footnote_ref(key, index):
i = str(index)
html = '<sup class="footnote-ref" id="fnref-' + i + '">'
return html + '<a href="#fn-' + i + '">' + i + '</a></sup>'
def render_html_footnotes(text):
return (
'<section class="footnotes">\n<ol>\n'
+ text +
'</ol>\n</section>\n'
)
def render_html_footnote_item(text, key, index):
i = str(index)
back = '<a href="#fnref-' + i + '" class="footnote">↩</a>'
text = text.rstrip()
if text.endswith('</p>'):
text = text[:-4] + back + '</p>'
else:
text = text + back
return '<li id="fn-' + i + '">' + text + '</li>\n'
def plugin_footnotes(md):
md.inline.register_rule(
'footnote',
INLINE_FOOTNOTE_PATTERN,
parse_inline_footnote
)
index = md.inline.rules.index('std_link')
if index != -1:
md.inline.rules.insert(index, 'footnote')
else:
md.inline.rules.append('footnote')
md.block.register_rule('def_footnote', DEF_FOOTNOTE, parse_def_footnote)
index = md.block.rules.index('def_link')
if index != -1:
md.block.rules.insert(index, 'def_footnote')
else:
md.block.rules.append('def_footnote')
if md.renderer.NAME == 'html':
md.renderer.register('footnote_ref', render_html_footnote_ref)
md.renderer.register('footnote_item', render_html_footnote_item)
md.renderer.register('footnotes', render_html_footnotes)
elif md.renderer.NAME == 'ast':
md.renderer.register('footnote_ref', render_ast_footnote_ref)
md.renderer.register('footnote_item', render_ast_footnote_item)
md.after_render_hooks.append(md_footnotes_hook)
| 27.733333 | 76 | 0.603606 | import re
from ..inline_parser import LINK_LABEL
from ..util import unikey
__all__ = ['plugin_footnotes']
INLINE_FOOTNOTE_PATTERN = r'\[\^(' + LINK_LABEL + r')\]'
DEF_FOOTNOTE = re.compile(
r'( {0,3})\[\^(' + LINK_LABEL + r')\]:[ \t]*('
r'[^\n]*\n+'
r'(?:\1 {1,3}(?! )[^\n]*\n+)*'
r')'
)
def parse_inline_footnote(inline, m, state):
key = unikey(m.group(1))
def_footnotes = state.get('def_footnotes')
if not def_footnotes or key not in def_footnotes:
return 'text', m.group(0)
index = state.get('footnote_index', 0)
index += 1
state['footnote_index'] = index
state['footnotes'].append(key)
return 'footnote_ref', key, index
def parse_def_footnote(block, m, state):
key = unikey(m.group(2))
if key not in state['def_footnotes']:
state['def_footnotes'][key] = m.group(3)
def parse_footnote_item(block, k, i, state):
def_footnotes = state['def_footnotes']
text = def_footnotes[k]
stripped_text = text.strip()
if '\n' not in stripped_text:
children = [{'type': 'paragraph', 'text': stripped_text}]
else:
lines = text.splitlines()
for second_line in lines[1:]:
if second_line:
break
spaces = len(second_line) - len(second_line.lstrip())
pattern = re.compile(r'^ {' + str(spaces) + r',}', flags=re.M)
text = pattern.sub('', text)
children = block.parse_text(text, state)
if not isinstance(children, list):
children = [children]
return {
'type': 'footnote_item',
'children': children,
'params': (k, i)
}
def md_footnotes_hook(md, result, state):
footnotes = state.get('footnotes')
if not footnotes:
return result
children = [
parse_footnote_item(md.block, k, i + 1, state)
for i, k in enumerate(footnotes)
]
tokens = [{'type': 'footnotes', 'children': children}]
output = md.block.render(tokens, md.inline, state)
return result + output
def render_ast_footnote_ref(key, index):
return {'type': 'footnote_ref', 'key': key, 'index': index}
def render_ast_footnote_item(children, key, index):
return {
'type': 'footnote_item',
'children': children,
'key': key,
'index': index,
}
def render_html_footnote_ref(key, index):
i = str(index)
html = '<sup class="footnote-ref" id="fnref-' + i + '">'
return html + '<a href="#fn-' + i + '">' + i + '</a></sup>'
def render_html_footnotes(text):
return (
'<section class="footnotes">\n<ol>\n'
+ text +
'</ol>\n</section>\n'
)
def render_html_footnote_item(text, key, index):
i = str(index)
back = '<a href="#fnref-' + i + '" class="footnote">↩</a>'
text = text.rstrip()
if text.endswith('</p>'):
text = text[:-4] + back + '</p>'
else:
text = text + back
return '<li id="fn-' + i + '">' + text + '</li>\n'
def plugin_footnotes(md):
md.inline.register_rule(
'footnote',
INLINE_FOOTNOTE_PATTERN,
parse_inline_footnote
)
index = md.inline.rules.index('std_link')
if index != -1:
md.inline.rules.insert(index, 'footnote')
else:
md.inline.rules.append('footnote')
md.block.register_rule('def_footnote', DEF_FOOTNOTE, parse_def_footnote)
index = md.block.rules.index('def_link')
if index != -1:
md.block.rules.insert(index, 'def_footnote')
else:
md.block.rules.append('def_footnote')
if md.renderer.NAME == 'html':
md.renderer.register('footnote_ref', render_html_footnote_ref)
md.renderer.register('footnote_item', render_html_footnote_item)
md.renderer.register('footnotes', render_html_footnotes)
elif md.renderer.NAME == 'ast':
md.renderer.register('footnote_ref', render_ast_footnote_ref)
md.renderer.register('footnote_item', render_ast_footnote_item)
md.after_render_hooks.append(md_footnotes_hook)
| true | true |
1c3d06b67b97624b61f4873867362c81c5177431 | 3,682 | py | Python | aiida/engine/processes/futures.py | csadorf/aiida-core | dffff843e38ff6aa4819e521a1d51bb12e483ada | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/engine/processes/futures.py | csadorf/aiida-core | dffff843e38ff6aa4819e521a1d51bb12e483ada | [
"MIT",
"BSD-3-Clause"
] | 17 | 2020-03-11T17:04:05.000Z | 2020-05-01T09:34:45.000Z | aiida/engine/processes/futures.py | csadorf/aiida-core | dffff843e38ff6aa4819e521a1d51bb12e483ada | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=cyclic-import
"""Futures that can poll or receive broadcasted messages while waiting for a task to be completed."""
import asyncio
from typing import Optional, Union
import kiwipy
from aiida.orm import Node, load_node
__all__ = ('ProcessFuture',)
class ProcessFuture(asyncio.Future):
"""Future that waits for a process to complete using both polling and listening for broadcast events if possible."""
_filtered = None
def __init__(
self,
pk: int,
loop: Optional[asyncio.AbstractEventLoop] = None,
poll_interval: Union[None, int, float] = None,
communicator: Optional[kiwipy.Communicator] = None
):
"""Construct a future for a process node being finished.
If a None poll_interval is supplied polling will not be used.
If a communicator is supplied it will be used to listen for broadcast messages.
:param pk: process pk
:param loop: An event loop
:param poll_interval: optional polling interval, if None, polling is not activated.
:param communicator: optional communicator, if None, will not subscribe to broadcasts.
"""
from .process import ProcessState
# create future in specified event loop
loop = loop if loop is not None else asyncio.get_event_loop()
super().__init__(loop=loop)
assert not (poll_interval is None and communicator is None), 'Must poll or have a communicator to use'
node = load_node(pk=pk)
if node.is_terminated:
self.set_result(node)
else:
self._communicator = communicator
self.add_done_callback(lambda _: self.cleanup())
# Try setting up a filtered broadcast subscriber
if self._communicator is not None:
broadcast_filter = kiwipy.BroadcastFilter(lambda *args, **kwargs: self.set_result(node), sender=pk)
for state in [ProcessState.FINISHED, ProcessState.KILLED, ProcessState.EXCEPTED]:
broadcast_filter.add_subject_filter(f'state_changed.*.{state.value}')
self._broadcast_identifier = self._communicator.add_broadcast_subscriber(broadcast_filter)
# Start polling
if poll_interval is not None:
loop.create_task(self._poll_process(node, poll_interval))
def cleanup(self) -> None:
"""Clean up the future by removing broadcast subscribers from the communicator if it still exists."""
if self._communicator is not None:
self._communicator.remove_broadcast_subscriber(self._broadcast_identifier)
self._communicator = None
self._broadcast_identifier = None
async def _poll_process(self, node: Node, poll_interval: Union[int, float]) -> None:
"""Poll whether the process node has reached a terminal state."""
while not self.done() and not node.is_terminated:
await asyncio.sleep(poll_interval)
if not self.done():
self.set_result(node)
| 43.317647 | 120 | 0.620043 | true | true | |
1c3d071bb0d2fd667a0aa39a332e66b11b21250f | 2,696 | py | Python | robo/maximizers/direct.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 455 | 2015-04-02T06:12:13.000Z | 2022-02-28T10:54:29.000Z | robo/maximizers/direct.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 66 | 2015-04-07T15:20:55.000Z | 2021-06-04T16:40:46.000Z | robo/maximizers/direct.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 188 | 2015-04-14T09:42:34.000Z | 2022-03-31T21:04:53.000Z | import os
import sys
try:
import DIRECT
except ImportError:
raise ImportError("""
In order to use this module, DIRECT need to be installed. Try running
pip install direct
""")
import numpy as np
from robo.maximizers.base_maximizer import BaseMaximizer
class Direct(BaseMaximizer):
def __init__(self, objective_function, lower, upper,
n_func_evals=400, n_iters=200, verbose=True):
"""
Interface for the DIRECT algorithm by D. R. Jones, C. D. Perttunen
and B. E. Stuckmann
Parameters
----------
objective_function: acquisition function
The acquisition function which will be maximized
lower: np.ndarray (D)
Lower bounds of the input space
upper: np.ndarray (D)
Upper bounds of the input space
n_func_evals: int
The maximum number of function evaluations
n_iters: int
The maximum number of iterations
verbose: bool
Suppress Direct's output.
"""
self.n_func_evals = n_func_evals
self.n_iters = n_iters
self.verbose = verbose
super(Direct, self).__init__(objective_function, lower, upper)
def _direct_acquisition_fkt_wrapper(self, acq_f):
def _l(x, user_data):
return -acq_f(np.array([x])), 0
return _l
def maximize(self):
"""
Maximizes the given acquisition function.
Returns
-------
np.ndarray(N,D)
Point with highest acquisition value.
"""
if self.verbose:
x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
l=[self.lower],
u=[self.upper],
maxT=self.n_iters,
maxf=self.n_func_evals)
else:
fileno = sys.stdout.fileno()
with os.fdopen(os.dup(fileno), 'wb') as stdout:
with os.fdopen(os.open(os.devnull, os.O_WRONLY), 'wb') as devnull:
sys.stdout.flush();
os.dup2(devnull.fileno(), fileno) # redirect
x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
l=[self.lower],
u=[self.upper],
maxT=self.n_iters,
maxf=self.n_func_evals)
sys.stdout.flush();
os.dup2(stdout.fileno(), fileno) # restore
return x
| 32.878049 | 101 | 0.528932 | import os
import sys
try:
import DIRECT
except ImportError:
raise ImportError("""
In order to use this module, DIRECT need to be installed. Try running
pip install direct
""")
import numpy as np
from robo.maximizers.base_maximizer import BaseMaximizer
class Direct(BaseMaximizer):
    """DIRECT (DIviding RECTangles) maximizer for acquisition functions."""
    def __init__(self, objective_function, lower, upper,
                 n_func_evals=400, n_iters=200, verbose=True):
        """
        Parameters
        ----------
        objective_function: callable
            Acquisition function to be maximized.
        lower: np.ndarray (D)
            Lower bounds of the input space.
        upper: np.ndarray (D)
            Upper bounds of the input space.
        n_func_evals: int
            Maximum number of function evaluations.
        n_iters: int
            Maximum number of iterations.
        verbose: bool
            If False, DIRECT's console output is suppressed.
        """
        self.n_func_evals = n_func_evals
        self.n_iters = n_iters
        self.verbose = verbose
        super(Direct, self).__init__(objective_function, lower, upper)
    def _direct_acquisition_fkt_wrapper(self, acq_f):
        # DIRECT minimizes, so the acquisition value is negated; the second
        # tuple element is the solver's "fail flag" (0 == success).
        def _l(x, user_data):
            return -acq_f(np.array([x])), 0
        return _l
    def maximize(self):
        """Run DIRECT and return the point with the highest acquisition value."""
        if self.verbose:
            x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
                                   l=[self.lower],
                                   u=[self.upper],
                                   maxT=self.n_iters,
                                   maxf=self.n_func_evals)
        else:
            # DIRECT writes from C code directly to the stdout file
            # descriptor; temporarily point the fd at /dev/null and restore
            # it after the solve call.
            fileno = sys.stdout.fileno()
            with os.fdopen(os.dup(fileno), 'wb') as stdout:
                with os.fdopen(os.open(os.devnull, os.O_WRONLY), 'wb') as devnull:
                    sys.stdout.flush();
                    os.dup2(devnull.fileno(), fileno)
                    x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
                                           l=[self.lower],
                                           u=[self.upper],
                                           maxT=self.n_iters,
                                           maxf=self.n_func_evals)
                    sys.stdout.flush();
                    os.dup2(stdout.fileno(), fileno)
        return x
| true | true |
1c3d077ae6a45bd5e200e15041fb33043040b04e | 95 | py | Python | ecactivity/apps.py | minlaxz/university-blog | 4ff75adbeee3c32ea7bd2b647e06e8c5892c38a6 | [
"MIT"
] | null | null | null | ecactivity/apps.py | minlaxz/university-blog | 4ff75adbeee3c32ea7bd2b647e06e8c5892c38a6 | [
"MIT"
] | null | null | null | ecactivity/apps.py | minlaxz/university-blog | 4ff75adbeee3c32ea7bd2b647e06e8c5892c38a6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EcactivityConfig(AppConfig):
    """Django AppConfig for the ``ecactivity`` application."""
    # Dotted Python path of the application package.
    name = 'ecactivity'
| 15.833333 | 34 | 0.768421 | from django.apps import AppConfig
class EcactivityConfig(AppConfig):
    """Application configuration class registering the ``ecactivity`` app."""
    # Identifies the application package to Django.
    name = 'ecactivity'
| true | true |
1c3d09dc17bc58a64b3b41021ca264b66d8e9b31 | 427 | py | Python | tutorials/30-days-of-code/30-operators.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | tutorials/30-days-of-code/30-operators.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | tutorials/30-days-of-code/30-operators.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | # Day 2: Operators
# Start using arithmetic operators.
#
# https://www.hackerrank.com/challenges/30-operators/problem
#
#!/bin/python3
import sys
if __name__ == "__main__":
meal_cost = float(input().strip())
tip_percent = int(input().strip())
tax_percent = int(input().strip())
cost = meal_cost * (1 + tip_percent / 100 + tax_percent / 100)
print("The total meal cost is {:.0f} dollars.".format(cost))
| 22.473684 | 66 | 0.665105 |
import sys
if __name__ == "__main__":
    # Read the three stdin lines in order: base cost, tip %, tax %.
    raw_values = [input().strip() for _ in range(3)]
    meal_cost = float(raw_values[0])
    tip_percent = int(raw_values[1])
    tax_percent = int(raw_values[2])
    # Tip and tax are each a percentage of the base meal cost.
    cost = meal_cost * (1 + tip_percent / 100 + tax_percent / 100)
    print("The total meal cost is {:.0f} dollars.".format(cost))
| true | true |
1c3d0b27b2b88394656225fd9089618728ac2ef3 | 10,012 | py | Python | conf.py | theTrueMikeBrown/SimpleIdentityServerDoc | 10d3286f97124dc2760fb140ebbb1939d930ab71 | [
"Apache-2.0"
] | null | null | null | conf.py | theTrueMikeBrown/SimpleIdentityServerDoc | 10d3286f97124dc2760fb140ebbb1939d930ab71 | [
"Apache-2.0"
] | null | null | null | conf.py | theTrueMikeBrown/SimpleIdentityServerDoc | 10d3286f97124dc2760fb140ebbb1939d930ab71 | [
"Apache-2.0"
] | null | null | null | import sphinx_rtd_theme
# -*- coding: utf-8 -*-
#
# SimpleIdentityServer documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 26 14:24:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SimpleIdentityServer'
copyright = u'2016, habart thierry'
author = u'habart thierry'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'SimpleIdentityServer v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SimpleIdentityServerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SimpleIdentityServer.tex', u'SimpleIdentityServer Documentation',
u'habart thierry', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simpleidentityserver', u'SimpleIdentityServer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SimpleIdentityServer', u'SimpleIdentityServer Documentation',
author, 'SimpleIdentityServer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.274854 | 83 | 0.70825 | import sphinx_rtd_theme
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'SimpleIdentityServer'
copyright = u'2016, habart thierry'
author = u'habart thierry'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'SimpleIdentityServer v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SimpleIdentityServerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SimpleIdentityServer.tex', u'SimpleIdentityServer Documentation',
u'habart thierry', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simpleidentityserver', u'SimpleIdentityServer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SimpleIdentityServer', u'SimpleIdentityServer Documentation',
author, 'SimpleIdentityServer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
1c3d0bde10ae7f327ea370830c6e7edaa45fd6bb | 172 | py | Python | datainterface/__init__.py | adam2392/eegdatastorage | fc6fbaaa4d31df83b45e6d420d023fada62cfa8e | [
"Apache-2.0"
] | null | null | null | datainterface/__init__.py | adam2392/eegdatastorage | fc6fbaaa4d31df83b45e6d420d023fada62cfa8e | [
"Apache-2.0"
] | null | null | null | datainterface/__init__.py | adam2392/eegdatastorage | fc6fbaaa4d31df83b45e6d420d023fada62cfa8e | [
"Apache-2.0"
] | null | null | null | import sys
if int(sys.version_info[0]) < 3:
    # Python 2 only: pull the sibling modules into the package namespace via
    # implicit relative imports (a Python 2 feature removed in Python 3).
    # NOTE(review): on Python 3 nothing is imported here -- confirm callers
    # import these submodules explicitly.
    import dataconversion
    import savedata
    import readers
    import loadpatient
    import loadregions
    import utils
| 19.111111 | 32 | 0.715116 | import sys
if int(sys.version_info[0]) < 3:
    # Implicit relative imports only work on Python 2; the guard keeps this
    # __init__ importable under Python 3 (where the branch is skipped).
    import dataconversion
    import savedata
    import readers
    import loadpatient
    import loadregions
    import utils
| true | true |
1c3d0bffa57b4581e8473797e651744d5505b184 | 992 | py | Python | app/character.py | ZKemstedt/404notfound | d0634302f505e40347659e22ac6f4b7265c019c8 | [
"MIT"
] | null | null | null | app/character.py | ZKemstedt/404notfound | d0634302f505e40347659e22ac6f4b7265c019c8 | [
"MIT"
] | null | null | null | app/character.py | ZKemstedt/404notfound | d0634302f505e40347659e22ac6f4b7265c019c8 | [
"MIT"
] | 1 | 2021-09-27T15:58:29.000Z | 2021-09-27T15:58:29.000Z | class Character(object):
    def __init__(self, name, initiative, health, power, evasion, treasure=0):
        """Store the character's identity, combat stats and treasure count.

        name: display name, used as the key in ``export()``.
        initiative/health/power/evasion: numeric combat statistics.
        treasure: starting treasure amount (stored as ``self.treasures``).
        """
        self.name = name
        self.initiative = initiative
        self.health = health
        self.power = power
        self.evasion = evasion
        self.treasures = treasure
    def special_power(self):
        """Class-specific ability; concrete subclasses must override this."""
        raise NotImplementedError("Must be implemented!")
    def export(self) -> dict:
        """Return ``{name: {'class', 'health', 'treasure'}}`` for serialization."""
        character_class = self.__class__.__name__
        character_data = {}
        character_data[self.name] = {'class': character_class, 'health': self.health, 'treasure': self.treasures}
        return character_data
class Knight(Character):
    """Knight archetype: initiative 5, health 9, power 6, evasion 4."""
    def __init__(self, name, treasure=0):
        super().__init__(name, 5, 9, 6, 4, treasure)
class Wizard(Character):
    """Wizard archetype: initiative 6, health 4, power 9, evasion 5."""
    def __init__(self, name, treasure=0):
        super().__init__(name, 6, 4, 9, 5, treasure)
class Thief(Character):
    """Thief archetype: initiative 7, health 5, power 5, evasion 7."""
    def __init__(self, name, treasure=0):
        super().__init__(name, 7, 5, 5, 7, treasure)
class Character(object):
    """Base class for playable characters.

    Holds the combat statistics plus the amount of treasure collected.
    """

    def __init__(self, name, initiative, health, power, evasion, treasure=0):
        # ``treasures`` (plural) is the attribute name used by export().
        self.name = name
        self.initiative = initiative
        self.health = health
        self.power = power
        self.evasion = evasion
        self.treasures = treasure

    def special_power(self):
        """Class-specific ability; concrete subclasses must override."""
        raise NotImplementedError("Must be implemented!")

    def export(self) -> dict:
        """Return a serializable snapshot keyed by the character's name."""
        snapshot = {'class': type(self).__name__,
                    'health': self.health,
                    'treasure': self.treasures}
        return {self.name: snapshot}
class Knight(Character):
    """Heavily armoured fighter: initiative 5, health 9, power 6, evasion 4."""

    def __init__(self, name, treasure=0):
        super().__init__(name, initiative=5, health=9, power=6, evasion=4,
                         treasure=treasure)
class Wizard(Character):
    """Glass-cannon caster: initiative 6, health 4, power 9, evasion 5."""

    def __init__(self, name, treasure=0):
        super().__init__(name, initiative=6, health=4, power=9, evasion=5,
                         treasure=treasure)
class Thief(Character):
    """Quick and evasive: initiative 7, health 5, power 5, evasion 7."""

    def __init__(self, name, treasure=0):
        super().__init__(name, initiative=7, health=5, power=5, evasion=7,
                         treasure=treasure)
| true | true |
1c3d0c36608348ad1b32c901ca514941e4ddcfdf | 5,276 | py | Python | tools/render-ansible-tasks.py | smolar/tripleo-heat-templates | 6b858eb39f96cc2a81a115246fd4a2ef6a0b0097 | [
"Apache-2.0"
] | null | null | null | tools/render-ansible-tasks.py | smolar/tripleo-heat-templates | 6b858eb39f96cc2a81a115246fd4a2ef6a0b0097 | [
"Apache-2.0"
] | null | null | null | tools/render-ansible-tasks.py | smolar/tripleo-heat-templates | 6b858eb39f96cc2a81a115246fd4a2ef6a0b0097 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import errno
import json
import os
import sys
import yaml
import yaql
def parse_opts(argv):
    """Parse command-line options.

    Parameters
    ----------
    argv: list of str
        Full ``sys.argv``-style vector; the program name in ``argv[0]`` is
        skipped.

    Returns
    -------
    argparse.Namespace
        Parsed options with ``output``, ``ansible_tasks`` and exactly one of
        the mutually exclusive ``roles_list`` / ``all`` selectors set.
    """
    # Fix: the two description literals are joined by implicit concatenation,
    # so the first one needs a trailing space ("selected. Those", not
    # "selected.Those").
    parser = argparse.ArgumentParser(
        description='Render the Ansible tasks based in the role and the tags selected. '
                    'Those tasks can be used for debugging or linting purposes.')
    # --roles-list and --all are mutually exclusive and one must be given.
    subp = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument('--output', required=True, metavar='<tasks directory output>',
                        help="The folder to store the rendered tasks",
                        )
    parser.add_argument('--ansible-tasks', nargs="+", required=True,
                        metavar='<ansible tasks to be rendered>',
                        help='THT tags to filter the Ansible rendering '
                             'i.e. update_tasks')
    subp.add_argument('--roles-list', nargs="+", metavar='<list of roles to render>',
                      help='Composable roles to filter the Ansible rendering '
                           'i.e. Controller Compute')
    subp.add_argument('--all', action='store_true',
                      help='Process all services in the resource registry at once, '
                           'this allows to test all services templates avoiding '
                           'reading and generating all the files.')
    return parser.parse_args(argv[1:])
def main():
    """Render the requested Ansible task sections to YAML files.

    For each role (or the whole resource registry when ``--all`` is given)
    and each requested section (e.g. ``update_tasks``), every service
    template is loaded, its ``outputs.role_data.value.<section>`` list is
    extracted with yaql, and the merged list is written to
    ``<output>/<role>_<section>.yml``.
    """
    opts = parse_opts(sys.argv)
    # yaql engine used to evaluate the path expressions below.
    engine = yaql.factory.YaqlFactory().create()
    output = opts.output
    # We open the resource registry once
    # NOTE(review): yaml.load() without an explicit Loader is deprecated, and
    # the file handles opened inline are never closed -- left untouched here.
    resource_registry = "./overcloud-resource-registry-puppet.yaml"
    resource_reg = yaml.load(open(os.path.join(resource_registry), 'r'))
    if (opts.all):
        # This means we will parse all the services defined
        # by default in the resource registry
        roles_list = ["overcloud-resource-registry-puppet"]
    else:
        roles_list = opts.roles_list
    for role in roles_list:
        # We open the role file only once.
        if (opts.all):
            # The service definition will be the same resource registry
            role_resources = resource_reg
        else:
            role_resources = yaml.load(open(os.path.join("./roles/", role + ".yaml"), 'r'))
        for section_task in opts.ansible_tasks:
            if(opts.all):
                # We get all the services in the resource_registry section
                expression = engine(
                    "$.resource_registry"
                )
            else:
                expression = engine(
                    "$.ServicesDefault.flatten().distinct()"
                )
            # Service identifiers to process for this role/section pair.
            heat_resources = expression.evaluate(data=role_resources)
            role_ansible_tasks = []
            for resource in heat_resources:
                if(opts.all):
                    # If we use the resource registry as the source of the
                    # data we need to split the service name of the
                    # service config definition
                    resource = resource.split(' ')[0]
                # Map the service name to its template path via the registry.
                expression = engine(
                    "$.resource_registry.get('" + resource + "')"
                )
                config_file = expression.evaluate(data=resource_reg)
                if(config_file is not None):
                    if('::' in config_file):
                        print("This is a nested Heat resource")
                    else:
                        data_source = yaml.load(open("./" + config_file, 'r'))
                        expression = engine(
                            "$.outputs.role_data.value.get(" + section_task + ").flatten().distinct()"
                        )
                        # NOTE(review): broad except -- any evaluation error
                        # is treated as "section absent" in this template.
                        try:
                            ansible_tasks = expression.evaluate(data=data_source)
                            print(ansible_tasks)
                            role_ansible_tasks = role_ansible_tasks + ansible_tasks
                        except Exception as e:
                            print("There are no tasks in the configuration file")
            if (role_ansible_tasks != []):
                # Write the merged tasks, creating the output directory if
                # needed (race-safe via the EEXIST check).
                tasks_output_file = os.path.join(output, role + "_" + section_task + ".yml")
                if not os.path.exists(os.path.dirname(tasks_output_file)):
                    try:
                        os.makedirs(os.path.dirname(tasks_output_file))
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                save = open(tasks_output_file, 'w+')
                # Round-trip through JSON -- presumably to normalize the task
                # list to plain dicts/lists before dumping; confirm intent.
                yaml.dump(yaml.load(json.dumps(role_ansible_tasks)), save, default_flow_style=False)
if __name__ == '__main__':
    main()
| 41.21875 | 100 | 0.565201 |
import argparse
import errno
import json
import os
import sys
import yaml
import yaql
def parse_opts(argv):
    """Build the CLI parser and parse ``argv[1:]``.

    Returns an ``argparse.Namespace`` with ``output``, ``ansible_tasks`` and
    exactly one of the mutually exclusive ``roles_list`` / ``all`` options.
    """
    parser = argparse.ArgumentParser(
        description='Render the Ansible tasks based in the role and the tags selected.'
        'Those tasks can be used for debugging or linting purposes.')
    # The two service-selection modes are mutually exclusive and required.
    subp = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument('--output', required=True, metavar='<tasks directory output>',
                        help="The folder to store the rendered tasks",
                        )
    parser.add_argument('--ansible-tasks', nargs="+", required=True,
                        metavar='<ansible tasks to be rendered>',
                        help='THT tags to filter the Ansible rendering '
                        'i.e. update_tasks')
    subp.add_argument('--roles-list', nargs="+", metavar='<list of roles to render>',
                      help='Composable roles to filter the Ansible rendering '
                      'i.e. Controller Compute')
    subp.add_argument('--all', action='store_true',
                      help='Process all services in the resource registry at once, '
                      'this allows to test all services templates avoiding '
                      'reading and generating all the files.')
    opts = parser.parse_args(argv[1:])
    return opts
def main():
    """Extract the requested task sections from THT templates and dump YAML.

    For each selected role (or every service in the resource registry with
    ``--all``), each requested section list is pulled out of the service
    template with yaql and written to ``<output>/<role>_<section>.yml``.
    """
    opts = parse_opts(sys.argv)
    # yaql engine evaluating the path expressions against parsed YAML data.
    engine = yaql.factory.YaqlFactory().create()
    output = opts.output
    # Resource registry is parsed once and reused for every lookup below.
    resource_registry = "./overcloud-resource-registry-puppet.yaml"
    resource_reg = yaml.load(open(os.path.join(resource_registry), 'r'))
    if (opts.all):
        # With --all the registry itself stands in as the single "role".
        roles_list = ["overcloud-resource-registry-puppet"]
    else:
        roles_list = opts.roles_list
    for role in roles_list:
        if (opts.all):
            role_resources = resource_reg
        else:
            # Per-role service list comes from ./roles/<role>.yaml.
            role_resources = yaml.load(open(os.path.join("./roles/", role + ".yaml"), 'r'))
        for section_task in opts.ansible_tasks:
            if(opts.all):
                expression = engine(
                    "$.resource_registry"
                )
            else:
                expression = engine(
                    "$.ServicesDefault.flatten().distinct()"
                )
            heat_resources = expression.evaluate(data=role_resources)
            role_ansible_tasks = []
            for resource in heat_resources:
                if(opts.all):
                    # Registry keys carry extra tokens; keep the service name.
                    resource = resource.split(' ')[0]
                expression = engine(
                    "$.resource_registry.get('" + resource + "')"
                )
                config_file = expression.evaluate(data=resource_reg)
                if(config_file is not None):
                    if('::' in config_file):
                        print("This is a nested Heat resource")
                    else:
                        data_source = yaml.load(open("./" + config_file, 'r'))
                        expression = engine(
                            "$.outputs.role_data.value.get(" + section_task + ").flatten().distinct()"
                        )
                        # NOTE(review): broad except treats any evaluation
                        # error as "this template has no such section".
                        try:
                            ansible_tasks = expression.evaluate(data=data_source)
                            print(ansible_tasks)
                            role_ansible_tasks = role_ansible_tasks + ansible_tasks
                        except Exception as e:
                            print("There are no tasks in the configuration file")
            if (role_ansible_tasks != []):
                # Persist the merged task list, creating the directory first.
                tasks_output_file = os.path.join(output, role + "_" + section_task + ".yml")
                if not os.path.exists(os.path.dirname(tasks_output_file)):
                    try:
                        os.makedirs(os.path.dirname(tasks_output_file))
                    except OSError as exc:
                        if exc.errno != errno.EEXIST:
                            raise
                save = open(tasks_output_file, 'w+')
                yaml.dump(yaml.load(json.dumps(role_ansible_tasks)), save, default_flow_style=False)
if __name__ == '__main__':
    main()
| true | true |
1c3d0c8da724d8bd9913ca3e131905bf9cda0451 | 18,385 | py | Python | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | 1 | 2021-02-12T23:56:38.000Z | 2021-02-12T23:56:38.000Z | google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py | nachocano/python-aiplatform | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config,
)
# Register this module's messages with proto-plus under the canonical proto
# package name; ``manifest`` lists every message class defined below.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
    manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",},
)
class AutoMlTables(proto.Message):
    r"""A TrainingJob that trains and uploads an AutoML Tables Model.

    Attributes:
        inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs):
            The input parameters of this TrainingJob.
        metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata):
            The metadata information.
    """

    # NOTE: auto-generated proto-plus bindings; the ``number=`` values are
    # protobuf wire tags and must stay in sync with the service proto.
    inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",)
    metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",)
class AutoMlTablesInputs(proto.Message):
    r"""Training inputs for an AutoML Tables TrainingJob.

    Attributes:
        optimization_objective_recall_value (float):
            Required when optimization_objective is
            "maximize-precision-at-recall". Must be between 0 and 1,
            inclusive.
        optimization_objective_precision_value (float):
            Required when optimization_objective is
            "maximize-recall-at-precision". Must be between 0 and 1,
            inclusive.
        prediction_type (str):
            The type of prediction the Model is to
            produce. "classification" - Predict one out of
            multiple target values is
            picked for each row.
            "regression" - Predict a value based on its
            relation to other values. This
            type is available only to columns that contain
            semantically numeric values, i.e. integers or
            floating point number, even if
            stored as e.g. strings.
        target_column (str):
            The column name of the target column that the
            model is to predict.
        transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]):
            Each transformation will apply transform
            function to given input column. And the result
            will be used for training. When creating
            transformation for BigQuery Struct column, the
            column should be flattened using "." as the
            delimiter.
        optimization_objective (str):
            Objective function the model is optimizing
            towards. The training process creates a model
            that maximizes/minimizes the value of the
            objective function over the validation set.
            The supported optimization objectives depend on
            the prediction type. If the field is not set, a
            default objective function is used.
            classification (binary):
            "maximize-au-roc" (default) - Maximize the
            area under the receiver
            operating characteristic (ROC) curve.
            "minimize-log-loss" - Minimize log loss.
            "maximize-au-prc" - Maximize the area under
            the precision-recall curve. "maximize-
            precision-at-recall" - Maximize precision for a
            specified
            recall value. "maximize-recall-at-precision" -
            Maximize recall for a specified
            precision value.
            classification (multi-class):
            "minimize-log-loss" (default) - Minimize log
            loss.
            regression:
            "minimize-rmse" (default) - Minimize root-
            mean-squared error (RMSE). "minimize-mae" -
            Minimize mean-absolute error (MAE). "minimize-
            rmsle" - Minimize root-mean-squared log error
            (RMSLE).
        train_budget_milli_node_hours (int):
            Required. The train budget of creating this
            model, expressed in milli node hours i.e. 1,000
            value in this field means 1 node hour.
            The training cost of the model will not exceed
            this budget. The final cost will be attempted to
            be close to the budget, though may end up being
            (even) noticeably smaller - at the backend's
            discretion. This especially may happen when
            further model training ceases to provide any
            improvements.
            If the budget is set to a value known to be
            insufficient to train a model for the given
            dataset, the training won't be attempted and
            will error.
            The train budget must be between 1,000 and
            72,000 milli node hours, inclusive.
        disable_early_stopping (bool):
            Use the entire training budget. This disables
            the early stopping feature. By default, the
            early stopping feature is enabled, which means
            that AutoML Tables might stop training before
            the entire training budget has been used.
        weight_column_name (str):
            Column name that should be used as the weight
            column. Higher values in this column give more
            importance to the row during model training. The
            column must have numeric values between 0 and
            10000 inclusively; 0 means the row is ignored
            for training. If weight column field is not set,
            then all rows are assumed to have equal weight
            of 1.
        export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig):
            Configuration for exporting test set
            predictions to a BigQuery table. If this
            configuration is absent, then the export is not
            performed.
    """

    # NOTE: auto-generated proto-plus bindings; ``number=`` values are
    # protobuf wire tags and must stay in sync with the service proto.

    class Transformation(proto.Message):
        r"""Per-column transformation specification. Exactly one of the
        nested configs is set (proto oneof ``transformation_detail``).

        Attributes:
            auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
            numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
            categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
            timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
            text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation):
            repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
            repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
            repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
        """

        class AutoTransformation(proto.Message):
            r"""Training pipeline will infer the proper transformation based
            on the statistic of dataset.

            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1)

        class NumericTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.

            -  The value converted to float32.
            -  The z_score of the value.
            -  log(value+1) when the value is greater than or equal to 0.
               Otherwise, this transformation is not applied and the value is
               considered a missing value.
            -  z_score of log(value+1) when the value is greater than or equal
               to 0. Otherwise, this transformation is not applied and the value
               is considered a missing value.
            -  A boolean value that indicates whether the value is valid.

            Attributes:
                column_name (str):
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from trainining data.
            """

            column_name = proto.Field(proto.STRING, number=1)
            invalid_values_allowed = proto.Field(proto.BOOL, number=2)

        class CategoricalTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.

            -  The categorical string as is--no change to case, punctuation,
               spelling, tense, and so on.
            -  Convert the category name to a dictionary lookup index and
               generate an embedding for each index.
            -  Categories that appear less than 5 times in the training dataset
               are treated as the "unknown" category. The "unknown" category
               gets its own special lookup index and resulting embedding.

            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1)

        class TimestampTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.

            -  Apply the transformation functions for Numerical columns.
            -  Determine the year, month, day,and weekday. Treat each value from
               the
            -  timestamp as a Categorical column.
            -  Invalid numerical values (for example, values that fall outside
               of a typical timestamp range, or are extreme values) receive no
               special treatment and are not removed.

            Attributes:
                column_name (str):
                time_format (str):
                    The format in which that time field is expressed. The
                    time_format must either be one of:

                    -  ``unix-seconds``
                    -  ``unix-milliseconds``
                    -  ``unix-microseconds``
                    -  ``unix-nanoseconds`` (for respectively number of seconds,
                       milliseconds, microseconds and nanoseconds since start of
                       the Unix epoch); or be written in ``strftime`` syntax. If
                       time_format is not set, then the default format is RFC
                       3339 ``date-time`` format, where ``time-offset`` =
                       ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from trainining data.
            """

            column_name = proto.Field(proto.STRING, number=1)
            time_format = proto.Field(proto.STRING, number=2)
            invalid_values_allowed = proto.Field(proto.BOOL, number=3)

        class TextTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.

            -  The text as is--no change to case, punctuation, spelling, tense,
               and so on.
            -  Tokenize text to words. Convert each words to a dictionary lookup
               index and generate an embedding for each index. Combine the
               embedding of all elements into a single embedding using the mean.
            -  Tokenization is based on unicode script boundaries.
            -  Missing values get their own lookup index and resulting
               embedding.
            -  Stop-words receive no special treatment and are not removed.

            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1)

        class NumericArrayTransformation(proto.Message):
            r"""Treats the column as numerical array and performs following
            transformation functions.

            -  All transformations for Numerical types applied to the average of
               the all elements.
            -  The average of empty arrays is treated as zero.

            Attributes:
                column_name (str):
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from trainining data.
            """

            column_name = proto.Field(proto.STRING, number=1)
            invalid_values_allowed = proto.Field(proto.BOOL, number=2)

        class CategoricalArrayTransformation(proto.Message):
            r"""Treats the column as categorical array and performs following
            transformation functions.

            -  For each element in the array, convert the category name to a
               dictionary lookup index and generate an embedding for each index.
               Combine the embedding of all elements into a single embedding
               using the mean.
            -  Empty arrays treated as an embedding of zeroes.

            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1)

        class TextArrayTransformation(proto.Message):
            r"""Treats the column as text array and performs following
            transformation functions.

            -  Concatenate all text values in the array into a single text value
               using a space (" ") as a delimiter, and then treat the result as
               a single text value. Apply the transformations for Text columns.
            -  Empty arrays treated as an empty text.

            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1)

        # Exactly one of the following is populated per Transformation
        # (oneof "transformation_detail"); the field chosen selects how the
        # named column is preprocessed.
        auto = proto.Field(
            proto.MESSAGE,
            number=1,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.AutoTransformation",
        )
        numeric = proto.Field(
            proto.MESSAGE,
            number=2,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.NumericTransformation",
        )
        categorical = proto.Field(
            proto.MESSAGE,
            number=3,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.CategoricalTransformation",
        )
        timestamp = proto.Field(
            proto.MESSAGE,
            number=4,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TimestampTransformation",
        )
        text = proto.Field(
            proto.MESSAGE,
            number=5,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TextTransformation",
        )
        repeated_numeric = proto.Field(
            proto.MESSAGE,
            number=6,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.NumericArrayTransformation",
        )
        repeated_categorical = proto.Field(
            proto.MESSAGE,
            number=7,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation",
        )
        repeated_text = proto.Field(
            proto.MESSAGE,
            number=8,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TextArrayTransformation",
        )

    # At most one of the next two thresholds is set (oneof
    # "additional_optimization_objective_config"), matching the selected
    # optimization_objective.
    optimization_objective_recall_value = proto.Field(
        proto.FLOAT, number=5, oneof="additional_optimization_objective_config"
    )
    optimization_objective_precision_value = proto.Field(
        proto.FLOAT, number=6, oneof="additional_optimization_objective_config"
    )
    prediction_type = proto.Field(proto.STRING, number=1)
    target_column = proto.Field(proto.STRING, number=2)
    transformations = proto.RepeatedField(
        proto.MESSAGE, number=3, message=Transformation,
    )
    optimization_objective = proto.Field(proto.STRING, number=4)
    train_budget_milli_node_hours = proto.Field(proto.INT64, number=7)
    disable_early_stopping = proto.Field(proto.BOOL, number=8)
    weight_column_name = proto.Field(proto.STRING, number=9)
    export_evaluated_data_items_config = proto.Field(
        proto.MESSAGE,
        number=10,
        message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
    )
class AutoMlTablesMetadata(proto.Message):
    r"""Model metadata specific to AutoML Tables.

    Attributes:
        train_cost_milli_node_hours (int):
            Output only. The actual training cost of the
            model, expressed in milli node hours, i.e. 1,000
            value in this field means 1 node hour.
            Guaranteed to not exceed the train budget.
    """

    # Output-only field populated by the service after training completes.
    train_cost_milli_node_hours = proto.Field(proto.INT64, number=1)
# Export exactly the names declared in the proto-plus manifest, sorted for a
# deterministic public API.
__all__ = tuple(sorted(__protobuf__.manifest))
| 41.037946 | 166 | 0.633016 |
import proto
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config,
)
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",},
)
class AutoMlTables(proto.Message):
inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",)
metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",)
class AutoMlTablesInputs(proto.Message):
class Transformation(proto.Message):
class AutoTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
class NumericTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
invalid_values_allowed = proto.Field(proto.BOOL, number=2)
class CategoricalTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
class TimestampTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
time_format = proto.Field(proto.STRING, number=2)
invalid_values_allowed = proto.Field(proto.BOOL, number=3)
class TextTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
class NumericArrayTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
invalid_values_allowed = proto.Field(proto.BOOL, number=2)
class CategoricalArrayTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
class TextArrayTransformation(proto.Message):
column_name = proto.Field(proto.STRING, number=1)
auto = proto.Field(
proto.MESSAGE,
number=1,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.AutoTransformation",
)
numeric = proto.Field(
proto.MESSAGE,
number=2,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.NumericTransformation",
)
categorical = proto.Field(
proto.MESSAGE,
number=3,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.CategoricalTransformation",
)
timestamp = proto.Field(
proto.MESSAGE,
number=4,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TimestampTransformation",
)
text = proto.Field(
proto.MESSAGE,
number=5,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TextTransformation",
)
repeated_numeric = proto.Field(
proto.MESSAGE,
number=6,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.NumericArrayTransformation",
)
repeated_categorical = proto.Field(
proto.MESSAGE,
number=7,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation",
)
repeated_text = proto.Field(
proto.MESSAGE,
number=8,
oneof="transformation_detail",
message="AutoMlTablesInputs.Transformation.TextArrayTransformation",
)
optimization_objective_recall_value = proto.Field(
proto.FLOAT, number=5, oneof="additional_optimization_objective_config"
)
optimization_objective_precision_value = proto.Field(
proto.FLOAT, number=6, oneof="additional_optimization_objective_config"
)
prediction_type = proto.Field(proto.STRING, number=1)
target_column = proto.Field(proto.STRING, number=2)
transformations = proto.RepeatedField(
proto.MESSAGE, number=3, message=Transformation,
)
optimization_objective = proto.Field(proto.STRING, number=4)
train_budget_milli_node_hours = proto.Field(proto.INT64, number=7)
disable_early_stopping = proto.Field(proto.BOOL, number=8)
weight_column_name = proto.Field(proto.STRING, number=9)
export_evaluated_data_items_config = proto.Field(
proto.MESSAGE,
number=10,
message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
)
class AutoMlTablesMetadata(proto.Message):
train_cost_milli_node_hours = proto.Field(proto.INT64, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
1c3d0ca0828fa3b7b54f47b783abb541b09fb064 | 606 | py | Python | IMS/product/migrations/0004_recipt_customer.py | AyushPaudel/Inventory-Management-System | 04e57b0d02b1b7cade992b959569e750ca339c8e | [
"MIT"
] | 2 | 2021-09-01T13:00:24.000Z | 2021-11-19T12:16:52.000Z | IMS/product/migrations/0004_recipt_customer.py | aadarshadhakalg/Inventory-Management-System-1 | 075ec49b9d4abebb7d9a0b150a6cb70f6cbf5144 | [
"MIT"
] | null | null | null | IMS/product/migrations/0004_recipt_customer.py | aadarshadhakalg/Inventory-Management-System-1 | 075ec49b9d4abebb7d9a0b150a6cb70f6cbf5144 | [
"MIT"
] | 1 | 2022-01-07T05:50:08.000Z | 2022-01-07T05:50:08.000Z | # Generated by Django 3.2 on 2021-08-07 15:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add a nullable ``customer`` FK (to the active user
    model) on the ``recipt`` model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('product', '0003_products_original_stock'),
    ]
    operations = [
        migrations.AddField(
            model_name='recipt',
            name='customer',
            # SET_NULL keeps the receipt row when its user is deleted; this
            # requires null=True on the column.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.545455 | 134 | 0.683168 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0003_products_original_stock'),
]
operations = [
migrations.AddField(
model_name='recipt',
name='customer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
1c3d0d31c74257db4edef6bb746f9ab82098b25a | 710 | py | Python | nominations/migrations/0048_auto_20170804_1713.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 4 | 2017-01-29T00:38:41.000Z | 2019-09-04T14:30:24.000Z | nominations/migrations/0048_auto_20170804_1713.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 74 | 2017-10-02T04:42:54.000Z | 2022-01-13T00:44:16.000Z | nominations/migrations/0048_auto_20170804_1713.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 3 | 2017-03-24T23:26:46.000Z | 2019-10-21T01:16:03.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-04 17:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: update the ``choices`` set (and display labels) of
    ``InitiativeApplication.status``."""

    dependencies = [
        ('nominations', '0047_initiativeapplication_locality'),
    ]
    operations = [
        migrations.AlterField(
            model_name='initiativeapplication',
            name='status',
            # AlterField here only changes validation metadata (choices);
            # existing rows are untouched -- no data migration needed.
            field=models.CharField(choices=[('incomplete', 'Incomplete'), ('submitted', 'Submitted'), ('needs-research', 'Needs Research'), ('needs-staff-review', 'Needs Staff Review'), ('approved', 'Endorsed'), ('removed', 'Not Endorsed')], default='submitted', max_length=16),
        ),
    ]
| 33.809524 | 278 | 0.650704 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nominations', '0047_initiativeapplication_locality'),
]
operations = [
migrations.AlterField(
model_name='initiativeapplication',
name='status',
field=models.CharField(choices=[('incomplete', 'Incomplete'), ('submitted', 'Submitted'), ('needs-research', 'Needs Research'), ('needs-staff-review', 'Needs Staff Review'), ('approved', 'Endorsed'), ('removed', 'Not Endorsed')], default='submitted', max_length=16),
),
]
| true | true |
1c3d0e63d40ffea8848cffbd1846ca02f64a99d4 | 2,126 | py | Python | mai_experiments/run_experiments_mai_version/financial.py | joschout/tilde | 1403b50842b83f2edd6b16b1fbe24b9bec2d0048 | [
"Apache-2.0"
] | 16 | 2019-03-06T06:11:33.000Z | 2022-02-07T21:30:25.000Z | mai_experiments/run_experiments_mai_version/financial.py | joschout/tilde | 1403b50842b83f2edd6b16b1fbe24b9bec2d0048 | [
"Apache-2.0"
] | 4 | 2019-10-08T14:48:23.000Z | 2020-03-26T00:31:57.000Z | mai_experiments/run_experiments_mai_version/financial.py | krishnangovindraj/tilde | 5243a02d92f375d56ffc49ab8c3d1a87e31e99b9 | [
"Apache-2.0"
] | 4 | 2019-08-14T05:40:47.000Z | 2020-08-05T13:21:16.000Z | import os
import sys
from mai_version.fold.fold_file_parser import main_cross_validation
from mai_version.main import kb_suffix, s_suffix, bg_suffix

# Runs the 'financial' cross-validation experiment end to end.

# CHANGE THESE TWO FOR EACH TEST
test_name = 'financial'
logic_name = 'financial-d-mod'

# --- command-line printing settings ---
debug_printing_tree_building = False
debug_printing_program_conversion = False
debug_printing_get_classifier = False
debug_printing_classification = False
filter_out_unlabeled_examples = False
hide_printouts = True

# --- directories ---
droot = 'D:\\KUL\\KUL MAI\\Masterproef\\TILDE\\tilde\\fold\\data\\'
dlogic_relative = 't-0-0-0\\'
dfold_relative = 'folds\\'
dout_relative = 'output\\'
dlogic = droot + test_name + '\\' + dlogic_relative
dfold = droot + test_name + '\\' + dfold_relative
doutput = droot + test_name + '\\' + dout_relative

# --- file names ---
fname_examples = dlogic + logic_name + kb_suffix
fname_settings = dlogic + logic_name + s_suffix
fname_background = dlogic + logic_name + bg_suffix

# --- fold settings ---
fname_prefix_fold = 'test'
fold_start_index = 0
nb_folds = 10
fold_suffix = '.txt'

# -- create output directory (race-free; no error if it already exists)
os.makedirs(doutput, exist_ok=True)

print("start financial")
save_stdout = sys.stdout
# Optionally silence the (very verbose) cross-validation printouts.
# BUGFIX: the original never closed the devnull handle and, if
# main_cross_validation raised, left sys.stdout redirected so every later
# message was lost. Restore and close in a finally block instead.
devnull = open(os.devnull, "w") if hide_printouts else None
if devnull is not None:
    sys.stdout = devnull
try:
    main_cross_validation(fname_examples=fname_examples, fname_settings=fname_settings, fname_background=fname_background,
                          dir_fold_files=dfold, fname_prefix_fold=fname_prefix_fold, fold_start_index=fold_start_index,
                          nb_folds=nb_folds, fold_suffix=fold_suffix, dir_output_files=doutput,
                          filter_out_unlabeled_examples=filter_out_unlabeled_examples,
                          debug_printing_tree_building=debug_printing_tree_building,
                          debug_printing_program_conversion=debug_printing_program_conversion,
                          debug_printing_get_classifier=debug_printing_get_classifier,
                          debug_printing_classification=debug_printing_classification)
finally:
    sys.stdout = save_stdout
    if devnull is not None:
        devnull.close()
print("finished financial")
| 34.290323 | 118 | 0.742239 | import os
import sys
from mai_version.fold.fold_file_parser import main_cross_validation
from mai_version.main import kb_suffix, s_suffix, bg_suffix
test_name = 'financial'
logic_name = 'financial-d-mod'
debug_printing_tree_building = False
debug_printing_program_conversion = False
debug_printing_get_classifier = False
debug_printing_classification = False
filter_out_unlabeled_examples = False
hide_printouts = True
droot = 'D:\\KUL\\KUL MAI\\Masterproef\\TILDE\\tilde\\fold\\data\\'
dlogic_relative = 't-0-0-0\\'
dfold_relative = 'folds\\'
dout_relative = 'output\\'
dlogic = droot + test_name + '\\' + dlogic_relative
dfold = droot + test_name + '\\' + dfold_relative
doutput = droot + test_name + '\\' + dout_relative
fname_examples = dlogic + logic_name + kb_suffix
fname_settings = dlogic + logic_name + s_suffix
fname_background = dlogic + logic_name + bg_suffix
fname_prefix_fold = 'test'
fold_start_index = 0
nb_folds = 10
fold_suffix = '.txt'
if not os.path.exists(doutput):
os.makedirs(doutput)
print("start financial")
save_stdout = sys.stdout
if hide_printouts:
sys.stdout = open(os.devnull, "w")
main_cross_validation(fname_examples=fname_examples, fname_settings=fname_settings, fname_background=fname_background,
dir_fold_files=dfold, fname_prefix_fold=fname_prefix_fold, fold_start_index=fold_start_index,
nb_folds=nb_folds, fold_suffix=fold_suffix, dir_output_files=doutput,
filter_out_unlabeled_examples=filter_out_unlabeled_examples,
debug_printing_tree_building=debug_printing_tree_building,
debug_printing_program_conversion=debug_printing_program_conversion,
debug_printing_get_classifier=debug_printing_get_classifier,
debug_printing_classification=debug_printing_classification)
if hide_printouts:
sys.stdout = save_stdout
print("finished financial")
| true | true |
1c3d0f852d0c8a67e4af49ba082a98715be1fef7 | 1,567 | py | Python | tests/asynctest.py | netrack/bayes | 15e0c54b795f4ce527cc5e2c46bbb7da434ac036 | [
"Apache-2.0"
] | 12 | 2019-07-15T11:15:23.000Z | 2019-12-05T12:19:48.000Z | tests/asynctest.py | netrack/bayes | 15e0c54b795f4ce527cc5e2c46bbb7da434ac036 | [
"Apache-2.0"
] | 10 | 2019-06-25T17:42:44.000Z | 2019-07-09T13:28:12.000Z | tests/asynctest.py | netrack/tensorcraft | 15e0c54b795f4ce527cc5e2c46bbb7da434ac036 | [
"Apache-2.0"
] | 1 | 2019-05-23T13:22:19.000Z | 2019-05-23T13:22:19.000Z | import aiohttp.web
import asyncio
import functools
import typing
import unittest
import unittest.mock
class AsyncMagicMock(unittest.mock.MagicMock):
    """A MagicMock whose call is awaitable, mimicking an async callable."""

    async def __call__(self, *call_args, **call_kwargs):
        # Delegate to MagicMock's synchronous call machinery so that
        # return_value, side_effect and call recording all keep working.
        result = super().__call__(*call_args, **call_kwargs)
        return result
class AsyncGeneratorMock(unittest.mock.MagicMock):
"""Mock async generator type
This type allows to pass a regular sequence of items in order
to mimic asynchronous generator.
"""
def __init__(self, *args, return_value: typing.Sequence = [], **kwargs):
super().__init__(*args, **kwargs)
self.iter = return_value.__iter__()
self.return_value = self
def __aiter__(self) -> typing.AsyncGenerator:
return self
async def __anext__(self):
try:
return self.iter.__next__()
except StopIteration:
raise StopAsyncIteration
class AsyncTestCase(unittest.TestCase):
    """unittest.TestCase variant with coroutine set-up/tear-down hooks.

    Subclasses override ``setUpAsync``/``tearDownAsync``; the synchronous
    ``setUp``/``tearDown`` drive them on the current event loop.
    """

    def setUp(self):
        # One loop per test: fetched here, reused by tearDown.
        self.__loop = asyncio.get_event_loop()
        self.__drive(self.setUpAsync())

    def tearDown(self):
        self.__drive(self.tearDownAsync())

    def __drive(self, coroutine):
        """Run *coroutine* to completion on the test's event loop."""
        return self.__loop.run_until_complete(coroutine)

    async def setUpAsync(self) -> None:
        """Async counterpart of setUp; override in subclasses. No-op here."""

    async def tearDownAsync(self) -> None:
        """Async counterpart of tearDown; override in subclasses. No-op here."""
def unittest_run_loop(coroutine):
    """Decorator: run an ``async def`` test method synchronously.

    Wraps *coroutine* so that calling it drives the coroutine to completion
    on the current event loop, letting async tests run under plain unittest.

    BUGFIX: apply ``functools.wraps`` so the wrapper keeps the original
    ``__name__``/``__doc__`` -- without it, unittest discovery and failure
    reports show ``test`` instead of the real test name.
    """

    @functools.wraps(coroutine)
    def test(*args, **kwargs):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(coroutine(*args, **kwargs))

    return test
def unittest_handler(awaitable):
    """Adapt a zero-argument awaitable into an aiohttp request handler.

    The incoming request is ignored; the produced handler simply awaits
    *awaitable* and returns its result as the response.
    """

    async def _handler(request: aiohttp.web.Request) -> aiohttp.web.Response:
        return await awaitable()

    return _handler
| 24.873016 | 76 | 0.675175 | import aiohttp.web
import asyncio
import typing
import unittest
import unittest.mock
class AsyncMagicMock(unittest.mock.MagicMock):
async def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
class AsyncGeneratorMock(unittest.mock.MagicMock):
def __init__(self, *args, return_value: typing.Sequence = [], **kwargs):
super().__init__(*args, **kwargs)
self.iter = return_value.__iter__()
self.return_value = self
def __aiter__(self) -> typing.AsyncGenerator:
return self
async def __anext__(self):
try:
return self.iter.__next__()
except StopIteration:
raise StopAsyncIteration
class AsyncTestCase(unittest.TestCase):
def setUp(self):
self.__loop = asyncio.get_event_loop()
self.__loop.run_until_complete(self.setUpAsync())
def tearDown(self):
self.__loop.run_until_complete(self.tearDownAsync())
async def setUpAsync(self) -> None:
pass
async def tearDownAsync(self) -> None:
pass
def unittest_run_loop(coroutine):
def test(*args, **kwargs):
loop = asyncio.get_event_loop()
return loop.run_until_complete(coroutine(*args, **kwargs))
return test
def unittest_handler(awaitable):
async def _handler(req: aiohttp.web.Request) -> aiohttp.web.Response:
return await awaitable()
return _handler
| true | true |
1c3d1382931acac4261b02aa85d074dd209e05c9 | 68 | py | Python | Python Basics/For Loop/Lab/Task02.py | DonikaChervenkova/SoftUni | bff579c037ec48f39ed193b34bc3502a32e90732 | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Basics/For Loop/Lab/Task02.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Basics/For Loop/Lab/Task02.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2021-12-04T12:30:57.000Z | 2021-12-04T12:30:57.000Z | n = int(input())
for number in range(1, n + 1, 3):
print(number) | 22.666667 | 33 | 0.588235 | n = int(input())
for number in range(1, n + 1, 3):
print(number) | true | true |
1c3d13e491754d362060f5e3585aa9afe9ae2b56 | 5,059 | py | Python | doc/conf.py | eduardo-rodrigues/scikit-hep | ef5d04ac6a8d050b323247c784149d3408f6cf20 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | eduardo-rodrigues/scikit-hep | ef5d04ac6a8d050b323247c784149d3408f6cf20 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | eduardo-rodrigues/scikit-hep | ef5d04ac6a8d050b323247c784149d3408f6cf20 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Add the directory above this one, containing the skhep module, to the Python PATH variable.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scikit-HEP'
copyright = u'2016-2018, The Scikit-HEP Developers'
author = u'The Scikit-HEP Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
html_theme_options = {
'github_user': 'scikit-hep',
'github_repo': 'scikit-hep',
'github_type': 'star',
'github_banner': True,
'logo': 'logo.svg',
'fixed_sidebar': True,
'show_powered_by': False,
'link': '#7092C0',
'font_family': 'Source Sans Pro',
'head_font_family': 'Source Serif Pro',
'code_font_family': 'Consolas',
'code_font_size': '0.8em'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-HEPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-HEP.tex', u'Scikit-HEP Documentation',
u'The Scikit-HEP Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-hep', u'Scikit-HEP Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-HEP', u'Scikit-HEP Documentation',
author, 'Scikit-HEP', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/': None
}
| 29.412791 | 93 | 0.659617 |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'nbsphinx',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Scikit-HEP'
copyright = u'2016-2018, The Scikit-HEP Developers'
author = u'The Scikit-HEP Developers'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
html_theme_options = {
'github_user': 'scikit-hep',
'github_repo': 'scikit-hep',
'github_type': 'star',
'github_banner': True,
'logo': 'logo.svg',
'fixed_sidebar': True,
'show_powered_by': False,
'link': '
'font_family': 'Source Sans Pro',
'head_font_family': 'Source Serif Pro',
'code_font_family': 'Consolas',
'code_font_size': '0.8em'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-HEPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-HEP.tex', u'Scikit-HEP Documentation',
u'The Scikit-HEP Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-hep', u'Scikit-HEP Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-HEP', u'Scikit-HEP Documentation',
author, 'Scikit-HEP', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/': None
}
| true | true |
1c3d147f8e7eb64f16ccdd23267265fea188355e | 9,795 | py | Python | stubs.min/System/Windows/Media/__init___parts/GradientStop.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/System/Windows/Media/__init___parts/GradientStop.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Media/__init___parts/GradientStop.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class GradientStop(Animatable,ISealable,IAnimatable,IResource,IFormattable):
"""
Describes the location and color of a transition point in a gradient.
GradientStop()
GradientStop(color: Color,offset: float)
"""
def Clone(self):
"""
Clone(self: GradientStop) -> GradientStop
Creates a modifiable clone of this System.Windows.Media.GradientStop,making
deep copies of this object's values. When copying dependency properties,this
method copies resource references and data bindings (but they might no longer
resolve) but not animations or their current values.
Returns: A modifiable clone of the current object. The cloned object's
System.Windows.Freezable.IsFrozen property will be false even if the source's
System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified
System.Windows.Freezable using base (non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValue(self):
"""
CloneCurrentValue(self: GradientStop) -> GradientStop
Creates a modifiable clone of this System.Windows.Media.GradientStop object,
making deep copies of this object's current values. Resource references,data
bindings,and animations are not copied,but their current values are.
Returns: A modifiable clone of the current object. The cloned object's
System.Windows.Freezable.IsFrozen property will be false even if the source's
System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified
System.Windows.Freezable using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
""" CreateInstanceCore(self: GradientStop) -> Freezable """
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Animatable,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Animatable object unmodifiable or
determines whether it can be made unmodifiable.
isChecking: true if this method should simply determine whether this instance can be
frozen. false if this instance should actually freeze itself when this method
is called.
Returns: If isChecking is true,this method returns true if this
System.Windows.Media.Animation.Animatable can be made unmodifiable,or false if
it cannot be made unmodifiable. If isChecking is false,this method returns
true if the if this System.Windows.Media.Animation.Animatable is now
unmodifiable,or false if it cannot be made unmodifiable,with the side effect
of having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable
using base (non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified
System.Windows.Freezable. If the object has animated dependency properties,
their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure
and is not intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPrope
rtyChangedEventArgs) to also invoke any System.Windows.Freezable.Changed
handlers in response to a changing dependency property of type
System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old
and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid
thread. Inheritors of System.Windows.Freezable must call this method at the
beginning of any API that reads data members that are not dependency
properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize
the value for the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized;
otherwise,false.
ShouldSerializeProperty(self: Window_16$17,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Label_17$18,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: TextBox_18$19,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Button_19$20,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: CheckBox_20$21,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
def ToString(self,provider=None):
"""
ToString(self: GradientStop,provider: IFormatProvider) -> str
Creates a string representation of this object based on the specified
culture-specific formatting information.
provider: Culture specific formatting information,or null to use the current culture.
Returns: A string representation of this object that contains its
System.Windows.Media.GradientStop.Color and
System.Windows.Media.GradientStop.Offset values.
ToString(self: GradientStop) -> str
Creates a string representation of this object based on the current culture.
Returns: A string representation of this object that contains its
System.Windows.Media.GradientStop.Color and
System.Windows.Media.GradientStop.Offset values.
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the
System.Windows.Freezable and invokes its System.Windows.Freezable.OnChanged
method. Classes that derive from System.Windows.Freezable should call this
method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being
accessed from a valid threading context. System.Windows.Freezable inheritors
should call this method at the beginning of any API that writes to data members
that are not dependency properties.
"""
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,color=None,offset=None):
"""
__new__(cls: type)
__new__(cls: type,color: Color,offset: float)
"""
pass
def __str__(self,*args):
pass
Color=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the color of the gradient stop.
Get: Color(self: GradientStop) -> Color
Set: Color(self: GradientStop)=value
"""
Offset=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the location of the gradient stop within the gradient vector.
Get: Offset(self: GradientStop) -> float
Set: Offset(self: GradientStop)=value
"""
ColorProperty=None
OffsetProperty=None
| 39.023904 | 215 | 0.723532 | class GradientStop(Animatable,ISealable,IAnimatable,IResource,IFormattable):
"""
CloneCurrentValue(self: GradientStop) -> GradientStop
Makes the instance a modifiable clone (deep copy) of the specified
System.Windows.Freezable using current property values.
Makes this System.Windows.Media.Animation.Animatable object unmodifiable or
"""
pass
pass
def GetCurrentValueAsFrozenCore(self,*args):
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid
otherwise,false.
ShouldSerializeProperty(self: ComboBox_21$22,dp: DependencyProperty) -> bool
ShouldSerializeProperty(self: Separator_22$23,dp: DependencyProperty) -> bool
"""
pass
pass
def __str__(self,*args):
Get: Offset(self: GradientStop) -> float
| true | true |
1c3d1615225ed51405817fb1a5a00ca0ce6af73d | 41,496 | py | Python | tests/kafkatest/services/kafka/kafka.py | changqing98/kafka | 2094711a2f0e2e5f38b07867ae221d98c077615a | [
"Apache-2.0"
] | 2 | 2020-03-20T06:19:42.000Z | 2020-03-20T06:19:44.000Z | tests/kafkatest/services/kafka/kafka.py | changqing98/kafka | 2094711a2f0e2e5f38b07867ae221d98c077615a | [
"Apache-2.0"
] | 15 | 2020-03-05T00:32:48.000Z | 2022-02-16T00:55:24.000Z | tests/kafkatest/services/kafka/kafka.py | changqing98/kafka | 2094711a2f0e2e5f38b07867ae221d98c077615a | [
"Apache-2.0"
] | 1 | 2021-09-01T08:46:50.000Z | 2021-09-01T08:46:50.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os.path
import re
import signal
import time
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from ducktape.cluster.remoteaccount import RemoteCommandError
from config import KafkaConfig
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import config_property
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.listener_security_config import ListenerSecurityConfig
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0
class KafkaListener:
def __init__(self, name, port_number, security_protocol, open=False):
self.name = name
self.port_number = port_number
self.security_protocol = security_protocol
self.open = open
def listener(self):
return "%s://:%s" % (self.name, str(self.port_number))
def advertised_listener(self, node):
return "%s://%s:%s" % (self.name, node.account.hostname, str(self.port_number))
def listener_security_protocol(self):
return "%s:%s" % (self.name, self.security_protocol)
class KafkaService(KafkaPathResolverMixin, JmxMixin, Service):
PERSISTENT_ROOT = "/mnt/kafka"
STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
# Logs such as controller.log, server.log, etc all go here
OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info")
OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, "debug")
# Kafka log segments etc go here
DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
DATA_LOG_DIR_1 = "%s-1" % (DATA_LOG_DIR_PREFIX)
DATA_LOG_DIR_2 = "%s-2" % (DATA_LOG_DIR_PREFIX)
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
# Kafka Authorizer
ACL_AUTHORIZER = "kafka.security.authorizer.AclAuthorizer"
# Old Kafka Authorizer. This is deprecated but still supported.
SIMPLE_AUTHORIZER = "kafka.security.auth.SimpleAclAuthorizer"
HEAP_DUMP_FILE = os.path.join(PERSISTENT_ROOT, "kafka_heap_dump.bin")
INTERBROKER_LISTENER_NAME = 'INTERNAL'
JAAS_CONF_PROPERTY = "java.security.auth.login.config=/mnt/security/jaas.conf"
KRB5_CONF = "java.security.krb5.conf=/mnt/security/krb5.conf"
logs = {
"kafka_server_start_stdout_stderr": {
"path": STDOUT_STDERR_CAPTURE,
"collect_default": True},
"kafka_operational_logs_info": {
"path": OPERATIONAL_LOG_INFO_DIR,
"collect_default": True},
"kafka_operational_logs_debug": {
"path": OPERATIONAL_LOG_DEBUG_DIR,
"collect_default": False},
"kafka_data_1": {
"path": DATA_LOG_DIR_1,
"collect_default": False},
"kafka_data_2": {
"path": DATA_LOG_DIR_2,
"collect_default": False},
"kafka_heap_dump_file": {
"path": HEAP_DUMP_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None,
zk_client_secure=False,
listener_security_config=ListenerSecurityConfig(), per_node_server_prop_overrides=None, extra_kafka_opts=""):
"""
:param context: test context
:param ZookeeperService zk:
:param dict topics: which topics to create automatically
:param str security_protocol: security protocol for clients to use
:param str interbroker_security_protocol: security protocol to use for broker-to-broker communication
:param str client_sasl_mechanism: sasl mechanism for clients to use
:param str interbroker_sasl_mechanism: sasl mechanism to use for broker-to-broker communication
:param str authorizer_class_name: which authorizer class to use
:param str version: which kafka version to use. Defaults to "dev" branch
:param jmx_object_names:
:param jmx_attributes:
:param int zk_connect_timeout:
:param int zk_session_timeout:
:param dict server_prop_overides: overrides for kafka.properties file
:param zk_chroot:
:param bool zk_client_secure: connect to Zookeeper over secure client port (TLS) when True
:param ListenerSecurityConfig listener_security_config: listener config to use
:param dict per_node_server_prop_overrides:
:param str extra_kafka_opts: jvm args to add to KAFKA_OPTS variable
"""
Service.__init__(self, context, num_nodes)
JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
root=KafkaService.PERSISTENT_ROOT)
self.zk = zk
self.security_protocol = security_protocol
self.client_sasl_mechanism = client_sasl_mechanism
self.topics = topics
self.minikdc = None
self.authorizer_class_name = authorizer_class_name
self.zk_set_acl = False
if server_prop_overides is None:
self.server_prop_overides = []
else:
self.server_prop_overides = server_prop_overides
if per_node_server_prop_overrides is None:
self.per_node_server_prop_overrides = {}
else:
self.per_node_server_prop_overrides = per_node_server_prop_overrides
self.log_level = "DEBUG"
self.zk_chroot = zk_chroot
self.zk_client_secure = zk_client_secure
self.listener_security_config = listener_security_config
self.extra_kafka_opts = extra_kafka_opts
#
# In a heavily loaded and not very fast machine, it is
# sometimes necessary to give more time for the zk client
# to have its session established, especially if the client
# is authenticating and waiting for the SaslAuthenticated
# in addition to the SyncConnected event.
#
# The default value for zookeeper.connect.timeout.ms is
# 2 seconds and here we increase it to 5 seconds, but
# it can be overridden by setting the corresponding parameter
# for this constructor.
self.zk_connect_timeout = zk_connect_timeout
# Also allow the session timeout to be provided explicitly,
# primarily so that test cases can depend on it when waiting
# e.g. brokers to deregister after a hard kill.
self.zk_session_timeout = zk_session_timeout
self.port_mappings = {
'PLAINTEXT': KafkaListener('PLAINTEXT', 9092, 'PLAINTEXT', False),
'SSL': KafkaListener('SSL', 9093, 'SSL', False),
'SASL_PLAINTEXT': KafkaListener('SASL_PLAINTEXT', 9094, 'SASL_PLAINTEXT', False),
'SASL_SSL': KafkaListener('SASL_SSL', 9095, 'SASL_SSL', False),
KafkaService.INTERBROKER_LISTENER_NAME:
KafkaListener(KafkaService.INTERBROKER_LISTENER_NAME, 9099, None, False)
}
self.interbroker_listener = None
self.setup_interbroker_listener(interbroker_security_protocol, self.listener_security_config.use_separate_interbroker_listener)
self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
for node in self.nodes:
node.version = version
node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
def set_version(self, version):
for node in self.nodes:
node.version = version
@property
def interbroker_security_protocol(self):
return self.interbroker_listener.security_protocol
# this is required for backwards compatibility - there are a lot of tests that set this property explicitly
# meaning 'use one of the existing listeners that match given security protocol, do not use custom listener'
@interbroker_security_protocol.setter
def interbroker_security_protocol(self, security_protocol):
self.setup_interbroker_listener(security_protocol, use_separate_listener=False)
def setup_interbroker_listener(self, security_protocol, use_separate_listener=False):
self.listener_security_config.use_separate_interbroker_listener = use_separate_listener
if self.listener_security_config.use_separate_interbroker_listener:
# do not close existing port here since it is not used exclusively for interbroker communication
self.interbroker_listener = self.port_mappings[KafkaService.INTERBROKER_LISTENER_NAME]
self.interbroker_listener.security_protocol = security_protocol
else:
# close dedicated interbroker port, so it's not dangling in 'listeners' and 'advertised.listeners'
self.close_port(KafkaService.INTERBROKER_LISTENER_NAME)
self.interbroker_listener = self.port_mappings[security_protocol]
@property
def security_config(self):
config = SecurityConfig(self.context, self.security_protocol, self.interbroker_listener.security_protocol,
zk_sasl=self.zk.zk_sasl, zk_tls=self.zk_client_secure,
client_sasl_mechanism=self.client_sasl_mechanism,
interbroker_sasl_mechanism=self.interbroker_sasl_mechanism,
listener_security_config=self.listener_security_config)
for port in self.port_mappings.values():
if port.open:
config.enable_security_protocol(port.security_protocol)
return config
def open_port(self, listener_name):
self.port_mappings[listener_name].open = True
def close_port(self, listener_name):
self.port_mappings[listener_name].open = False
def start_minikdc_if_necessary(self, add_principals=""):
if self.security_config.has_sasl:
if self.minikdc is None:
self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)
self.minikdc.start()
else:
self.minikdc = None
def alive(self, node):
return len(self.pids(node)) > 0
def start(self, add_principals="", use_zk_to_create_topic=True):
if self.zk_client_secure and not self.zk.zk_client_secure_port:
raise Exception("Unable to start Kafka: TLS to Zookeeper requested but Zookeeper secure port not enabled")
self.open_port(self.security_protocol)
self.interbroker_listener.open = True
self.start_minikdc_if_necessary(add_principals)
self._ensure_zk_chroot()
Service.start(self)
self.logger.info("Waiting for brokers to register at ZK")
retries = 30
expected_broker_ids = set(self.nodes)
wait_until(lambda: {node for node in self.nodes if self.is_registered(node)} == expected_broker_ids, 30, 1)
if retries == 0:
raise RuntimeError("Kafka servers didn't register at ZK within 30 seconds")
# Create topics if necessary
if self.topics is not None:
for topic, topic_cfg in self.topics.items():
if topic_cfg is None:
topic_cfg = {}
topic_cfg["topic"] = topic
self.create_topic(topic_cfg, use_zk_to_create_topic=use_zk_to_create_topic)
def _ensure_zk_chroot(self):
self.logger.info("Ensuring zk_chroot %s exists", self.zk_chroot)
if self.zk_chroot:
if not self.zk_chroot.startswith('/'):
raise Exception("Zookeeper chroot must start with '/' but found " + self.zk_chroot)
parts = self.zk_chroot.split('/')[1:]
for i in range(len(parts)):
self.zk.create('/' + '/'.join(parts[:i+1]))
def set_protocol_and_port(self, node):
listeners = []
advertised_listeners = []
protocol_map = []
for port in self.port_mappings.values():
if port.open:
listeners.append(port.listener())
advertised_listeners.append(port.advertised_listener(node))
protocol_map.append(port.listener_security_protocol())
self.listeners = ','.join(listeners)
self.advertised_listeners = ','.join(advertised_listeners)
self.listener_security_protocol_map = ','.join(protocol_map)
self.interbroker_bootstrap_servers = self.__bootstrap_servers(self.interbroker_listener, True)
    def prop_file(self, node):
        """Render the kafka.properties contents for `node`.

        Precedence, lowest to highest: template defaults, per-test node config,
        ZK/security settings, service-wide prop overrides, per-node overrides.
        """
        self.set_protocol_and_port(node)
        #load template configs as dictionary
        config_template = self.render('kafka.properties', node=node, broker_id=self.idx(node),
                                      security_config=self.security_config, num_nodes=self.num_nodes,
                                      listener_security_config=self.listener_security_config)
        # Parse "key=value" lines from the rendered template, skipping comments.
        configs = dict( l.rstrip().split('=', 1) for l in config_template.split('\n')
                        if not l.startswith("#") and "=" in l )
        #load specific test override configs
        override_configs = KafkaConfig(**node.config)
        override_configs[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
        override_configs[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()
        if self.zk_client_secure:
            override_configs[config_property.ZOOKEEPER_SSL_CLIENT_ENABLE] = 'true'
            override_configs[config_property.ZOOKEEPER_CLIENT_CNXN_SOCKET] = 'org.apache.zookeeper.ClientCnxnSocketNetty'
        else:
            override_configs[config_property.ZOOKEEPER_SSL_CLIENT_ENABLE] = 'false'
        # Service-wide overrides, then per-node overrides (per-node wins last).
        for prop in self.server_prop_overides:
            override_configs[prop[0]] = prop[1]
        for prop in self.per_node_server_prop_overrides.get(self.idx(node), []):
            override_configs[prop[0]] = prop[1]
        #update template configs with test override configs
        configs.update(override_configs)
        prop_file = self.render_configs(configs)
        return prop_file
def render_configs(self, configs):
"""Render self as a series of lines key=val\n, and do so in a consistent order. """
keys = [k for k in configs.keys()]
keys.sort()
s = ""
for k in keys:
s += "%s=%s\n" % (k, str(configs[k]))
return s
    def start_cmd(self, node):
        """Build the shell command that launches the broker on `node` in the
        background, exporting JMX, log4j, heap-dump and security JVM options."""
        cmd = "export JMX_PORT=%d; " % self.jmx_port
        cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
        heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
                          self.logs["kafka_heap_dump_file"]["path"]
        security_kafka_opts = self.security_config.kafka_opts.strip('\"')
        cmd += "export KAFKA_OPTS=\"%s %s %s\"; " % (heap_kafka_opts, security_kafka_opts, self.extra_kafka_opts)
        # stdout and stderr both appended to the same capture file.
        cmd += "%s %s 1>> %s 2>> %s &" % \
               (self.path.script("kafka-server-start.sh", node),
                KafkaService.CONFIG_FILE,
                KafkaService.STDOUT_STDERR_CAPTURE,
                KafkaService.STDOUT_STDERR_CAPTURE)
        return cmd
    def start_node(self, node, timeout_sec=60):
        """Provision config/credentials on `node`, start the broker, and wait
        up to `timeout_sec` for the startup log line.

        Raises Exception if no broker pid is found after startup.
        """
        node.account.mkdirs(KafkaService.PERSISTENT_ROOT)
        prop_file = self.prop_file(node)
        self.logger.info("kafka.properties:")
        self.logger.info(prop_file)
        node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
        node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
        self.security_config.setup_node(node)
        self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)
        cmd = self.start_cmd(node)
        self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
        with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:
            node.account.ssh(cmd)
            # Kafka 1.0.0 and higher don't have a space between "Kafka" and "Server"
            monitor.wait_until("Kafka\s*Server.*started", timeout_sec=timeout_sec, backoff_sec=.25,
                               err_msg="Kafka server didn't finish startup in %d seconds" % timeout_sec)
        # Credentials for inter-broker communication are created before starting Kafka.
        # Client credentials are created after starting Kafka so that both loading of
        # existing credentials from ZK and dynamic update of credentials in Kafka are tested.
        self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)
        self.start_jmx_tool(self.idx(node), node)
        if len(self.pids(node)) == 0:
            raise Exception("No process ids recorded on node %s" % node.account.hostname)
def pids(self, node):
"""Return process ids associated with running processes on the given node."""
try:
cmd = "jcmd | grep -e %s | awk '{print $1}'" % self.java_class_name()
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (RemoteCommandError, ValueError) as e:
return []
def signal_node(self, node, sig=signal.SIGTERM):
pids = self.pids(node)
for pid in pids:
node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
leader = self.leader(topic, partition)
self.signal_node(leader, sig)
    def stop_node(self, node, clean_shutdown=True, timeout_sec=60):
        """Stop the broker on `node`: SIGTERM when `clean_shutdown`, SIGKILL
        otherwise. Waits up to `timeout_sec` for the process to exit; on
        timeout, dumps threads for diagnosis and re-raises."""
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
        for pid in pids:
            node.account.signal(pid, sig, allow_fail=False)
        try:
            wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=timeout_sec,
                       err_msg="Kafka node failed to stop in %d seconds" % timeout_sec)
        except Exception:
            self.thread_dump(node)
            raise
def thread_dump(self, node):
for pid in self.pids(node):
try:
node.account.signal(pid, signal.SIGQUIT, allow_fail=True)
except:
self.logger.warn("Could not dump threads on node")
    def clean_node(self, node):
        """Kill any leftover broker JVMs and wipe this service's on-disk state
        (configs, logs, data) from `node`."""
        JmxMixin.clean_node(self, node)
        self.security_config.clean_node(node)
        node.account.kill_java_processes(self.java_class_name(),
                                         clean_shutdown=False, allow_fail=True)
        node.account.ssh("sudo rm -rf -- %s" % KafkaService.PERSISTENT_ROOT, allow_fail=False)
    def _kafka_topics_cmd(self, node, use_zk_connection=True):
        """
        Returns kafka-topics.sh command path with jaas configuration and krb5 environment variable
        set. If Admin client is not going to be used, don't set the environment variable.

        Security settings are only needed when talking to brokers via
        --bootstrap-server (AdminClient); plain ZK connections skip them.
        """
        kafka_topic_script = self.path.script("kafka-topics.sh", node)
        skip_security_settings = use_zk_connection or not node.version.topic_command_supports_bootstrap_server()
        return kafka_topic_script if skip_security_settings else \
            "KAFKA_OPTS='-D%s -D%s' %s" % (KafkaService.JAAS_CONF_PROPERTY, KafkaService.KRB5_CONF, kafka_topic_script)
def _kafka_topics_cmd_config(self, node, use_zk_connection=True):
"""
Return --command-config parameter to the kafka-topics.sh command. The config parameter specifies
the security settings that AdminClient uses to connect to a secure kafka server.
"""
skip_command_config = use_zk_connection or not node.version.topic_command_supports_bootstrap_server()
return "" if skip_command_config else " --command-config <(echo '%s')" % (self.security_config.client_config())
    def create_topic(self, topic_cfg, node=None, use_zk_to_create_topic=True):
        """Run the admin tool create topic command.
        Specifying node is optional, and may be done if for different kafka nodes have different versions,
        and we care where command gets run.

        If the node is not specified, run the command from self.nodes[0]

        `topic_cfg` keys used: "topic" (required), and optionally
        "replica-assignment", "partitions", "replication-factor",
        "if-not-exists", "configs".
        """
        if node is None:
            node = self.nodes[0]
        self.logger.info("Creating topic %s with settings %s",
                         topic_cfg["topic"], topic_cfg)
        # --if-not-exists is only supported via the ZK connection path.
        use_zk_connection = topic_cfg.get('if-not-exists', False) or use_zk_to_create_topic

        cmd = "%(kafka_topics_cmd)s %(connection_string)s --create --topic %(topic)s " % {
                'kafka_topics_cmd': self._kafka_topics_cmd(node, use_zk_connection),
                'connection_string': self._connect_setting(node, use_zk_connection),
                'topic': topic_cfg.get("topic"),
           }
        if 'replica-assignment' in topic_cfg:
            cmd += " --replica-assignment %(replica-assignment)s" % {
                'replica-assignment': topic_cfg.get('replica-assignment')
            }
        else:
            cmd += " --partitions %(partitions)d --replication-factor %(replication-factor)d" % {
                'partitions': topic_cfg.get('partitions', 1),
                'replication-factor': topic_cfg.get('replication-factor', 1)
            }
        if topic_cfg.get('if-not-exists', False):
            cmd += ' --if-not-exists'
        if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
            for config_name, config_value in topic_cfg["configs"].items():
                cmd += " --config %s=%s" % (config_name, str(config_value))
        cmd += self._kafka_topics_cmd_config(node, use_zk_connection)
        self.logger.info("Running topic creation command...\n%s" % cmd)
        node.account.ssh(cmd)
    def delete_topic(self, topic, node=None):
        """
        Delete a topic with the topics command

        Always connects via --bootstrap-server (unlike create/describe, which
        may go through ZK).

        :param topic: topic name to delete
        :param node: node to run the command from (defaults to self.nodes[0])
        :return: None
        """
        if node is None:
            node = self.nodes[0]
        self.logger.info("Deleting topic %s" % topic)
        kafka_topic_script = self.path.script("kafka-topics.sh", node)

        cmd = kafka_topic_script + " "
        cmd += "--bootstrap-server %(bootstrap_servers)s --delete --topic %(topic)s " % {
            'bootstrap_servers': self.bootstrap_servers(self.security_protocol),
            'topic': topic
        }
        self.logger.info("Running topic delete command...\n%s" % cmd)
        node.account.ssh(cmd)
def describe_topic(self, topic, node=None, use_zk_to_describe_topic=True):
if node is None:
node = self.nodes[0]
cmd = "%s %s --topic %s --describe %s" % \
(self._kafka_topics_cmd(node=node, use_zk_connection=use_zk_to_describe_topic),
self._connect_setting(node=node, use_zk_connection=use_zk_to_describe_topic),
topic, self._kafka_topics_cmd_config(node=node, use_zk_connection=use_zk_to_describe_topic))
self.logger.info("Running topic describe command...\n%s" % cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
return output
    def list_topics(self, node=None, use_zk_to_list_topic=True):
        """Yield topic names from kafka-topics.sh --list, filtering SLF4J noise."""
        if node is None:
            node = self.nodes[0]
        cmd = "%s %s --list %s" % (self._kafka_topics_cmd(node, use_zk_to_list_topic),
                                   self._connect_setting(node, use_zk_to_list_topic),
                                   self._kafka_topics_cmd_config(node, use_zk_to_list_topic))
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                yield line.rstrip()
    def alter_message_format(self, topic, msg_format_version, node=None):
        """Set message.format.version for `topic` via kafka-configs.sh (over ZK)."""
        if node is None:
            node = self.nodes[0]
        self.logger.info("Altering message format version for topic %s with format %s", topic, msg_format_version)
        cmd = "%s --zookeeper %s %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s" % \
              (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), self.zk.zkTlsConfigFileOption(), topic, msg_format_version)
        self.logger.info("Running alter message format command...\n%s" % cmd)
        node.account.ssh(cmd)
    def set_unclean_leader_election(self, topic, value=True, node=None):
        """Toggle unclean.leader.election.enable for `topic` via kafka-configs.sh (over ZK)."""
        if node is None:
            node = self.nodes[0]
        if value is True:
            self.logger.info("Enabling unclean leader election for topic %s", topic)
        else:
            self.logger.info("Disabling unclean leader election for topic %s", topic)
        cmd = "%s --zookeeper %s %s --entity-name %s --entity-type topics --alter --add-config unclean.leader.election.enable=%s" % \
              (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), self.zk.zkTlsConfigFileOption(), topic, str(value).lower())
        self.logger.info("Running alter unclean leader command...\n%s" % cmd)
        node.account.ssh(cmd)
def parse_describe_topic(self, topic_description):
"""Parse output of kafka-topics.sh --describe (or describe_topic() method above), which is a string of form
PartitionCount:2\tReplicationFactor:2\tConfigs:
Topic: test_topic\ttPartition: 0\tLeader: 3\tReplicas: 3,1\tIsr: 3,1
Topic: test_topic\tPartition: 1\tLeader: 1\tReplicas: 1,2\tIsr: 1,2
into a dictionary structure appropriate for use with reassign-partitions tool:
{
"partitions": [
{"topic": "test_topic", "partition": 0, "replicas": [3, 1]},
{"topic": "test_topic", "partition": 1, "replicas": [1, 2]}
]
}
"""
lines = map(lambda x: x.strip(), topic_description.split("\n"))
partitions = []
for line in lines:
m = re.match(".*Leader:.*", line)
if m is None:
continue
fields = line.split("\t")
# ["Partition: 4", "Leader: 0"] -> ["4", "0"]
fields = map(lambda x: x.split(" ")[1], fields)
partitions.append(
{"topic": fields[0],
"partition": int(fields[1]),
"replicas": map(int, fields[3].split(','))})
return {"partitions": partitions}
    def verify_reassign_partitions(self, reassignment, node=None):
        """Run the reassign partitions admin tool in "verify" mode

        Returns True when the reassignment completed successfully, False when
        it failed or is still in progress.
        """
        if node is None:
            node = self.nodes[0]
        json_file = "/tmp/%s_reassign.json" % str(time.time())

        # reassignment to json
        json_str = json.dumps(reassignment)
        # Double-encoded deliberately: the second dumps() quotes/escapes the
        # JSON so it survives being passed through shell `echo`.
        json_str = json.dumps(json_str)

        # create command
        cmd = "echo %s > %s && " % (json_str, json_file)
        cmd += "%s " % self.path.script("kafka-reassign-partitions.sh", node)
        cmd += "--zookeeper %s " % self.zk_connect_setting()
        cmd += "--reassignment-json-file %s " % json_file
        cmd += "--verify "
        cmd += "&& sleep 1 && rm -f %s" % json_file

        # send command
        self.logger.info("Verifying partition reassignment...")
        self.logger.debug(cmd)
        output = ""
        for line in node.account.ssh_capture(cmd):
            output += line

        self.logger.debug(output)

        if re.match(".*Reassignment of partition.*failed.*",
                    output.replace('\n', '')) is not None:
            return False

        if re.match(".*is still in progress.*",
                    output.replace('\n', '')) is not None:
            return False

        return True
def execute_reassign_partitions(self, reassignment, node=None,
throttle=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "%s " % self.path.script( "kafka-reassign-partitions.sh", node)
cmd += "--zookeeper %s " % self.zk_connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--execute"
if throttle is not None:
cmd += " --throttle %d" % throttle
cmd += " && sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Executing parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug("Verify partition reassignment:")
self.logger.debug(output)
    def search_data_files(self, topic, messages):
        """Check if a set of messages made it into the Kafka data files. Note that
        this method takes no account of replication. It simply looks for the
        payload in all the partition files of the specified topic. 'messages' should be
        an array of numbers. The list of missing messages is returned.
        """
        # Anchor each payload with $ so e.g. "payload: 1" doesn't match "payload: 10".
        payload_match = "payload: " + "$|payload: ".join(str(x) for x in messages) + "$"
        found = set([])
        self.logger.debug("number of unique missing messages we will search for: %d",
                          len(messages))
        for node in self.nodes:
            # Grab all .log files in directories prefixed with this topic
            files = node.account.ssh_capture("find %s* -regex  '.*/%s-.*/[^/]*.log'" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))

            # Check each data file to see if it contains the messages we want
            for log in files:
                cmd = "%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \"%s\"" % \
                      (self.path.script("kafka-run-class.sh", node), log.strip(), payload_match)

                for line in node.account.ssh_capture(cmd, allow_fail=True):
                    for val in messages:
                        if line.strip().endswith("payload: "+str(val)):
                            self.logger.debug("Found %s in data-file [%s] in line: [%s]" % (val, log.strip(), line.strip()))
                            found.add(val)

        self.logger.debug("Number of unique messages found in the log: %d",
                          len(found))
        missing = list(set(messages) - found)

        if len(missing) > 0:
            self.logger.warn("The following values were not found in the data files: " + str(missing))

        return missing
def restart_cluster(self, clean_shutdown=True, timeout_sec=60, after_each_broker_restart=None, *args):
for node in self.nodes:
self.restart_node(node, clean_shutdown=clean_shutdown, timeout_sec=timeout_sec)
if after_each_broker_restart is not None:
after_each_broker_restart(*args)
def restart_node(self, node, clean_shutdown=True, timeout_sec=60):
"""Restart the given node."""
self.stop_node(node, clean_shutdown, timeout_sec)
self.start_node(node, timeout_sec)
    def isr_idx_list(self, topic, partition=0):
        """ Get in-sync replica list the given topic and partition.

        Returns the list of broker indices in the ISR, read from the
        partition-state znode. Raises Exception if the znode is missing.
        """
        self.logger.debug("Querying zookeeper to find in-sync replicas for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)

        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))

        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)

        isr_idx_list = partition_state["isr"]
        self.logger.info("Isr for topic %s and partition %d is now: %s" % (topic, partition, isr_idx_list))
        return isr_idx_list
def replicas(self, topic, partition=0):
""" Get the assigned replicas for the given topic and partition.
"""
self.logger.debug("Querying zookeeper to find assigned replicas for topic %s and partition %d" % (topic, partition))
zk_path = "/brokers/topics/%s" % (topic)
assignment = self.zk.query(zk_path, chroot=self.zk_chroot)
if assignment is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
assignment = json.loads(assignment)
self.logger.info(assignment)
replicas = assignment["partitions"][str(partition)]
self.logger.info("Assigned replicas for topic %s and partition %d is now: %s" % (topic, partition, replicas))
return [self.get_node(replica) for replica in replicas]
    def leader(self, topic, partition=0):
        """ Get the leader replica for the given topic and partition.

        Returns the node object of the current leader, read from the
        partition-state znode. Raises Exception if the znode is missing.
        """
        self.logger.debug("Querying zookeeper to find leader replica for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)

        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))

        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)

        leader_idx = int(partition_state["leader"])
        self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
        return self.get_node(leader_idx)
def cluster_id(self):
""" Get the current cluster id
"""
self.logger.debug("Querying ZooKeeper to retrieve cluster id")
cluster = self.zk.query("/cluster/id", chroot=self.zk_chroot)
try:
return json.loads(cluster)['id'] if cluster else None
except:
self.logger.debug("Data in /cluster/id znode could not be parsed. Data = %s" % cluster)
raise
def check_protocol_errors(self, node):
""" Checks for common protocol exceptions due to invalid inter broker protocol handling.
While such errors can and should be checked in other ways, checking the logs is a worthwhile failsafe.
"""
for node in self.nodes:
exit_code = node.account.ssh("grep -e 'java.lang.IllegalArgumentException: Invalid version' -e SchemaException %s/*"
% KafkaService.OPERATIONAL_LOG_DEBUG_DIR, allow_fail=True)
if exit_code != 1:
return False
return True
    def list_consumer_groups(self, node=None, command_config=None):
        """ Get list of consumer groups.

        `command_config` is an optional path to a client config file passed as
        --command-config. Returns the raw tool output with SLF4J noise removed.
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)

        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config

        cmd = "%s --bootstrap-server %s %s --list" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config)
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                output += line
        self.logger.debug(output)
        return output
    def describe_consumer_group(self, group, node=None, command_config=None):
        """ Describe a consumer group.

        Returns the raw tool output for `group`, filtering SLF4J noise, the
        column-header line, and "Could not fetch offset" warnings.
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)

        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config

        cmd = "%s --bootstrap-server %s %s --group %s --describe" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config, group)

        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            if not (line.startswith("SLF4J") or line.startswith("TOPIC") or line.startswith("Could not fetch offset")):
                output += line
        self.logger.debug(output)
        return output
    def zk_connect_setting(self):
        """Zookeeper connect string for this cluster (honors chroot and TLS client port)."""
        return self.zk.connect_setting(self.zk_chroot, self.zk_client_secure)
def _connect_setting(self, node, use_zk_connection=True):
"""
Checks if --bootstrap-server config is supported, if yes then returns a string with
bootstrap server, otherwise returns zookeeper connection string.
"""
if node.version.topic_command_supports_bootstrap_server() and not use_zk_connection:
connection_setting = "--bootstrap-server %s" % (self.bootstrap_servers(self.security_protocol))
else:
connection_setting = "--zookeeper %s" % (self.zk_connect_setting())
return connection_setting
def __bootstrap_servers(self, port, validate=True, offline_nodes=[]):
if validate and not port.open:
raise ValueError("We are retrieving bootstrap servers for the port: %s which is not currently open. - " %
str(port.port_number))
return ','.join([node.account.hostname + ":" + str(port.port_number)
for node in self.nodes
if node not in offline_nodes])
def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=[]):
"""Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
This is the format expected by many config files.
"""
port_mapping = self.port_mappings[protocol]
self.logger.info("Bootstrap client port is: " + str(port_mapping.port_number))
return self.__bootstrap_servers(port_mapping, validate, offline_nodes)
    def controller(self):
        """ Get the controller node

        Reads the /controller znode and returns the matching node object.
        Raises Exception when no controller is registered.
        """
        self.logger.debug("Querying zookeeper to find controller broker")
        controller_info = self.zk.query("/controller", chroot=self.zk_chroot)

        if controller_info is None:
            raise Exception("Error finding controller info")

        controller_info = json.loads(controller_info)
        self.logger.debug(controller_info)

        controller_idx = int(controller_info["brokerid"])
        self.logger.info("Controller's ID: %d" % (controller_idx))
        return self.get_node(controller_idx)
def is_registered(self, node):
"""
Check whether a broker is registered in Zookeeper
"""
self.logger.debug("Querying zookeeper to see if broker %s is registered", str(node))
broker_info = self.zk.query("/brokers/ids/%s" % self.idx(node), chroot=self.zk_chroot)
self.logger.debug("Broker info: %s", broker_info)
return broker_info is not None
    def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):
        """Run kafka.tools.GetOffsetShell and return its raw output.

        NOTE: the `time` parameter shadows the module-level `time` import
        within this method; kept for interface compatibility.
        """
        node = self.nodes[0]

        cmd = self.path.script("kafka-run-class.sh", node)
        cmd += " kafka.tools.GetOffsetShell"
        cmd += " --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)

        if partitions:
            cmd += '  --partitions %s' % partitions

        cmd += " 2>> %s/get_offset_shell.log" % KafkaService.PERSISTENT_ROOT
        cmd += " | tee -a %s/get_offset_shell.log &" % KafkaService.PERSISTENT_ROOT
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug(output)
        return output
    def java_class_name(self):
        # Broker main class; used by pids()/clean_node() to locate running JVMs.
        return "kafka.Kafka"
| 46.209354 | 171 | 0.646351 |
import collections
import json
import os.path
import re
import signal
import time
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from ducktape.cluster.remoteaccount import RemoteCommandError
from config import KafkaConfig
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import config_property
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.listener_security_config import ListenerSecurityConfig
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0
class KafkaListener:
    """A named Kafka listener: port number, security protocol, open/closed state."""

    def __init__(self, name, port_number, security_protocol, open=False):
        self.name = name
        self.port_number = port_number
        self.security_protocol = security_protocol
        self.open = open

    def listener(self):
        """Listener bound on all interfaces, e.g. 'PLAINTEXT://:9092'."""
        return "{}://:{}".format(self.name, self.port_number)

    def advertised_listener(self, node):
        """Listener advertised for `node`, e.g. 'PLAINTEXT://host:9092'."""
        return "{}://{}:{}".format(self.name, node.account.hostname, self.port_number)

    def listener_security_protocol(self):
        """Protocol-map entry 'listener_name:security_protocol'."""
        return "{}:{}".format(self.name, self.security_protocol)
class KafkaService(KafkaPathResolverMixin, JmxMixin, Service):
    """Ducktape service that provisions, starts and drives a Kafka broker cluster."""
    # All broker state (configs, logs, data) lives under this root on each node.
    PERSISTENT_ROOT = "/mnt/kafka"
    STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log")
    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
    OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
    OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info")
    OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, "debug")
    DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
    DATA_LOG_DIR_1 = "%s-1" % (DATA_LOG_DIR_PREFIX)
    DATA_LOG_DIR_2 = "%s-2" % (DATA_LOG_DIR_PREFIX)
    CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
    # Authorizer class names: ACL_AUTHORIZER for newer brokers, SIMPLE_AUTHORIZER legacy.
    ACL_AUTHORIZER = "kafka.security.authorizer.AclAuthorizer"
    SIMPLE_AUTHORIZER = "kafka.security.auth.SimpleAclAuthorizer"
    HEAP_DUMP_FILE = os.path.join(PERSISTENT_ROOT, "kafka_heap_dump.bin")
    # Name used for the dedicated inter-broker listener when one is configured.
    INTERBROKER_LISTENER_NAME = 'INTERNAL'
    JAAS_CONF_PROPERTY = "java.security.auth.login.config=/mnt/security/jaas.conf"
    KRB5_CONF = "java.security.krb5.conf=/mnt/security/krb5.conf"
    # Log artifacts ducktape collects; collect_default controls whether each is
    # gathered after every run or only on demand.
    logs = {
        "kafka_server_start_stdout_stderr": {
            "path": STDOUT_STDERR_CAPTURE,
            "collect_default": True},
        "kafka_operational_logs_info": {
            "path": OPERATIONAL_LOG_INFO_DIR,
            "collect_default": True},
        "kafka_operational_logs_debug": {
            "path": OPERATIONAL_LOG_DEBUG_DIR,
            "collect_default": False},
        "kafka_data_1": {
            "path": DATA_LOG_DIR_1,
            "collect_default": False},
        "kafka_data_2": {
            "path": DATA_LOG_DIR_2,
            "collect_default": False},
        "kafka_heap_dump_file": {
            "path": HEAP_DUMP_FILE,
            "collect_default": True}
    }
    def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
                 jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None,
                 zk_client_secure=False,
                 listener_security_config=ListenerSecurityConfig(), per_node_server_prop_overrides=None, extra_kafka_opts=""):
        """Create (but do not start) a Kafka cluster service.

        `zk` is the zookeeper service; `topics` maps topic name -> config dict
        created at startup; `server_prop_overides` (sic — name kept for
        compatibility) and `per_node_server_prop_overrides` are lists of
        [key, value] pairs applied to every / individual broker's properties.
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
                          root=KafkaService.PERSISTENT_ROOT)

        self.zk = zk

        self.security_protocol = security_protocol
        self.client_sasl_mechanism = client_sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        self.zk_set_acl = False
        if server_prop_overides is None:
            self.server_prop_overides = []
        else:
            self.server_prop_overides = server_prop_overides
        if per_node_server_prop_overrides is None:
            self.per_node_server_prop_overrides = {}
        else:
            self.per_node_server_prop_overrides = per_node_server_prop_overrides
        self.log_level = "DEBUG"
        self.zk_chroot = zk_chroot
        self.zk_client_secure = zk_client_secure
        self.listener_security_config = listener_security_config
        self.extra_kafka_opts = extra_kafka_opts

        self.zk_connect_timeout = zk_connect_timeout
        self.zk_session_timeout = zk_session_timeout

        # One listener per security protocol, plus a dedicated inter-broker
        # listener; all start closed and are opened on demand.
        self.port_mappings = {
            'PLAINTEXT': KafkaListener('PLAINTEXT', 9092, 'PLAINTEXT', False),
            'SSL': KafkaListener('SSL', 9093, 'SSL', False),
            'SASL_PLAINTEXT': KafkaListener('SASL_PLAINTEXT', 9094, 'SASL_PLAINTEXT', False),
            'SASL_SSL': KafkaListener('SASL_SSL', 9095, 'SASL_SSL', False),
            KafkaService.INTERBROKER_LISTENER_NAME:
                KafkaListener(KafkaService.INTERBROKER_LISTENER_NAME, 9099, None, False)
        }

        self.interbroker_listener = None
        self.setup_interbroker_listener(interbroker_security_protocol, self.listener_security_config.use_separate_interbroker_listener)
        self.interbroker_sasl_mechanism = interbroker_sasl_mechanism

        for node in self.nodes:
            node.version = version
            node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
def set_version(self, version):
for node in self.nodes:
node.version = version
    @property
    def interbroker_security_protocol(self):
        # Security protocol of whichever listener is serving inter-broker traffic.
        return self.interbroker_listener.security_protocol

    @interbroker_security_protocol.setter
    def interbroker_security_protocol(self, security_protocol):
        # Assigning collapses back to a shared (non-separate) inter-broker listener.
        self.setup_interbroker_listener(security_protocol, use_separate_listener=False)
def setup_interbroker_listener(self, security_protocol, use_separate_listener=False):
self.listener_security_config.use_separate_interbroker_listener = use_separate_listener
if self.listener_security_config.use_separate_interbroker_listener:
self.interbroker_listener = self.port_mappings[KafkaService.INTERBROKER_LISTENER_NAME]
self.interbroker_listener.security_protocol = security_protocol
else:
self.close_port(KafkaService.INTERBROKER_LISTENER_NAME)
self.interbroker_listener = self.port_mappings[security_protocol]
    @property
    def security_config(self):
        # Built fresh on each access so it reflects the current open ports.
        config = SecurityConfig(self.context, self.security_protocol, self.interbroker_listener.security_protocol,
                                zk_sasl=self.zk.zk_sasl, zk_tls=self.zk_client_secure,
                                client_sasl_mechanism=self.client_sasl_mechanism,
                                interbroker_sasl_mechanism=self.interbroker_sasl_mechanism,
                                listener_security_config=self.listener_security_config)
        # Enable every protocol that currently has an open listener.
        for port in self.port_mappings.values():
            if port.open:
                config.enable_security_protocol(port.security_protocol)
        return config
    def open_port(self, listener_name):
        # Mark the listener open so generated configs and security_config include it.
        self.port_mappings[listener_name].open = True
    def close_port(self, listener_name):
        # Exclude the listener from generated configs.
        self.port_mappings[listener_name].open = False
def start_minikdc_if_necessary(self, add_principals=""):
if self.security_config.has_sasl:
if self.minikdc is None:
self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)
self.minikdc.start()
else:
self.minikdc = None
    def alive(self, node):
        """True when at least one broker process is running on `node`."""
        return len(self.pids(node)) > 0
def start(self, add_principals="", use_zk_to_create_topic=True):
if self.zk_client_secure and not self.zk.zk_client_secure_port:
raise Exception("Unable to start Kafka: TLS to Zookeeper requested but Zookeeper secure port not enabled")
self.open_port(self.security_protocol)
self.interbroker_listener.open = True
self.start_minikdc_if_necessary(add_principals)
self._ensure_zk_chroot()
Service.start(self)
self.logger.info("Waiting for brokers to register at ZK")
retries = 30
expected_broker_ids = set(self.nodes)
wait_until(lambda: {node for node in self.nodes if self.is_registered(node)} == expected_broker_ids, 30, 1)
if retries == 0:
raise RuntimeError("Kafka servers didn't register at ZK within 30 seconds")
if self.topics is not None:
for topic, topic_cfg in self.topics.items():
if topic_cfg is None:
topic_cfg = {}
topic_cfg["topic"] = topic
self.create_topic(topic_cfg, use_zk_to_create_topic=use_zk_to_create_topic)
def _ensure_zk_chroot(self):
self.logger.info("Ensuring zk_chroot %s exists", self.zk_chroot)
if self.zk_chroot:
if not self.zk_chroot.startswith('/'):
raise Exception("Zookeeper chroot must start with '/' but found " + self.zk_chroot)
parts = self.zk_chroot.split('/')[1:]
for i in range(len(parts)):
self.zk.create('/' + '/'.join(parts[:i+1]))
def set_protocol_and_port(self, node):
listeners = []
advertised_listeners = []
protocol_map = []
for port in self.port_mappings.values():
if port.open:
listeners.append(port.listener())
advertised_listeners.append(port.advertised_listener(node))
protocol_map.append(port.listener_security_protocol())
self.listeners = ','.join(listeners)
self.advertised_listeners = ','.join(advertised_listeners)
self.listener_security_protocol_map = ','.join(protocol_map)
self.interbroker_bootstrap_servers = self.__bootstrap_servers(self.interbroker_listener, True)
    def prop_file(self, node):
        """Render the kafka.properties contents for `node`.

        Precedence, lowest to highest: template defaults, per-test node config,
        ZK/security settings, service-wide prop overrides, per-node overrides.
        """
        self.set_protocol_and_port(node)
        # Load rendered template as a dict of "key=value" lines, skipping comments.
        config_template = self.render('kafka.properties', node=node, broker_id=self.idx(node),
                                      security_config=self.security_config, num_nodes=self.num_nodes,
                                      listener_security_config=self.listener_security_config)
        configs = dict( l.rstrip().split('=', 1) for l in config_template.split('\n')
                        if not l.startswith("#") and "=" in l )
        # Per-test override configs.
        override_configs = KafkaConfig(**node.config)
        override_configs[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
        override_configs[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()
        if self.zk_client_secure:
            override_configs[config_property.ZOOKEEPER_SSL_CLIENT_ENABLE] = 'true'
            override_configs[config_property.ZOOKEEPER_CLIENT_CNXN_SOCKET] = 'org.apache.zookeeper.ClientCnxnSocketNetty'
        else:
            override_configs[config_property.ZOOKEEPER_SSL_CLIENT_ENABLE] = 'false'
        # Service-wide overrides, then per-node overrides (per-node wins last).
        for prop in self.server_prop_overides:
            override_configs[prop[0]] = prop[1]
        for prop in self.per_node_server_prop_overrides.get(self.idx(node), []):
            override_configs[prop[0]] = prop[1]
        configs.update(override_configs)
        prop_file = self.render_configs(configs)
        return prop_file
def render_configs(self, configs):
keys = [k for k in configs.keys()]
keys.sort()
s = ""
for k in keys:
s += "%s=%s\n" % (k, str(configs[k]))
return s
    def start_cmd(self, node):
        """Build the shell command that launches the broker on `node` in the
        background, exporting JMX, log4j, heap-dump and security JVM options."""
        cmd = "export JMX_PORT=%d; " % self.jmx_port
        cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
        heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
                          self.logs["kafka_heap_dump_file"]["path"]
        security_kafka_opts = self.security_config.kafka_opts.strip('\"')
        cmd += "export KAFKA_OPTS=\"%s %s %s\"; " % (heap_kafka_opts, security_kafka_opts, self.extra_kafka_opts)
        # stdout and stderr both appended to the same capture file.
        cmd += "%s %s 1>> %s 2>> %s &" % \
               (self.path.script("kafka-server-start.sh", node),
                KafkaService.CONFIG_FILE,
                KafkaService.STDOUT_STDERR_CAPTURE,
                KafkaService.STDOUT_STDERR_CAPTURE)
        return cmd
def start_node(self, node, timeout_sec=60):
node.account.mkdirs(KafkaService.PERSISTENT_ROOT)
prop_file = self.prop_file(node)
self.logger.info("kafka.properties:")
self.logger.info(prop_file)
node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
self.security_config.setup_node(node)
self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)
cmd = self.start_cmd(node)
self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:
node.account.ssh(cmd)
# Kafka 1.0.0 and higher don't have a space between "Kafka" and "Server"
monitor.wait_until("Kafka\s*Server.*started", timeout_sec=timeout_sec, backoff_sec=.25,
err_msg="Kafka server didn't finish startup in %d seconds" % timeout_sec)
# Credentials for inter-broker communication are created before starting Kafka.
# Client credentials are created after starting Kafka so that both loading of
# existing credentials from ZK and dynamic update of credentials in Kafka are tested.
self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)
self.start_jmx_tool(self.idx(node), node)
if len(self.pids(node)) == 0:
raise Exception("No process ids recorded on node %s" % node.account.hostname)
def pids(self, node):
try:
cmd = "jcmd | grep -e %s | awk '{print $1}'" % self.java_class_name()
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (RemoteCommandError, ValueError) as e:
return []
def signal_node(self, node, sig=signal.SIGTERM):
pids = self.pids(node)
for pid in pids:
node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
leader = self.leader(topic, partition)
self.signal_node(leader, sig)
    def stop_node(self, node, clean_shutdown=True, timeout_sec=60):
        """Stop the broker on *node*.

        SIGTERM for a clean shutdown, SIGKILL otherwise.  If the process
        has not exited within *timeout_sec* seconds, thread dumps are
        captured before the timeout error is re-raised.
        """
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
        for pid in pids:
            node.account.signal(pid, sig, allow_fail=False)
        try:
            wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=timeout_sec,
                       err_msg="Kafka node failed to stop in %d seconds" % timeout_sec)
        except Exception:
            # Grab thread dumps for debugging before propagating the failure.
            self.thread_dump(node)
            raise
def thread_dump(self, node):
for pid in self.pids(node):
try:
node.account.signal(pid, signal.SIGQUIT, allow_fail=True)
except:
self.logger.warn("Could not dump threads on node")
    def clean_node(self, node):
        """Remove all broker state from *node*: JMX artifacts, security
        files, any leftover broker JVMs, and the persistent data root."""
        JmxMixin.clean_node(self, node)
        self.security_config.clean_node(node)
        # Hard-kill any lingering broker processes before wiping data.
        node.account.kill_java_processes(self.java_class_name(),
                                         clean_shutdown=False, allow_fail=True)
        node.account.ssh("sudo rm -rf -- %s" % KafkaService.PERSISTENT_ROOT, allow_fail=False)
def _kafka_topics_cmd(self, node, use_zk_connection=True):
kafka_topic_script = self.path.script("kafka-topics.sh", node)
skip_security_settings = use_zk_connection or not node.version.topic_command_supports_bootstrap_server()
return kafka_topic_script if skip_security_settings else \
"KAFKA_OPTS='-D%s -D%s' %s" % (KafkaService.JAAS_CONF_PROPERTY, KafkaService.KRB5_CONF, kafka_topic_script)
def _kafka_topics_cmd_config(self, node, use_zk_connection=True):
skip_command_config = use_zk_connection or not node.version.topic_command_supports_bootstrap_server()
return "" if skip_command_config else " --command-config <(echo '%s')" % (self.security_config.client_config())
    def create_topic(self, topic_cfg, node=None, use_zk_to_create_topic=True):
        """Create a topic as described by the *topic_cfg* dict.

        Recognized keys: "topic" (required), "replica-assignment",
        "partitions", "replication-factor", "if-not-exists", "configs".
        Defaults to the first node when *node* is not given.
        """
        if node is None:
            node = self.nodes[0]
        self.logger.info("Creating topic %s with settings %s",
                         topic_cfg["topic"], topic_cfg)
        # --if-not-exists is only available on the ZooKeeper code path.
        use_zk_connection = topic_cfg.get('if-not-exists', False) or use_zk_to_create_topic
        cmd = "%(kafka_topics_cmd)s %(connection_string)s --create --topic %(topic)s " % {
            'kafka_topics_cmd': self._kafka_topics_cmd(node, use_zk_connection),
            'connection_string': self._connect_setting(node, use_zk_connection),
            'topic': topic_cfg.get("topic"),
        }
        # Either an explicit replica assignment, or partition/replication counts.
        if 'replica-assignment' in topic_cfg:
            cmd += " --replica-assignment %(replica-assignment)s" % {
                'replica-assignment': topic_cfg.get('replica-assignment')
            }
        else:
            cmd += " --partitions %(partitions)d --replication-factor %(replication-factor)d" % {
                'partitions': topic_cfg.get('partitions', 1),
                'replication-factor': topic_cfg.get('replication-factor', 1)
            }
        if topic_cfg.get('if-not-exists', False):
            cmd += ' --if-not-exists'
        if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
            for config_name, config_value in topic_cfg["configs"].items():
                cmd += " --config %s=%s" % (config_name, str(config_value))
        cmd += self._kafka_topics_cmd_config(node, use_zk_connection)
        self.logger.info("Running topic creation command...\n%s" % cmd)
        node.account.ssh(cmd)
    def delete_topic(self, topic, node=None):
        """Delete *topic* via kafka-topics.sh --delete (bootstrap-server path).

        Defaults to the first node when *node* is not given.
        """
        if node is None:
            node = self.nodes[0]
        self.logger.info("Deleting topic %s" % topic)
        kafka_topic_script = self.path.script("kafka-topics.sh", node)
        cmd = kafka_topic_script + " "
        cmd += "--bootstrap-server %(bootstrap_servers)s --delete --topic %(topic)s " % {
            'bootstrap_servers': self.bootstrap_servers(self.security_protocol),
            'topic': topic
        }
        self.logger.info("Running topic delete command...\n%s" % cmd)
        node.account.ssh(cmd)
def describe_topic(self, topic, node=None, use_zk_to_describe_topic=True):
if node is None:
node = self.nodes[0]
cmd = "%s %s --topic %s --describe %s" % \
(self._kafka_topics_cmd(node=node, use_zk_connection=use_zk_to_describe_topic),
self._connect_setting(node=node, use_zk_connection=use_zk_to_describe_topic),
topic, self._kafka_topics_cmd_config(node=node, use_zk_connection=use_zk_to_describe_topic))
self.logger.info("Running topic describe command...\n%s" % cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
return output
    def list_topics(self, node=None, use_zk_to_list_topic=True):
        """Yield topic names from kafka-topics.sh --list, one per line.

        SLF4J warning lines emitted by the tool are filtered out.
        """
        if node is None:
            node = self.nodes[0]
        cmd = "%s %s --list %s" % (self._kafka_topics_cmd(node, use_zk_to_list_topic),
                                   self._connect_setting(node, use_zk_to_list_topic),
                                   self._kafka_topics_cmd_config(node, use_zk_to_list_topic))
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                yield line.rstrip()
    def alter_message_format(self, topic, msg_format_version, node=None):
        """Set message.format.version for *topic* via kafka-configs.sh (ZooKeeper path)."""
        if node is None:
            node = self.nodes[0]
        self.logger.info("Altering message format version for topic %s with format %s", topic, msg_format_version)
        cmd = "%s --zookeeper %s %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s" % \
              (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), self.zk.zkTlsConfigFileOption(), topic, msg_format_version)
        self.logger.info("Running alter message format command...\n%s" % cmd)
        node.account.ssh(cmd)
    def set_unclean_leader_election(self, topic, value=True, node=None):
        """Enable or disable unclean.leader.election.enable on *topic*
        via kafka-configs.sh (ZooKeeper path)."""
        if node is None:
            node = self.nodes[0]
        if value is True:
            self.logger.info("Enabling unclean leader election for topic %s", topic)
        else:
            self.logger.info("Disabling unclean leader election for topic %s", topic)
        # str(value).lower() renders a Python bool as "true"/"false" for the broker config.
        cmd = "%s --zookeeper %s %s --entity-name %s --entity-type topics --alter --add-config unclean.leader.election.enable=%s" % \
              (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), self.zk.zkTlsConfigFileOption(), topic, str(value).lower())
        self.logger.info("Running alter unclean leader command...\n%s" % cmd)
        node.account.ssh(cmd)
def parse_describe_topic(self, topic_description):
lines = map(lambda x: x.strip(), topic_description.split("\n"))
partitions = []
for line in lines:
m = re.match(".*Leader:.*", line)
if m is None:
continue
fields = line.split("\t")
# ["Partition: 4", "Leader: 0"] -> ["4", "0"]
fields = map(lambda x: x.split(" ")[1], fields)
partitions.append(
{"topic": fields[0],
"partition": int(fields[1]),
"replicas": map(int, fields[3].split(','))})
return {"partitions": partitions}
    def verify_reassign_partitions(self, reassignment, node=None):
        """Check whether a partition reassignment has completed successfully.

        Returns False if the tool reports a failure or an in-progress
        reassignment, True otherwise.
        """
        if node is None:
            node = self.nodes[0]
        json_file = "/tmp/%s_reassign.json" % str(time.time())
        # reassignment to json; the second dumps quotes/escapes the JSON so
        # it survives the shell `echo` intact.
        json_str = json.dumps(reassignment)
        json_str = json.dumps(json_str)
        # create command
        cmd = "echo %s > %s && " % (json_str, json_file)
        cmd += "%s " % self.path.script("kafka-reassign-partitions.sh", node)
        cmd += "--zookeeper %s " % self.zk_connect_setting()
        cmd += "--reassignment-json-file %s " % json_file
        cmd += "--verify "
        cmd += "&& sleep 1 && rm -f %s" % json_file
        # send command
        self.logger.info("Verifying partition reassignment...")
        self.logger.debug(cmd)
        output = ""
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug(output)
        if re.match(".*Reassignment of partition.*failed.*",
                    output.replace('\n', '')) is not None:
            return False
        if re.match(".*is still in progress.*",
                    output.replace('\n', '')) is not None:
            return False
        return True
def execute_reassign_partitions(self, reassignment, node=None,
throttle=None):
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "%s " % self.path.script( "kafka-reassign-partitions.sh", node)
cmd += "--zookeeper %s " % self.zk_connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--execute"
if throttle is not None:
cmd += " --throttle %d" % throttle
cmd += " && sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Executing parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug("Verify partition reassignment:")
self.logger.debug(output)
    def search_data_files(self, topic, messages):
        """Search all brokers' on-disk log segments for *messages*.

        Dumps each data file for *topic* with DumpLogSegments and greps
        for "payload: <value>".  Returns the list of values from
        *messages* that were NOT found in any data file.
        """
        # Regex alternation matching any of the expected payload lines.
        payload_match = "payload: " + "$|payload: ".join(str(x) for x in messages) + "$"
        found = set([])
        self.logger.debug("number of unique missing messages we will search for: %d",
                          len(messages))
        for node in self.nodes:
            # Grab all .log files in directories prefixed with this topic
            files = node.account.ssh_capture("find %s* -regex '.*/%s-.*/[^/]*.log'" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))
            # Check each data file to see if it contains the messages we want
            for log in files:
                cmd = "%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \"%s\"" % \
                      (self.path.script("kafka-run-class.sh", node), log.strip(), payload_match)
                for line in node.account.ssh_capture(cmd, allow_fail=True):
                    for val in messages:
                        if line.strip().endswith("payload: "+str(val)):
                            self.logger.debug("Found %s in data-file [%s] in line: [%s]" % (val, log.strip(), line.strip()))
                            found.add(val)
        self.logger.debug("Number of unique messages found in the log: %d",
                          len(found))
        missing = list(set(messages) - found)
        if len(missing) > 0:
            self.logger.warn("The following values were not found in the data files: " + str(missing))
        return missing
def restart_cluster(self, clean_shutdown=True, timeout_sec=60, after_each_broker_restart=None, *args):
for node in self.nodes:
self.restart_node(node, clean_shutdown=clean_shutdown, timeout_sec=timeout_sec)
if after_each_broker_restart is not None:
after_each_broker_restart(*args)
def restart_node(self, node, clean_shutdown=True, timeout_sec=60):
self.stop_node(node, clean_shutdown, timeout_sec)
self.start_node(node, timeout_sec)
    def isr_idx_list(self, topic, partition=0):
        """Return the broker-id list of in-sync replicas for *topic*/*partition*,
        read from the partition-state znode in ZooKeeper.

        Raises when the partition-state znode does not exist.
        """
        self.logger.debug("Querying zookeeper to find in-sync replicas for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)
        isr_idx_list = partition_state["isr"]
        self.logger.info("Isr for topic %s and partition %d is now: %s" % (topic, partition, isr_idx_list))
        return isr_idx_list
    def replicas(self, topic, partition=0):
        """Return the nodes assigned as replicas for *topic*/*partition*,
        read from the topic's assignment znode in ZooKeeper.

        Raises when the topic znode does not exist.
        """
        self.logger.debug("Querying zookeeper to find assigned replicas for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s" % (topic)
        assignment = self.zk.query(zk_path, chroot=self.zk_chroot)
        if assignment is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        assignment = json.loads(assignment)
        self.logger.info(assignment)
        replicas = assignment["partitions"][str(partition)]
        self.logger.info("Assigned replicas for topic %s and partition %d is now: %s" % (topic, partition, replicas))
        # Map broker ids back to service nodes.
        return [self.get_node(replica) for replica in replicas]
    def leader(self, topic, partition=0):
        """Return the node currently leading *topic*/*partition*, read from
        the partition-state znode in ZooKeeper.

        Raises when the partition-state znode does not exist.
        """
        self.logger.debug("Querying zookeeper to find leader replica for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)
        leader_idx = int(partition_state["leader"])
        self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
        return self.get_node(leader_idx)
def cluster_id(self):
self.logger.debug("Querying ZooKeeper to retrieve cluster id")
cluster = self.zk.query("/cluster/id", chroot=self.zk_chroot)
try:
return json.loads(cluster)['id'] if cluster else None
except:
self.logger.debug("Data in /cluster/id znode could not be parsed. Data = %s" % cluster)
raise
def check_protocol_errors(self, node):
for node in self.nodes:
exit_code = node.account.ssh("grep -e 'java.lang.IllegalArgumentException: Invalid version' -e SchemaException %s/*"
% KafkaService.OPERATIONAL_LOG_DEBUG_DIR, allow_fail=True)
if exit_code != 1:
return False
return True
    def list_consumer_groups(self, node=None, command_config=None):
        """Return raw kafka-consumer-groups.sh --list output.

        *command_config*, when given, is a path passed via --command-config.
        SLF4J warning lines are filtered out of the output.
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config
        cmd = "%s --bootstrap-server %s %s --list" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config)
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                output += line
        self.logger.debug(output)
        return output
    def describe_consumer_group(self, group, node=None, command_config=None):
        """Return raw kafka-consumer-groups.sh --describe output for *group*.

        SLF4J warnings, the TOPIC header row, and offset-fetch warnings
        are filtered out of the output.
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config
        cmd = "%s --bootstrap-server %s %s --group %s --describe" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config, group)
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            if not (line.startswith("SLF4J") or line.startswith("TOPIC") or line.startswith("Could not fetch offset")):
                output += line
        self.logger.debug(output)
        return output
def zk_connect_setting(self):
return self.zk.connect_setting(self.zk_chroot, self.zk_client_secure)
def _connect_setting(self, node, use_zk_connection=True):
if node.version.topic_command_supports_bootstrap_server() and not use_zk_connection:
connection_setting = "--bootstrap-server %s" % (self.bootstrap_servers(self.security_protocol))
else:
connection_setting = "--zookeeper %s" % (self.zk_connect_setting())
return connection_setting
def __bootstrap_servers(self, port, validate=True, offline_nodes=[]):
if validate and not port.open:
raise ValueError("We are retrieving bootstrap servers for the port: %s which is not currently open. - " %
str(port.port_number))
return ','.join([node.account.hostname + ":" + str(port.port_number)
for node in self.nodes
if node not in offline_nodes])
def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=[]):
port_mapping = self.port_mappings[protocol]
self.logger.info("Bootstrap client port is: " + str(port_mapping.port_number))
return self.__bootstrap_servers(port_mapping, validate, offline_nodes)
    def controller(self):
        """Return the node currently acting as cluster controller, read from
        the /controller znode in ZooKeeper.

        Raises when the controller znode does not exist.
        """
        self.logger.debug("Querying zookeeper to find controller broker")
        controller_info = self.zk.query("/controller", chroot=self.zk_chroot)
        if controller_info is None:
            raise Exception("Error finding controller info")
        controller_info = json.loads(controller_info)
        self.logger.debug(controller_info)
        controller_idx = int(controller_info["brokerid"])
        self.logger.info("Controller's ID: %d" % (controller_idx))
        return self.get_node(controller_idx)
def is_registered(self, node):
self.logger.debug("Querying zookeeper to see if broker %s is registered", str(node))
broker_info = self.zk.query("/brokers/ids/%s" % self.idx(node), chroot=self.zk_chroot)
self.logger.debug("Broker info: %s", broker_info)
return broker_info is not None
    def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):
        """Run kafka.tools.GetOffsetShell for *topic* and return its raw output.

        NOTE(review): the *time* parameter shadows any module-level `time`
        import within this method; it is the tool's --time argument.
        """
        node = self.nodes[0]
        cmd = self.path.script("kafka-run-class.sh", node)
        cmd += " kafka.tools.GetOffsetShell"
        cmd += " --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)
        if partitions:
            cmd += ' --partitions %s' % partitions
        # Append stderr to a log file and tee stdout into it as well.
        cmd += " 2>> %s/get_offset_shell.log" % KafkaService.PERSISTENT_ROOT
        cmd += " | tee -a %s/get_offset_shell.log &" % KafkaService.PERSISTENT_ROOT
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug(output)
        return output
def java_class_name(self):
return "kafka.Kafka"
| true | true |
1c3d167d460705b821782cf48d496c4d9b5c8928 | 1,575 | py | Python | linalg_0424.py | Jianyang-Hu/numpypractice | f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9 | [
"Apache-2.0"
] | null | null | null | linalg_0424.py | Jianyang-Hu/numpypractice | f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9 | [
"Apache-2.0"
] | null | null | null | linalg_0424.py | Jianyang-Hu/numpypractice | f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @version : Python3.6
# @Time : 2017/4/24 16:31
# @Author : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File : linalg_0424.py
# @Software: PyCharm
import numpy as np
from numpy import *
import sys
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot
from matplotlib.pyplot import show
# #逆矩阵
# # A = np.mat("1,2,3;4,5,6;7,8,9")
# A = np.mat("1 -2 1;0 2 -8;-4 5 9")
# # inverse = np.linalg.inv(A)
# # print("inverse of A\n",inverse)
#
# #对形如Ax=b的线性方程组求x
# b = np.array([0,-8,9])
# x = np.linalg.solve(A,b)
# print("Solution",x)
# #dot检验
# print("Check b:\n",np.dot(A,x))
#
# #特征值 eig
# print("Eigenvalues:",np.linalg.eigvals(A))
# #奇异值分解 M=U V
# A = np.mat("4 11 14;8,7,-2")
# U,Sigma,V = np.linalg.svd(A,full_matrices=False)
# print("U:\n",U)
# print("Sigma:\n",Sigma)
# print("V:\n",V)
# print("use diag :",U*np.diag(Sigma)*V)#奇异值矩阵
#
# #计算行列式
# B = np.mat("4 11 14;8,7,-2;5,12,3")
# print("Detetminant:",np.linalg.det(B))
#超几何分布
# points = np.zeros(100)
# outcomes = np.random.hypergeometric(25,1,3,size=len(points))
#
# for i in range(len(points)):
# if outcomes[i] == 3:
# points[i] = points[i - 1] + 1
# elif outcomes[i] == 2:
# points[i] = points[i - 1]- 6
# else:
# print(outcomes[i])
#
# plot(np.arange(len(points)),points)
# show()
#连续分布
N = 100000
normal_values = np.random.normal(size=N)
dummy,bins,dummy = plt.hist(normal_values,np.sqrt(N),normed=True,lw=1)
sigma = 1
mu = 0
plt.plot(bins,1/(sigma*np.sqrt(2*np.pi))*np.exp(-(bins - mu)**2/(2*sigma**2)),lw=2)
plt.show() | 23.507463 | 83 | 0.610159 |
import numpy as np
from numpy import *
import sys
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot
from matplotlib.pyplot import show
ormed=True,lw=1)
sigma = 1
mu = 0
plt.plot(bins,1/(sigma*np.sqrt(2*np.pi))*np.exp(-(bins - mu)**2/(2*sigma**2)),lw=2)
plt.show() | true | true |
1c3d16a76838293bfc8c7be2a4f8b3a9445ffebf | 19,563 | py | Python | seahub/api2/endpoints/dtable.py | JIMhackKING/seahub-1 | c10ef67f2118287e6c0d4d194ef27f93ddff0164 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | seahub/api2/endpoints/dtable.py | JIMhackKING/seahub-1 | c10ef67f2118287e6c0d4d194ef27f93ddff0164 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | seahub/api2/endpoints/dtable.py | JIMhackKING/seahub-1 | c10ef67f2118287e6c0d4d194ef27f93ddff0164 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import logging
import time
import jwt
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.response import Response
from django.utils.translation import ugettext as _
from pysearpc import SearpcError
from seaserv import seafile_api, ccnet_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.dtable.models import Workspaces, DTables
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.group.utils import group_id_to_name
from seahub.utils import is_valid_dirent_name, is_org_context, normalize_file_path, \
check_filename_with_rename, gen_file_upload_url
from seahub.settings import MAX_UPLOAD_FILE_NAME_LEN, DTABLE_PRIVATE_KEY
from seahub.dtable.utils import check_dtable_permission
from seahub.constants import PERMISSION_ADMIN, PERMISSION_READ_WRITE
# Module-level logger for this API module.
logger = logging.getLogger(__name__)
# Suffix appended to a table name to form its backing file name.
FILE_TYPE = '.dtable'
# Share permissions that allow modifying a dtable (e.g. asset uploads).
WRITE_PERMISSION_TUPLE = (PERMISSION_READ_WRITE, PERMISSION_ADMIN)
class WorkspacesView(APIView):
    """List all dtable workspaces visible to the current user."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )
    throttle_classes = (UserRateThrottle, )
    def get(self, request):
        """get all workspaces

        One workspace per owner: the user personally, plus one per group
        the user belongs to ("<group_id>@seafile_group" owners).  A
        missing personal workspace is created lazily, backed by a new
        "My Workspace" library.
        """
        username = request.user.username
        org_id = -1
        if is_org_context(request):
            org_id = request.user.org.org_id
        if org_id and org_id > 0:
            groups = ccnet_api.get_org_groups_by_user(org_id, username)
        else:
            groups = ccnet_api.get_groups(username, return_ancestors=True)
        # Candidate owners: the user plus each group pseudo-user.
        owner_list = list()
        owner_list.append(username)
        for group in groups:
            group_user = '%s@seafile_group' % group.id
            owner_list.append(group_user)
        workspace_list = list()
        for owner in owner_list:
            try:
                workspace = Workspaces.objects.get_workspace_by_owner(owner)
            except Exception as e:
                logger.error(e)
                error_msg = 'Internal Server Error.'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
            if not workspace:
                # Group workspaces are not auto-created here; skip them.
                if '@seafile_group' in owner:
                    continue
                # permission check
                if not request.user.permissions.can_add_repo():
                    error_msg = 'Permission denied.'
                    return api_error(status.HTTP_403_FORBIDDEN, error_msg)
                # Lazily create the personal workspace's backing library.
                try:
                    if org_id and org_id > 0:
                        repo_id = seafile_api.create_org_repo(
                            _("My Workspace"),
                            _("My Workspace"),
                            "dtable@seafile",
                            org_id
                        )
                    else:
                        repo_id = seafile_api.create_repo(
                            _("My Workspace"),
                            _("My Workspace"),
                            "dtable@seafile"
                        )
                except Exception as e:
                    logger.error(e)
                    error_msg = 'Internal Server Error.'
                    return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
                workspace = Workspaces.objects.create_workspace(owner, repo_id)
            # resource check
            repo_id = workspace.repo_id
            repo = seafile_api.get_repo(repo_id)
            if not repo:
                # Backing library vanished; skip rather than fail the listing.
                logger.warning('Library %s not found.' % repo_id)
                continue
            res = workspace.to_dict()
            table_list = DTables.objects.get_dtable_by_workspace(workspace)
            res["table_list"] = table_list
            if '@seafile_group' in owner:
                group_id = owner.split('@')[0]
                res["owner_name"] = group_id_to_name(group_id)
                res["owner_type"] = "Group"
            else:
                res["owner_name"] = email2nickname(owner)
                res["owner_type"] = "Personal"
            workspace_list.append(res)
        return Response({"workspace_list": workspace_list}, status=status.HTTP_200_OK)
class DTablesView(APIView):
    """Create dtable files inside a workspace."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )
    throttle_classes = (UserRateThrottle, )
    def post(self, request):
        """create a table file

        Creates an empty "<name>.dtable" file in the owner's workspace
        library (creating the workspace lazily if needed) plus the
        corresponding DTables database record.

        Permission:
        1. owner
        2. group member
        """
        # argument check
        table_owner = request.POST.get('owner')
        if not table_owner:
            error_msg = 'owner invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        table_name = request.POST.get('name')
        if not table_name:
            error_msg = 'name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        table_file_name = table_name + FILE_TYPE
        if not is_valid_dirent_name(table_file_name):
            error_msg = 'name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        # resource check
        workspace = Workspaces.objects.get_workspace_by_owner(table_owner)
        if not workspace:
            # Lazily create the workspace and its backing library.
            if not request.user.permissions.can_add_repo():
                error_msg = 'Permission denied.'
                return api_error(status.HTTP_403_FORBIDDEN, error_msg)
            org_id = -1
            if is_org_context(request):
                org_id = request.user.org.org_id
            try:
                if org_id and org_id > 0:
                    repo_id = seafile_api.create_org_repo(
                        _("My Workspace"),
                        _("My Workspace"),
                        "dtable@seafile",
                        org_id
                    )
                else:
                    repo_id = seafile_api.create_repo(
                        _("My Workspace"),
                        _("My Workspace"),
                        "dtable@seafile"
                    )
            except Exception as e:
                logger.error(e)
                error_msg = 'Internal Server Error.'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
            try:
                workspace = Workspaces.objects.create_workspace(table_owner, repo_id)
            except Exception as e:
                logger.error(e)
                error_msg = 'Internal Server Error.'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        repo_id = workspace.repo_id
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check
        username = request.user.username
        if not check_dtable_permission(username, workspace):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # repo status check (non-zero status means the library is read-only, e.g. frozen)
        repo_status = repo.status
        if repo_status != 0:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # create new empty table
        table_file_name = check_filename_with_rename(repo_id, '/', table_file_name)
        try:
            seafile_api.post_empty_file(repo_id, '/', table_file_name, username)
        except SearpcError as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        try:
            dtable = DTables.objects.create_dtable(username, workspace, table_name)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        return Response({"table": dtable.to_dict()}, status=status.HTTP_201_CREATED)
class DTableView(APIView):
    """Rename or delete a dtable inside a workspace."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )
    throttle_classes = (UserRateThrottle, )
    def put(self, request, workspace_id):
        """rename a table

        Renames both the backing ".dtable" file in the library and the
        DTables database record.

        Permission:
        1. owner
        2. group member
        """
        # argument check
        old_table_name = request.data.get('old_name')
        if not old_table_name:
            error_msg = 'old_name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        new_table_name = request.data.get('new_name')
        if not new_table_name:
            error_msg = 'new_name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        new_table_file_name = new_table_name + FILE_TYPE
        if not is_valid_dirent_name(new_table_file_name):
            error_msg = 'new_name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        if len(new_table_file_name) > MAX_UPLOAD_FILE_NAME_LEN:
            error_msg = 'new_name is too long.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        # resource check
        workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
        if not workspace:
            error_msg = 'Workspace %s not found.' % workspace_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        repo_id = workspace.repo_id
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        dtable = DTables.objects.get_dtable(workspace, old_table_name)
        if not dtable:
            error_msg = 'dtable %s not found.' % old_table_name
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        old_table_file_name = old_table_name + FILE_TYPE
        old_table_path = normalize_file_path(old_table_file_name)
        table_file_id = seafile_api.get_file_id_by_path(repo_id, old_table_path)
        if not table_file_id:
            error_msg = 'file %s not found.' % old_table_file_name
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check
        username = request.user.username
        if not check_dtable_permission(username, workspace):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # repo status check (non-zero status means the library is read-only)
        repo_status = repo.status
        if repo_status != 0:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # rename table file first, then the database record
        new_table_file_name = check_filename_with_rename(repo_id, '/', new_table_file_name)
        try:
            seafile_api.rename_file(repo_id, '/', old_table_file_name, new_table_file_name, username)
        except SearpcError as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        try:
            dtable.name = new_table_name
            dtable.modifier = username
            dtable.save()
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error.'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        return Response({"table": dtable.to_dict()}, status=status.HTTP_200_OK)
    def delete(self, request, workspace_id):
        """delete a table

        Removes the table's asset directory, the backing ".dtable" file,
        and the DTables database record.

        Permission:
        1. owner
        2. group member
        """
        # argument check
        table_name = request.data.get('name')
        if not table_name:
            error_msg = 'name invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        table_file_name = table_name + FILE_TYPE
        # resource check
        workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
        if not workspace:
            error_msg = 'Workspace %s not found.' % workspace_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        repo_id = workspace.repo_id
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        dtable = DTables.objects.get_dtable(workspace, table_name)
        if not dtable:
            error_msg = 'dtable %s not found.' % table_name
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        table_path = normalize_file_path(table_file_name)
        table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
        if not table_file_id:
            error_msg = 'file %s not found.' % table_file_name
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check
        username = request.user.username
        if not check_dtable_permission(username, workspace):
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # repo status check (non-zero status means the library is read-only)
        repo_status = repo.status
        if repo_status != 0:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # delete asset directory for this table, if it exists
        asset_dir_path = '/asset/' + str(dtable.uuid)
        asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
        if asset_dir_id:
            parent_dir = os.path.dirname(asset_dir_path)
            file_name = os.path.basename(asset_dir_path)
            try:
                seafile_api.del_file(repo_id, parent_dir, file_name, username)
            except SearpcError as e:
                logger.error(e)
                error_msg = 'Internal Server Error'
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        # delete table file, then the database record
        try:
            seafile_api.del_file(repo_id, '/', table_file_name, username)
        except SearpcError as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        try:
            DTables.objects.delete_dtable(workspace, table_name)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error.'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        return Response({'success': True}, status=status.HTTP_200_OK)
class DTableAssetUploadLinkView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id):
"""get table file upload link
Permission:
1. owner
2. group member
3. shared user with `rw` or `admin` permission
"""
# argument check
table_name = request.GET.get('name', None)
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if check_dtable_permission(username, workspace, dtable) not in WRITE_PERMISSION_TUPLE:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
token = seafile_api.get_fileserver_access_token(repo_id, 'dummy', 'upload',
'', use_onetime=False)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
upload_link = gen_file_upload_url(token, 'upload-api')
# create asset dir
asset_dir_path = '/asset/' + str(dtable.uuid)
asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
if not asset_dir_id:
seafile_api.mkdir_with_parents(repo_id, '/', asset_dir_path[1:], username)
dtable.modifier = username
dtable.save()
res = dict()
res['upload_link'] = upload_link
res['parent_path'] = asset_dir_path
return Response(res)
class DTableAccessTokenView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id, name):
"""get dtable access token
"""
table_name = name
table_file_name = table_name + FILE_TYPE
# resource check
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
if not check_dtable_permission(username, workspace, dtable):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# generate json web token
payload = {
'exp': int(time.time()) + 86400 * 3,
'dtable_uuid': dtable.uuid.hex,
'username': username,
}
try:
access_token = jwt.encode(
payload, DTABLE_PRIVATE_KEY, algorithm='HS256'
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'access_token': access_token})
| 37.051136 | 101 | 0.623882 |
import os
import logging
import time
import jwt
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.response import Response
from django.utils.translation import ugettext as _
from pysearpc import SearpcError
from seaserv import seafile_api, ccnet_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.dtable.models import Workspaces, DTables
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.group.utils import group_id_to_name
from seahub.utils import is_valid_dirent_name, is_org_context, normalize_file_path, \
check_filename_with_rename, gen_file_upload_url
from seahub.settings import MAX_UPLOAD_FILE_NAME_LEN, DTABLE_PRIVATE_KEY
from seahub.dtable.utils import check_dtable_permission
from seahub.constants import PERMISSION_ADMIN, PERMISSION_READ_WRITE
logger = logging.getLogger(__name__)
FILE_TYPE = '.dtable'
WRITE_PERMISSION_TUPLE = (PERMISSION_READ_WRITE, PERMISSION_ADMIN)
class WorkspacesView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request):
username = request.user.username
org_id = -1
if is_org_context(request):
org_id = request.user.org.org_id
if org_id and org_id > 0:
groups = ccnet_api.get_org_groups_by_user(org_id, username)
else:
groups = ccnet_api.get_groups(username, return_ancestors=True)
owner_list = list()
owner_list.append(username)
for group in groups:
group_user = '%s@seafile_group' % group.id
owner_list.append(group_user)
workspace_list = list()
for owner in owner_list:
try:
workspace = Workspaces.objects.get_workspace_by_owner(owner)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not workspace:
if '@seafile_group' in owner:
continue
if not request.user.permissions.can_add_repo():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
if org_id and org_id > 0:
repo_id = seafile_api.create_org_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile",
org_id
)
else:
repo_id = seafile_api.create_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile"
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
workspace = Workspaces.objects.create_workspace(owner, repo_id)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
logger.warning('Library %s not found.' % repo_id)
continue
res = workspace.to_dict()
table_list = DTables.objects.get_dtable_by_workspace(workspace)
res["table_list"] = table_list
if '@seafile_group' in owner:
group_id = owner.split('@')[0]
res["owner_name"] = group_id_to_name(group_id)
res["owner_type"] = "Group"
else:
res["owner_name"] = email2nickname(owner)
res["owner_type"] = "Personal"
workspace_list.append(res)
return Response({"workspace_list": workspace_list}, status=status.HTTP_200_OK)
class DTablesView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def post(self, request):
table_owner = request.POST.get('owner')
if not table_owner:
error_msg = 'owner invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_name = request.POST.get('name')
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_file_name = table_name + FILE_TYPE
if not is_valid_dirent_name(table_file_name):
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
workspace = Workspaces.objects.get_workspace_by_owner(table_owner)
if not workspace:
if not request.user.permissions.can_add_repo():
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
org_id = -1
if is_org_context(request):
org_id = request.user.org.org_id
try:
if org_id and org_id > 0:
repo_id = seafile_api.create_org_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile",
org_id
)
else:
repo_id = seafile_api.create_repo(
_("My Workspace"),
_("My Workspace"),
"dtable@seafile"
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
workspace = Workspaces.objects.create_workspace(table_owner, repo_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
table_file_name = check_filename_with_rename(repo_id, '/', table_file_name)
try:
seafile_api.post_empty_file(repo_id, '/', table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
dtable = DTables.objects.create_dtable(username, workspace, table_name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({"table": dtable.to_dict()}, status=status.HTTP_201_CREATED)
class DTableView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def put(self, request, workspace_id):
old_table_name = request.data.get('old_name')
if not old_table_name:
error_msg = 'old_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
new_table_name = request.data.get('new_name')
if not new_table_name:
error_msg = 'new_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
new_table_file_name = new_table_name + FILE_TYPE
if not is_valid_dirent_name(new_table_file_name):
error_msg = 'new_name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if len(new_table_file_name) > MAX_UPLOAD_FILE_NAME_LEN:
error_msg = 'new_name is too long.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, old_table_name)
if not dtable:
error_msg = 'dtable %s not found.' % old_table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
old_table_file_name = old_table_name + FILE_TYPE
old_table_path = normalize_file_path(old_table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, old_table_path)
if not table_file_id:
error_msg = 'file %s not found.' % old_table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
new_table_file_name = check_filename_with_rename(repo_id, '/', new_table_file_name)
try:
seafile_api.rename_file(repo_id, '/', old_table_file_name, new_table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
dtable.name = new_table_name
dtable.modifier = username
dtable.save()
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({"table": dtable.to_dict()}, status=status.HTTP_200_OK)
def delete(self, request, workspace_id):
table_name = request.data.get('name')
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
table_file_name = table_name + FILE_TYPE
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not check_dtable_permission(username, workspace):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_status = repo.status
if repo_status != 0:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
asset_dir_path = '/asset/' + str(dtable.uuid)
asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
if asset_dir_id:
parent_dir = os.path.dirname(asset_dir_path)
file_name = os.path.basename(asset_dir_path)
try:
seafile_api.del_file(repo_id, parent_dir, file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
seafile_api.del_file(repo_id, '/', table_file_name, username)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
try:
DTables.objects.delete_dtable(workspace, table_name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error.'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True}, status=status.HTTP_200_OK)
class DTableAssetUploadLinkView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id):
table_name = request.GET.get('name', None)
if not table_name:
error_msg = 'name invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if check_dtable_permission(username, workspace, dtable) not in WRITE_PERMISSION_TUPLE:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
token = seafile_api.get_fileserver_access_token(repo_id, 'dummy', 'upload',
'', use_onetime=False)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
upload_link = gen_file_upload_url(token, 'upload-api')
asset_dir_path = '/asset/' + str(dtable.uuid)
asset_dir_id = seafile_api.get_dir_id_by_path(repo_id, asset_dir_path)
if not asset_dir_id:
seafile_api.mkdir_with_parents(repo_id, '/', asset_dir_path[1:], username)
dtable.modifier = username
dtable.save()
res = dict()
res['upload_link'] = upload_link
res['parent_path'] = asset_dir_path
return Response(res)
class DTableAccessTokenView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def get(self, request, workspace_id, name):
table_name = name
table_file_name = table_name + FILE_TYPE
workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
if not workspace:
error_msg = 'Workspace %s not found.' % workspace_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
repo_id = workspace.repo_id
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
dtable = DTables.objects.get_dtable(workspace, table_name)
if not dtable:
error_msg = 'dtable %s not found.' % table_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
table_path = normalize_file_path(table_file_name)
table_file_id = seafile_api.get_file_id_by_path(repo_id, table_path)
if not table_file_id:
error_msg = 'file %s not found.' % table_file_name
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not check_dtable_permission(username, workspace, dtable):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
payload = {
'exp': int(time.time()) + 86400 * 3,
'dtable_uuid': dtable.uuid.hex,
'username': username,
}
try:
access_token = jwt.encode(
payload, DTABLE_PRIVATE_KEY, algorithm='HS256'
)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'access_token': access_token})
| true | true |
1c3d17c860a284bbd0ff5cd1356a20460ce0be43 | 5,809 | py | Python | JSON2OWL/otb2owl/otb2owl.py | houzw/knowledge-base-data | 60771e8bf300227e1a26c9e77f56b09d23acd64a | [
"MIT"
] | null | null | null | JSON2OWL/otb2owl/otb2owl.py | houzw/knowledge-base-data | 60771e8bf300227e1a26c9e77f56b09d23acd64a | [
"MIT"
] | null | null | null | JSON2OWL/otb2owl/otb2owl.py | houzw/knowledge-base-data | 60771e8bf300227e1a26c9e77f56b09d23acd64a | [
"MIT"
] | 1 | 2018-12-17T06:40:53.000Z | 2018-12-17T06:40:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2019/2/20 22:11
# TODO
from owlready2 import *
import json
import re
from JSON2OWL.OwlConvert.OwlUtils import OWLUtils
from JSON2OWL.OwlConvert.Preprocessor import Preprocessor
import datetime
module_uri = 'http://www.egc.org/ont/process/otb'
onto = get_ontology(module_uri)
# onto, skos, dcterms, props = OWLUtils.load_common(onto)
onto, shacl, skos, dcterms, props, foaf = OWLUtils.load_common(onto)
onto, geospatial = OWLUtils.load_geo_vocabl(onto)
onto, gb, task, data, cyber, context = OWLUtils.load_common_for_process_tool(onto)
print('ontologies imported')
with onto:
class OTBTool(gb.GeoprocessingFunctionality):
pass
class OTBInput(cyber.Input):
pass
class OTBOutput(cyber.Output):
pass
class OTBConstraint(cyber.Constraint):
pass
class OTBAvailableChoice(cyber.AvailableChoice):
pass
class OTBOption(cyber.Option):
pass
onto.metadata.creator.append('houzhiwei')
onto.metadata.title.append('Orfeo-Toolbox Tools')
onto.metadata.created.append(datetime.datetime.today())
onto.metadata.versionInfo.append('6.6.1')
def handle_task(tool, category, task_name, des):
config = OWLUtils.get_config(module_path + '/config.ini')
task_cls = config.get('task', category)
# avoid duplicate
i_task_name = task_name.replace(' ', '_')
if not task[i_task_name + "_task"]:
task_ins = task[task_cls](i_task_name + "_task", prefLabel=locstr(task_name + " task", lang='en'))
task_ins.isAtomicTask = True
task_ins.identifier = i_task_name
else:
task_ins = task[i_task_name + "_task"]
if (task_ins in tool.usedByTask) is False:
tool.usedByTask.append(task_ins)
if (tool in tool.processingTool) is False:
task_ins.processingTool.append(tool)
task_ins.description.append(locstr(des, lang='en'))
def get_datatype(k):
config = OWLUtils.get_config(module_path + '/config.ini')
_type = OWLUtils.get_option(config, 'datatype', k)
if _type is None:
return 'http://www.w3.org/2001/XMLSchema#string'
else:
return _type
def handle_parameters(tool, param):
# 部分parameter不包含isInputFile等属性
p = None
parameterName = param['parameterName']
_name = Preprocessor.io_name(parameterName,onto)
if 'isInputFile' in param.keys() and param['isInputFile']:
p = OTBInput(_name,prefLabel=locstr(parameterName, lang='en'))
# p = OTBInput(0, prefLabel=locstr(param['name'], lang='en'))
tool.input.append(p)
p.isInput = param['isInputFile']
p.supportsDataFormat.append(data.GeoTIFF)
OWLUtils.link_to_domain_concept(p, parameterName.replace('_', ' '))
elif 'isOutputFile' in param.keys() and param['isOutputFile']:
p = OTBOutput(_name,prefLabel=locstr(parameterName, lang='en'))
# p = OTBOutput(0, prefLabel=locstr(parameterName, lang='en'))
tool.output.append(p)
p.isOutput = param['isOutputFile']
p.supportsDataFormat.append(data.GeoTIFF)
OWLUtils.link_to_domain_concept(p, parameterName.replace('_', ' '))
p.flag = param['flag']
p.identifier = parameterName
if 'dataType' in param.keys() and param['dataType']:
p.datatypeInString.append(param['dataType'])
p.datatype.append(OWLUtils.get_datatype_iris(param['dataType']))
p.description.append(locstr(' '.join(param['explanation']), lang='en'))
# p.isOptional = param['isOptional'] # no this information in document
def handle_options(tool, param, _onto):
parameterName = param['parameterName']
_name = Preprocessor.io_name(parameterName,_onto)
o = OTBOption(_name,prefLabel=locstr(parameterName, lang='en'))
# p = OTBOption(0, prefLabel=locstr(parameterName, lang='en'))
tool.option.append(o)
o.identifier = parameterName
if 'dataType' in param.keys() and param['dataType']:
if param['dataType'] == "Choices":
o.datatypeInString.append('String')
o.datatypeInString.append(param['dataType'])
# sc.datatype.append(IRIS[get_datatype(param['dataType'])])
o.description.append(''.join(param['explanation']))
# p.isOptional = param['isOptional']
if 'availableChoices' in param.keys() and param['availableChoices']:
o, onto = OWLUtils.handle_choices(o, parameterName, param['availableChoices'], OTBAvailableChoice, _onto)
def map_to_owl(json_data):
for d in json_data:
"""mapping json data to ontology properties"""
if d['category'] == 'Deprecated':
continue
name_str = d['name']
toolClass = tool_class(d['category'])
tool = toolClass(name_str, prefLabel=locstr(d['label'], lang='en'))
OWLUtils.application_category(tool, [], d['category'], [])
tool.isToolOfSoftware.append(cyber.OrfeoToolBox)
tool.identifier = name_str
tool.manualPageURL.append(normstr(d['manual_url']))
tool.executable = d['command']
tool.description.append(locstr(d['description'], lang='en'))
tool.definition.append(d['definition'])
keywords = OWLUtils.to_keywords(d['description'])
keywords.extend(d['label'].split(" "))
# keywords=d['label'].split(" ")
OWLUtils.link_to_domain_concept(tool, keywords)
if d['authors']:
tool.authors.append(d['authors'])
for ex in d['example']:
tool.example.append(ex.replace(' . ','.'))
handle_task(tool, d['category'], d['label'], d['description'])
for parameter in d['parameters']:
handle_parameters(tool, parameter)
for option in d['options']:
handle_options(tool, option, onto)
def tool_class(category):
tool_cls = category.replace(' ', '') + 'Tool'
return OWLUtils.create_onto_class(onto, tool_cls, OTBTool)
if __name__ == "__main__":
module_path = os.path.dirname(__file__)
with open(module_path + '/otb.json', 'r') as f:
jdata = json.load(f) # list
# print(len(jdata))
# otherwise will report stack overflow exception
threading.stack_size(2000000)
thread = threading.Thread(target=map_to_owl(jdata))
thread.start()
onto.save(file='otb.owl', format="rdfxml")
# update task ontology
task.save()
print('OTB Done!')
| 33.005682 | 107 | 0.731107 |
from owlready2 import *
import json
import re
from JSON2OWL.OwlConvert.OwlUtils import OWLUtils
from JSON2OWL.OwlConvert.Preprocessor import Preprocessor
import datetime
module_uri = 'http://www.egc.org/ont/process/otb'
onto = get_ontology(module_uri)
onto, shacl, skos, dcterms, props, foaf = OWLUtils.load_common(onto)
onto, geospatial = OWLUtils.load_geo_vocabl(onto)
onto, gb, task, data, cyber, context = OWLUtils.load_common_for_process_tool(onto)
print('ontologies imported')
with onto:
class OTBTool(gb.GeoprocessingFunctionality):
pass
class OTBInput(cyber.Input):
pass
class OTBOutput(cyber.Output):
pass
class OTBConstraint(cyber.Constraint):
pass
class OTBAvailableChoice(cyber.AvailableChoice):
pass
class OTBOption(cyber.Option):
pass
onto.metadata.creator.append('houzhiwei')
onto.metadata.title.append('Orfeo-Toolbox Tools')
onto.metadata.created.append(datetime.datetime.today())
onto.metadata.versionInfo.append('6.6.1')
def handle_task(tool, category, task_name, des):
config = OWLUtils.get_config(module_path + '/config.ini')
task_cls = config.get('task', category)
i_task_name = task_name.replace(' ', '_')
if not task[i_task_name + "_task"]:
task_ins = task[task_cls](i_task_name + "_task", prefLabel=locstr(task_name + " task", lang='en'))
task_ins.isAtomicTask = True
task_ins.identifier = i_task_name
else:
task_ins = task[i_task_name + "_task"]
if (task_ins in tool.usedByTask) is False:
tool.usedByTask.append(task_ins)
if (tool in tool.processingTool) is False:
task_ins.processingTool.append(tool)
task_ins.description.append(locstr(des, lang='en'))
def get_datatype(k):
config = OWLUtils.get_config(module_path + '/config.ini')
_type = OWLUtils.get_option(config, 'datatype', k)
if _type is None:
return 'http://www.w3.org/2001/XMLSchema#string'
else:
return _type
def handle_parameters(tool, param):
p = None
parameterName = param['parameterName']
_name = Preprocessor.io_name(parameterName,onto)
if 'isInputFile' in param.keys() and param['isInputFile']:
p = OTBInput(_name,prefLabel=locstr(parameterName, lang='en'))
tool.input.append(p)
p.isInput = param['isInputFile']
p.supportsDataFormat.append(data.GeoTIFF)
OWLUtils.link_to_domain_concept(p, parameterName.replace('_', ' '))
elif 'isOutputFile' in param.keys() and param['isOutputFile']:
p = OTBOutput(_name,prefLabel=locstr(parameterName, lang='en'))
tool.output.append(p)
p.isOutput = param['isOutputFile']
p.supportsDataFormat.append(data.GeoTIFF)
OWLUtils.link_to_domain_concept(p, parameterName.replace('_', ' '))
p.flag = param['flag']
p.identifier = parameterName
if 'dataType' in param.keys() and param['dataType']:
p.datatypeInString.append(param['dataType'])
p.datatype.append(OWLUtils.get_datatype_iris(param['dataType']))
p.description.append(locstr(' '.join(param['explanation']), lang='en'))
_onto):
parameterName = param['parameterName']
_name = Preprocessor.io_name(parameterName,_onto)
o = OTBOption(_name,prefLabel=locstr(parameterName, lang='en'))
tool.option.append(o)
o.identifier = parameterName
if 'dataType' in param.keys() and param['dataType']:
if param['dataType'] == "Choices":
o.datatypeInString.append('String')
o.datatypeInString.append(param['dataType'])
o.description.append(''.join(param['explanation']))
if 'availableChoices' in param.keys() and param['availableChoices']:
o, onto = OWLUtils.handle_choices(o, parameterName, param['availableChoices'], OTBAvailableChoice, _onto)
def map_to_owl(json_data):
for d in json_data:
if d['category'] == 'Deprecated':
continue
name_str = d['name']
toolClass = tool_class(d['category'])
tool = toolClass(name_str, prefLabel=locstr(d['label'], lang='en'))
OWLUtils.application_category(tool, [], d['category'], [])
tool.isToolOfSoftware.append(cyber.OrfeoToolBox)
tool.identifier = name_str
tool.manualPageURL.append(normstr(d['manual_url']))
tool.executable = d['command']
tool.description.append(locstr(d['description'], lang='en'))
tool.definition.append(d['definition'])
keywords = OWLUtils.to_keywords(d['description'])
keywords.extend(d['label'].split(" "))
OWLUtils.link_to_domain_concept(tool, keywords)
if d['authors']:
tool.authors.append(d['authors'])
for ex in d['example']:
tool.example.append(ex.replace(' . ','.'))
handle_task(tool, d['category'], d['label'], d['description'])
for parameter in d['parameters']:
handle_parameters(tool, parameter)
for option in d['options']:
handle_options(tool, option, onto)
def tool_class(category):
tool_cls = category.replace(' ', '') + 'Tool'
return OWLUtils.create_onto_class(onto, tool_cls, OTBTool)
if __name__ == "__main__":
module_path = os.path.dirname(__file__)
with open(module_path + '/otb.json', 'r') as f:
jdata = json.load(f)
threading.stack_size(2000000)
thread = threading.Thread(target=map_to_owl(jdata))
thread.start()
onto.save(file='otb.owl', format="rdfxml")
task.save()
print('OTB Done!')
| true | true |
1c3d17ee908cb860703746671fb358583533173f | 28,660 | py | Python | pandas/tools/rplot.py | danielballan/pandas | 576818f169c0d494e74f787f7486d090e5e6662f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 29 | 2015-01-08T19:20:37.000Z | 2021-04-20T08:25:56.000Z | pandas/tools/rplot.py | danielballan/pandas | 576818f169c0d494e74f787f7486d090e5e6662f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/tools/rplot.py | danielballan/pandas | 576818f169c0d494e74f787f7486d090e5e6662f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 22 | 2015-01-02T12:14:20.000Z | 2021-10-13T09:22:30.000Z | import random
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
class Scale:
"""
Base class for mapping between graphical and data attributes.
"""
pass
class ScaleGradient(Scale):
"""
A mapping between a data attribute value and a
point in colour space between two specified colours.
"""
def __init__(self, column, colour1, colour2):
"""Initialize ScaleGradient instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A three element tuple representing an RGB somewhere between colour1 and colour2
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
class ScaleGradient2(Scale):
    """
    Linear three-colour gradient scale.

    Maps a numeric DataFrame column onto the polyline in RGB space from
    ``colour1`` through ``colour2`` to ``colour3``; the middle colour
    sits at the column's midpoint.
    """

    def __init__(self, column, colour1, colour2, colour3):
        """Initialize ScaleGradient2 instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        colour1: tuple of three floats, RGB colour at the column minimum
        colour2: tuple of three floats, RGB colour at the column midpoint
        colour3: tuple of three floats, RGB colour at the column maximum
        """
        self.column = column
        self.colour1 = colour1
        self.colour2 = colour2
        self.colour3 = colour3
        self.categorical = False

    def __call__(self, data, index):
        """Return the interpolated RGB colour for one row.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        A three element tuple: an RGB point along the colour1-colour2-colour3
        polyline.
        """
        column = data[self.column]
        value = column.iget(index)
        lo, hi = min(column), max(column)
        t = (value - lo) / (hi - lo)
        if t < 0.5:
            # First half of the range: interpolate colour1 -> colour2.
            start, end = self.colour1, self.colour2
            t = t * 2.0
        else:
            # Second half of the range: interpolate colour2 -> colour3.
            start, end = self.colour2, self.colour3
            t = (t - 0.5) * 2.0
        return tuple(s + (e - s) * t for s, e in zip(start, end))
class ScaleSize(Scale):
    """
    Scale mapping a numeric DataFrame column to matplotlib scatter
    marker sizes between ``min_size`` and ``max_size``.
    """

    def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
        """Initialize ScaleSize instance.

        Parameters:
        -----------
        column: string, a column name
        min_size: float, minimum point size
        max_size: float, maximum point size
        transform: one-argument float -> float function applied to the
            linearly scaled size (e.g. lambda x: log(x))
        """
        self.column = column
        self.min_size = min_size
        self.max_size = max_size
        self.transform = transform
        self.categorical = False

    def __call__(self, data, index):
        """Return the marker size for one row.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index
        """
        column = data[self.column]
        value = column.iget(index)
        lo = float(min(column))
        hi = float(max(column))
        # Linear interpolation between min_size and max_size, then the
        # user-supplied transform.
        fraction = (value - lo) / (hi - lo)
        span = self.max_size - self.min_size
        return self.transform(self.min_size + fraction * span)
class ScaleShape(Scale):
    """
    Categorical scale assigning a distinct matplotlib marker shape to
    each value of a DataFrame column (at most 10 distinct values).
    """

    def __init__(self, column):
        """Initialize ScaleShape instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        """
        self.column = column
        # Pool of markers handed out in order of the sorted column values.
        self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
        self.legends = set([])
        self.categorical = True

    def __call__(self, data, index):
        """Return the marker identifier for one row.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        a matplotlib marker identifier

        Raises:
        -------
        ValueError if the column has more distinct values than markers.
        """
        levels = sorted(set(data[self.column]))
        if len(self.shapes) < len(levels):
            raise ValueError("Too many different values of the categorical attribute for ScaleShape")
        value = data[self.column].iget(index)
        return self.shapes[levels.index(value)]
class ScaleRandomColour(Scale):
    """
    Categorical scale assigning a pseudo-random (but deterministic per
    value) RGB colour to each value of a DataFrame column.
    """

    def __init__(self, column):
        """Initialize ScaleRandomColour instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        """
        self.column = column
        self.categorical = True

    def __call__(self, data, index):
        """Return an RGB colour (list of three floats) for one row.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index
        """
        # Seeding the RNG with the attribute value makes the colour
        # deterministic for each category.
        random.seed(data[self.column].iget(index))
        colour = []
        for _ in range(3):
            colour.append(random.random())
        return colour
class ScaleConstant(Scale):
    """
    Scale that ignores the data and always yields the same value.
    Usually created automatically to wrap plain (non-callable)
    aesthetic values.
    """

    def __init__(self, value):
        """Initialize ScaleConstant instance.

        Parameters:
        -----------
        value: any Python value to be returned when called
        """
        self.value = value
        self.categorical = False

    def __call__(self, data, index):
        """Return the constant value; ``data`` and ``index`` are ignored.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        The constant value given at construction time.
        """
        return self.value
def default_aes(x=None, y=None):
    """Build the aesthetics dictionary used when nothing is specified.

    Size, colour, shape and alpha are bound to constant scales with
    sensible defaults; only x and y come from the caller.

    Parameters:
    -----------
    x: string, DataFrame column name
    y: string, DataFrame column name

    Returns:
    --------
    a dictionary with aesthetics bindings
    """
    aes = {'x': x, 'y': y}
    aes['size'] = ScaleConstant(40.0)
    aes['colour'] = ScaleConstant('grey')
    aes['shape'] = ScaleConstant('o')
    aes['alpha'] = ScaleConstant(1.0)
    return aes
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
    """Create an aesthetics dictionary from user-supplied bindings.

    Plain (non-callable) values are wrapped in a ScaleConstant; each
    binding is then validated against the scale types permitted for
    that aesthetic.  Unspecified aesthetics stay None so that a later
    layer can inherit them (see sequence_layers).

    Parameters:
    -----------
    x: string, DataFrame column name
    y: string, DataFrame column name
    size: function, binding for size attribute of Geoms
    colour: function, binding for colour attribute of Geoms
    shape: function, binding for shape attribute of Geoms
    alpha: function, binding for alpha attribute of Geoms

    Returns:
    --------
    a dictionary with aesthetics bindings

    Raises:
    -------
    ValueError if a binding uses a scale type not allowed for that
    aesthetic.
    """
    # Wrap literal values (e.g. a number or colour string) in a
    # constant scale so every binding is callable.
    if size is not None and not callable(size):
        size = ScaleConstant(size)
    if colour is not None and not callable(colour):
        colour = ScaleConstant(colour)
    if shape is not None and not callable(shape):
        shape = ScaleConstant(shape)
    if alpha is not None and not callable(alpha):
        alpha = ScaleConstant(alpha)
    # Validate that each aesthetic is bound to an appropriate scale.
    if size is not None and not isinstance(size, (ScaleConstant, ScaleSize)):
        raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
    if colour is not None and not isinstance(colour, (ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour)):
        raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
    if shape is not None and not isinstance(shape, (ScaleConstant, ScaleShape)):
        raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
    if alpha is not None and not isinstance(alpha, ScaleConstant):
        raise ValueError('alpha mapping should be done through ScaleConstant')
    return {
        'x' : x,
        'y' : y,
        'size' : size,
        'colour' : colour,
        'shape' : shape,
        'alpha' : alpha,
    }
class Layer:
    """
    A single plot layer: a DataFrame plus aesthetics bindings.

    Subclasses (the Geom* classes) override ``work`` to do the actual
    drawing; this base implementation is a no-op.
    """

    def __init__(self, data=None, **kwds):
        """Initialize layer object.

        Parameters:
        -----------
        data: pandas DataFrame instance
        kwds: aesthetic bindings, forwarded to make_aes
        """
        self.data = data
        self.aes = make_aes(**kwds)
        # Filled by drawing code: maps legend-key tuples to patches.
        self.legend = {}

    def work(self, fig=None, ax=None):
        """Drawing hook; the base class draws nothing.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis object

        Returns:
        --------
        a tuple with the same figure and axis instances
        """
        return fig, ax
class GeomPoint(Layer):
    """
    Scatter layer that draws each observation individually, applying
    the size/colour/shape/alpha scales per row and recording legend
    entries for categorical scales.
    """

    def work(self, fig=None, ax=None):
        """Render the layer on a matplotlib axis.

        You can specify either a figure or an axis to draw on.

        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on

        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        aes = self.aes
        colour_scaler = aes['colour']
        shape_scaler = aes['shape']
        for index in range(len(self.data)):
            row = self.data.irow(index)
            # Evaluate every aesthetic scale for this row.
            patch = ax.scatter(row[aes['x']], row[aes['y']],
                               s=aes['size'](self.data, index),
                               c=colour_scaler(self.data, index),
                               marker=shape_scaler(self.data, index),
                               alpha=aes['alpha'](self.data, index))
            # Categorical scales contribute (column, value) pairs to the
            # legend key; colour first, then shape.
            label = []
            for scaler in (colour_scaler, shape_scaler):
                if scaler.categorical:
                    label.extend([scaler.column, row[scaler.column]])
            self.legend[tuple(label)] = patch
        ax.set_xlabel(aes['x'])
        ax.set_ylabel(aes['y'])
        return fig, ax
class GeomPolyFit(Layer):
    """
    Layer drawing a least-squares polynomial fit of the given degree
    through the x/y data.
    """

    def __init__(self, degree, lw=2.0, colour='grey'):
        """Initialize GeomPolyFit object.

        Parameters:
        -----------
        degree: an integer, polynomial degree
        lw: line width
        colour: matplotlib colour
        """
        self.degree = degree
        self.lw = lw
        self.colour = colour
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw the polynomial fit on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        from numpy.polynomial.polynomial import polyfit
        from numpy.polynomial.polynomial import polyval
        xs = self.data[self.aes['x']]
        ys = self.data[self.aes['y']]
        coeffs = polyfit(xs, ys, self.degree)
        # Evaluate the fitted polynomial on an evenly spaced grid
        # spanning the observed x range.
        grid = np.linspace(min(xs), max(xs), len(xs))
        ax.plot(grid, polyval(grid, coeffs), lw=self.lw, c=self.colour)
        return fig, ax
class GeomScatter(Layer):
    """
    Fast scatter layer: draws all points with a single matplotlib call.
    Use this instead of GeomPoint when per-point aesthetics are not
    needed.
    """

    def __init__(self, marker='o', colour='lightblue', alpha=1.0):
        """Initialize GeomScatter instance.

        Parameters:
        -----------
        marker: matplotlib marker string
        colour: matplotlib colour
        alpha: matplotlib alpha
        """
        self.marker = marker
        self.colour = colour
        self.alpha = alpha
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw a scatter plot on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        xs = self.data[self.aes['x']]
        ys = self.data[self.aes['y']]
        ax.scatter(xs, ys, marker=self.marker, c=self.colour,
                   alpha=self.alpha)
        return fig, ax
class GeomHistogram(Layer):
    """
    Fast histogram layer; use this instead of GeomBar for speed.
    """

    def __init__(self, bins=10, colour='lightblue'):
        """Initialize GeomHistogram instance.

        Parameters:
        -----------
        bins: integer, number of histogram bins
        colour: matplotlib colour
        """
        self.bins = bins
        self.colour = colour
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw a histogram on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        xcol = self.aes['x']
        ax.hist(_values_from_object(self.data[xcol]), self.bins,
                facecolor=self.colour)
        ax.set_xlabel(xcol)
        return fig, ax
class GeomDensity(Layer):
    """
    One dimensional Gaussian kernel density estimation layer.
    """

    def work(self, fig=None, ax=None):
        """Draw a one dimensional kernel density plot.

        You can specify either a figure or an axis to draw on.

        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on

        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        from scipy.stats import gaussian_kde
        values = self.data[self.aes['x']]
        density = gaussian_kde(values)
        # Evaluate the estimated density on a 200-point grid covering
        # the observed range.
        grid = np.linspace(values.min(), values.max(), 200)
        ax.plot(grid, density.evaluate(grid))
        return fig, ax
class GeomDensity2D(Layer):
    """
    Two dimensional Gaussian kernel density estimation layer, drawn as
    a contour plot.
    """

    def work(self, fig=None, ax=None):
        """Draw a two dimensional kernel density plot.

        You can specify either a figure or an axis to draw on.

        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on

        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            else:
                ax = fig.gca()
        import scipy.stats as stats
        x = self.data[self.aes['x']]
        y = self.data[self.aes['y']]
        x_min = x.min()
        x_max = x.max()
        y_min = y.min()
        y_max = y.max()
        # Evaluate the KDE on a 200x200 grid spanning the data range.
        # (A previously assigned but unused local, rvs, was removed.)
        X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
        positions = np.vstack([X.ravel(), Y.ravel()])
        values = np.vstack([x, y])
        kernel = stats.gaussian_kde(values)
        Z = np.reshape(kernel(positions).T, X.shape)
        ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
        return fig, ax
class TrellisGrid(Layer):
    """
    Pseudo-layer that splits the other layers into a two dimensional
    grid of clones, one cell per group of the data.  Either grouping
    attribute may be '.' to facet along a single axis only.
    """

    def __init__(self, by):
        """Initialize TreelisGrid instance.

        Parameters:
        -----------
        by: list of two column names to group by; either entry (but not
            both) may be '.' meaning "do not facet along this axis"
        """
        if len(by) != 2:
            raise ValueError("You must give a list of length 2 to group by")
        elif by[0] == '.' and by[1] == '.':
            raise ValueError("At least one of grouping attributes must be not a dot")
        self.by = by

    def trellis(self, layers):
        """Create a trellis structure for a list of layers.

        Each layer is cloned with a subset of the data into a two
        dimensional grid; ``self.rows``, ``self.cols`` and
        ``self.group_grid`` are set as a side effect.

        Parameters:
        -----------
        layers: a list of Layer objects

        Returns:
        --------
        trellised_layers: Clones of each layer arranged in a trellised
        lattice (a list of row-major 2D grids)
        """
        trellised_layers = []
        for layer in layers:
            frame = layer.data
            # Group by one or both attributes, skipping any '.' entry.
            if self.by[0] == '.':
                grouped = frame.groupby(self.by[1])
            elif self.by[1] == '.':
                grouped = frame.groupby(self.by[0])
            else:
                grouped = frame.groupby(self.by)
            keys = list(grouped.groups.keys())
            # Determine the grid dimensions from the distinct levels.
            if self.by[0] == '.' or self.by[1] == '.':
                levels1 = set(key for key in keys)
            else:
                levels1 = set(key[0] for key in keys)
                levels2 = set(key[1] for key in keys)
            if self.by[0] == '.':
                self.rows = 1
                self.cols = len(levels1)
            elif self.by[1] == '.':
                self.rows = len(levels1)
                self.cols = 1
            else:
                self.rows = len(levels1)
                self.cols = len(levels2)
            grid = [[None] * self.cols for _ in range(self.rows)]
            self.group_grid = [[None] * self.cols for _ in range(self.rows)]
            # Fill the grid row-major in the groupby iteration order.
            row = 0
            col = 0
            for group, subset in grouped:
                clone = deepcopy(layer)
                clone.data = subset
                grid[row][col] = clone
                self.group_grid[row][col] = group
                col += 1
                if col >= self.cols:
                    col = 0
                    row += 1
            trellised_layers.append(grid)
        return trellised_layers
def dictionary_union(dict1, dict2):
    """Take two dictionaries, return dictionary union.

    Parameters:
    -----------
    dict1: Python dictionary
    dict2: Python dictionary

    Returns:
    --------
    A new dictionary with the keys of both inputs.  Values from dict2
    win for keys present in both (the callers assume shared keys map to
    identical values, so the precedence is moot).  Neither input is
    modified.
    """
    # Idiomatic merge: copy dict1, then overlay dict2.
    result = dict(dict1)
    result.update(dict2)
    return result
def merge_aes(layer1, layer2):
    """Fill in missing aesthetics of layer2 from layer1, in place.

    Any aesthetic bound to None on layer2 inherits layer1's binding;
    see sequence_layers for the ordering convention (layer1 precedes
    layer2).

    Parameters:
    -----------
    layer1: Layer object
    layer2: Layer object
    """
    for key, binding in layer2.aes.items():
        if binding is None:
            layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
    """Propagate missing information forward through a list of layers.

    The basic rules are this:

    * If the current layer has data set to None, take the data from
      the previous layer.
    * For each aesthetic mapping set to None, take it from the
      previous layer.

    Parameters:
    -----------
    layers: a list of Layer objects (modified in place and returned)
    """
    for previous, current in zip(layers[:-1], layers[1:]):
        if current.data is None:
            current.data = previous.data
        merge_aes(previous, current)
    return layers
def sequence_grids(layer_grids):
    """Propagate missing information through a list of layer grids,
    cell by cell, the same way sequence_layers does for flat lists.

    Parameters:
    -----------
    layer_grids: a list of two dimensional layer grids (modified in
    place and returned)
    """
    for prev_grid, grid in zip(layer_grids[:-1], layer_grids[1:]):
        for prev_row, row in zip(prev_grid, grid):
            for previous, current in zip(prev_row, row):
                if current.data is None:
                    current.data = previous.data
                merge_aes(previous, current)
    return layer_grids
def work_grid(grid, fig):
    """Add one subplot per grid cell and let each layer draw on it.

    Parameters:
    -----------
    grid: a two dimensional grid of layers
    fig: matplotlib figure to draw on

    Returns:
    --------
    axes: a two dimensional list of matplotlib axes, parallel to grid
    """
    nrows = len(grid)
    ncols = len(grid[0])
    axes = [[None] * ncols for _ in range(nrows)]
    for row in range(nrows):
        for col in range(ncols):
            # Subplot numbers are 1-based and row-major.
            subplot = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
            axes[row][col] = subplot
            grid[row][col].work(ax=subplot)
    return axes
def adjust_subplots(fig, axes, trellis, layers):
    """Adjust the subplots on a matplotlib figure for trellis display.

    Harmonises the x/y limits across all cells, removes redundant tick
    labels from interior cells, labels each cell with its group values
    and collects a single figure-level legend.

    Parameters:
    -----------
    fig: matplotlib figure
    axes: a two dimensional grid of matplotlib axes
    trellis: TrellisGrid object
    layers: last grid of layers in the plot
    """
    # Flatten the axes grid
    axes = [ax for row in axes for ax in row]
    # Shared axis limits make the facets directly comparable.
    min_x = min([ax.get_xlim()[0] for ax in axes])
    max_x = max([ax.get_xlim()[1] for ax in axes])
    min_y = min([ax.get_ylim()[0] for ax in axes])
    max_y = max([ax.get_ylim()[1] for ax in axes])
    [ax.set_xlim(min_x, max_x) for ax in axes]
    [ax.set_ylim(min_y, max_y) for ax in axes]
    for index, axis in enumerate(axes):
        # Keep y tick labels only in the leftmost column.
        if index % trellis.cols == 0:
            pass
        else:
            axis.get_yaxis().set_ticks([])
            axis.set_ylabel('')
        # Keep x tick labels only in the bottom row.  BUG FIX: this used
        # true division ("/"); under Python 3 the comparison only matched
        # the first cell of the bottom row.  Floor division is intended,
        # matching the "//" used for the group_grid lookups below.
        if index // trellis.cols == trellis.rows - 1:
            pass
        else:
            axis.get_xaxis().set_ticks([])
            axis.set_xlabel('')
        # Build the header label(s) naming the group shown in this cell.
        if trellis.by[0] == '.':
            label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
            label2 = None
        elif trellis.by[1] == '.':
            label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
            label2 = None
        else:
            label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
            label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
        if label2 is not None:
            axis.table(cellText=[[label1], [label2]],
                loc='top', cellLoc='center',
                cellColours=[['lightgrey'], ['lightgrey']])
        else:
            axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
    # Flatten the layer grid
    layers = [layer for row in layers for layer in row]
    # Merge the per-layer legend entries into a single mapping.
    legend = {}
    for layer in layers:
        legend = dictionary_union(legend, layer.legend)
    patches = []
    labels = []
    # Legend keys are (column, value) or (col1, val1, col2, val2)
    # tuples; sort by the value component(s) for a stable order.
    if len(list(legend.keys())) == 0:
        key_function = lambda tup: tup
    elif len(list(legend.keys())[0]) == 2:
        key_function = lambda tup: (tup[1])
    else:
        key_function = lambda tup: (tup[1], tup[3])
    for key in sorted(list(legend.keys()), key=key_function):
        value = legend[key]
        patches.append(value)
        if len(key) == 2:
            col, val = key
            labels.append("%s" % str(val))
        elif len(key) == 4:
            col1, val1, col2, val2 = key
            labels.append("%s, %s" % (str(val1), str(val2)))
        else:
            # BUG FIX: corrected "lengend" typo in the error message.
            raise ValueError("Maximum 2 categorical attributes to display a legend of")
    if len(legend):
        fig.legend(patches, labels, loc='upper right')
    # Leave a little breathing room between cells.
    fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
    """
    The main plot object. Add layers to an instance of this object to create a plot.
    """

    def __init__(self, data, x=None, y=None):
        """Initialize RPlot instance.

        Parameters:
        -----------
        data: pandas DataFrame instance
        x: string, DataFrame column name
        y: string, DataFrame column name
        """
        self.layers = [Layer(data, **default_aes(x=x, y=y))]
        # BUG FIX: this was previously a discarded local variable
        # ("trellised = False"); store the flag on the instance instead.
        self.trellised = False

    def add(self, layer):
        """Add a layer to RPlot instance.

        Parameters:
        -----------
        layer: Layer instance

        Raises:
        -------
        TypeError if layer is not a Layer instance.
        """
        if not isinstance(layer, Layer):
            raise TypeError("The operand on the right side of + must be a Layer instance")
        self.layers.append(layer)

    def render(self, fig=None):
        """Render all the layers on a matplotlib figure.

        Parameters:
        -----------
        fig: matplotlib figure; defaults to the current figure

        Returns:
        --------
        fig: the figure drawn on
        """
        import matplotlib.pyplot as plt
        if fig is None:
            fig = plt.gcf()
        # Look for the last TrellisGrid instance in the layer list
        last_trellis = None
        for layer in self.layers:
            if isinstance(layer, TrellisGrid):
                last_trellis = layer
        if last_trellis is None:
            # We have a simple, non-trellised plot
            new_layers = sequence_layers(self.layers)
            for layer in new_layers:
                layer.work(fig=fig)
            # Merge the per-layer legend entries into a single mapping.
            legend = {}
            for layer in new_layers:
                legend = dictionary_union(legend, layer.legend)
            patches = []
            labels = []
            # Legend keys are (column, value) or (col1, val1, col2, val2)
            # tuples; sort by the value component(s) for a stable order.
            if len(list(legend.keys())) == 0:
                key_function = lambda tup: tup
            elif len(list(legend.keys())[0]) == 2:
                key_function = lambda tup: (tup[1])
            else:
                key_function = lambda tup: (tup[1], tup[3])
            for key in sorted(list(legend.keys()), key=key_function):
                value = legend[key]
                patches.append(value)
                if len(key) == 2:
                    col, val = key
                    labels.append("%s" % str(val))
                elif len(key) == 4:
                    col1, val1, col2, val2 = key
                    labels.append("%s, %s" % (str(val1), str(val2)))
                else:
                    # BUG FIX: corrected "lengend" typo in the error message.
                    raise ValueError("Maximum 2 categorical attributes to display a legend of")
            if len(legend):
                fig.legend(patches, labels, loc='upper right')
        else:
            # We have a trellised plot.
            # First let's remove all other TrellisGrid instances from the layer list,
            # including this one.
            new_layers = []
            for layer in self.layers:
                if not isinstance(layer, TrellisGrid):
                    new_layers.append(layer)
            new_layers = sequence_layers(new_layers)
            # Now replace the old layers by their trellised versions
            new_layers = last_trellis.trellis(new_layers)
            # Prepare the subplots and draw on them
            new_layers = sequence_grids(new_layers)
            axes_grids = [work_grid(grid, fig) for grid in new_layers]
            axes_grid = axes_grids[-1]
            adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
        # And we're done
        return fig
| 32.34763 | 137 | 0.554361 | import random
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
class Scale:
pass
class ScaleGradient(Scale):
def __init__(self, column, colour1, colour2):
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
class ScaleGradient2(Scale):
def __init__(self, column, colour1, colour2, colour3):
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.colour3 = colour3
self.categorical = False
def __call__(self, data, index):
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
r3, g3, b3 = self.colour3
x_scaled = (x - a) / (b - a)
if x_scaled < 0.5:
x_scaled *= 2.0
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
else:
x_scaled = (x_scaled - 0.5) * 2.0
return (r2 + (r3 - r2) * x_scaled,
g2 + (g3 - g2) * x_scaled,
b2 + (b3 - b2) * x_scaled)
class ScaleSize(Scale):
def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
self.column = column
self.min_size = min_size
self.max_size = max_size
self.transform = transform
self.categorical = False
def __call__(self, data, index):
x = data[self.column].iget(index)
a = float(min(data[self.column]))
b = float(max(data[self.column]))
return self.transform(self.min_size + ((x - a) / (b - a)) *
(self.max_size - self.min_size))
class ScaleShape(Scale):
def __init__(self, column):
self.column = column
self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
self.legends = set([])
self.categorical = True
def __call__(self, data, index):
values = sorted(list(set(data[self.column])))
if len(values) > len(self.shapes):
raise ValueError("Too many different values of the categorical attribute for ScaleShape")
x = data[self.column].iget(index)
return self.shapes[values.index(x)]
class ScaleRandomColour(Scale):
def __init__(self, column):
self.column = column
self.categorical = True
def __call__(self, data, index):
random.seed(data[self.column].iget(index))
return [random.random() for _ in range(3)]
class ScaleConstant(Scale):
def __init__(self, value):
self.value = value
self.categorical = False
def __call__(self, data, index):
return self.value
def default_aes(x=None, y=None):
return {
'x' : x,
'y' : y,
'size' : ScaleConstant(40.0),
'colour' : ScaleConstant('grey'),
'shape' : ScaleConstant('o'),
'alpha' : ScaleConstant(1.0),
}
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
if not hasattr(size, '__call__') and size is not None:
size = ScaleConstant(size)
if not hasattr(colour, '__call__') and colour is not None:
colour = ScaleConstant(colour)
if not hasattr(shape, '__call__') and shape is not None:
shape = ScaleConstant(shape)
if not hasattr(alpha, '__call__') and alpha is not None:
alpha = ScaleConstant(alpha)
if any([isinstance(size, scale) for scale in [ScaleConstant, ScaleSize]]) or size is None:
pass
else:
raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
if any([isinstance(colour, scale) for scale in [ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour]]) or colour is None:
pass
else:
raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
if any([isinstance(shape, scale) for scale in [ScaleConstant, ScaleShape]]) or shape is None:
pass
else:
raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
if any([isinstance(alpha, scale) for scale in [ScaleConstant]]) or alpha is None:
pass
else:
raise ValueError('alpha mapping should be done through ScaleConstant')
return {
'x' : x,
'y' : y,
'size' : size,
'colour' : colour,
'shape' : shape,
'alpha' : alpha,
}
class Layer:
def __init__(self, data=None, **kwds):
self.data = data
self.aes = make_aes(**kwds)
self.legend = {}
def work(self, fig=None, ax=None):
return fig, ax
class GeomPoint(Layer):
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
for index in range(len(self.data)):
row = self.data.irow(index)
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
colour_scaler = self.aes['colour']
shape_scaler = self.aes['shape']
alpha = self.aes['alpha']
size_value = size_scaler(self.data, index)
colour_value = colour_scaler(self.data, index)
marker_value = shape_scaler(self.data, index)
alpha_value = alpha(self.data, index)
patch = ax.scatter(x, y,
s=size_value,
c=colour_value,
marker=marker_value,
alpha=alpha_value)
label = []
if colour_scaler.categorical:
label += [colour_scaler.column, row[colour_scaler.column]]
if shape_scaler.categorical:
label += [shape_scaler.column, row[shape_scaler.column]]
self.legend[tuple(label)] = patch
ax.set_xlabel(self.aes['x'])
ax.set_ylabel(self.aes['y'])
return fig, ax
class GeomPolyFit(Layer):
def __init__(self, degree, lw=2.0, colour='grey'):
self.degree = degree
self.lw = lw
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from numpy.polynomial.polynomial import polyfit
from numpy.polynomial.polynomial import polyval
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
min_x = min(x)
max_x = max(x)
c = polyfit(x, y, self.degree)
x_ = np.linspace(min_x, max_x, len(x))
y_ = polyval(x_, c)
ax.plot(x_, y_, lw=self.lw, c=self.colour)
return fig, ax
class GeomScatter(Layer):
def __init__(self, marker='o', colour='lightblue', alpha=1.0):
self.marker = marker
self.colour = colour
self.alpha = alpha
Layer.__init__(self)
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
ax.scatter(x, y, marker=self.marker, c=self.colour, alpha=self.alpha)
return fig, ax
class GeomHistogram(Layer):
def __init__(self, bins=10, colour='lightblue'):
self.bins = bins
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
ax.hist(_values_from_object(x), self.bins, facecolor=self.colour)
ax.set_xlabel(self.aes['x'])
return fig, ax
class GeomDensity(Layer):
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from scipy.stats import gaussian_kde
x = self.data[self.aes['x']]
gkde = gaussian_kde(x)
ind = np.linspace(x.min(), x.max(), 200)
ax.plot(ind, gkde.evaluate(ind))
return fig, ax
class GeomDensity2D(Layer):
def work(self, fig=None, ax=None):
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
import scipy.stats as stats
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
return fig, ax
class TrellisGrid(Layer):
def __init__(self, by):
if len(by) != 2:
raise ValueError("You must give a list of length 2 to group by")
elif by[0] == '.' and by[1] == '.':
raise ValueError("At least one of grouping attributes must be not a dot")
self.by = by
def trellis(self, layers):
trellised_layers = []
for layer in layers:
data = layer.data
if self.by[0] == '.':
grouped = data.groupby(self.by[1])
elif self.by[1] == '.':
grouped = data.groupby(self.by[0])
else:
grouped = data.groupby(self.by)
groups = list(grouped.groups.keys())
if self.by[0] == '.' or self.by[1] == '.':
shingle1 = set([g for g in groups])
else:
shingle1 = set([g[0] for g in groups])
shingle2 = set([g[1] for g in groups])
if self.by[0] == '.':
self.rows = 1
self.cols = len(shingle1)
elif self.by[1] == '.':
self.rows = len(shingle1)
self.cols = 1
else:
self.rows = len(shingle1)
self.cols = len(shingle2)
trellised = [[None for _ in range(self.cols)] for _ in range(self.rows)]
self.group_grid = [[None for _ in range(self.cols)] for _ in range(self.rows)]
row = 0
col = 0
for group, data in grouped:
new_layer = deepcopy(layer)
new_layer.data = data
trellised[row][col] = new_layer
self.group_grid[row][col] = group
col += 1
if col >= self.cols:
col = 0
row += 1
trellised_layers.append(trellised)
return trellised_layers
def dictionary_union(dict1, dict2):
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
result = {}
for key1 in keys1:
result[key1] = dict1[key1]
for key2 in keys2:
result[key2] = dict2[key2]
return result
def merge_aes(layer1, layer2):
for key in layer2.aes.keys():
if layer2.aes[key] is None:
layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
for layer1, layer2 in zip(layers[:-1], layers[1:]):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layers
def sequence_grids(layer_grids):
for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]):
for row1, row2 in zip(grid1, grid2):
for layer1, layer2 in zip(row1, row2):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layer_grids
def work_grid(grid, fig):
nrows = len(grid)
ncols = len(grid[0])
axes = [[None for _ in range(ncols)] for _ in range(nrows)]
for row in range(nrows):
for col in range(ncols):
axes[row][col] = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
grid[row][col].work(ax=axes[row][col])
return axes
def adjust_subplots(fig, axes, trellis, layers):
axes = [ax for row in axes for ax in row]
min_x = min([ax.get_xlim()[0] for ax in axes])
max_x = max([ax.get_xlim()[1] for ax in axes])
min_y = min([ax.get_ylim()[0] for ax in axes])
max_y = max([ax.get_ylim()[1] for ax in axes])
[ax.set_xlim(min_x, max_x) for ax in axes]
[ax.set_ylim(min_y, max_y) for ax in axes]
for index, axis in enumerate(axes):
if index % trellis.cols == 0:
pass
else:
axis.get_yaxis().set_ticks([])
axis.set_ylabel('')
if index / trellis.cols == trellis.rows - 1:
pass
else:
axis.get_xaxis().set_ticks([])
axis.set_xlabel('')
if trellis.by[0] == '.':
label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
elif trellis.by[1] == '.':
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
else:
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
if label2 is not None:
axis.table(cellText=[[label1], [label2]],
loc='top', cellLoc='center',
cellColours=[['lightgrey'], ['lightgrey']])
else:
axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
layers = [layer for row in layers for layer in row]
legend = {}
for layer in layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
    """
    The main plot object.  Add ``Layer`` instances (via :meth:`add`) to build
    up a plot, then call :meth:`render` to draw everything onto a matplotlib
    figure.  If a ``TrellisGrid`` layer is added, the plot is rendered as a
    trellis (grid of subplots); otherwise all layers are drawn on one axis.
    """

    def __init__(self, data, x=None, y=None):
        """Initialize the plot with a data set and optional default aesthetics.

        Parameters
        ----------
        data : DataFrame-like
            Data backing the base layer.
        x, y : str, optional
            Column names used as the default x/y aesthetics.
        """
        self.layers = [Layer(data, **default_aes(x=x, y=y))]
        # Bug fix: the original assigned a dead local ``trellised = False``;
        # store the flag on the instance so it is actually retained.
        self.trellised = False

    def add(self, layer):
        """Append a layer to the plot.

        Raises
        ------
        TypeError
            If ``layer`` is not a ``Layer`` instance.
        """
        if not isinstance(layer, Layer):
            raise TypeError("The operand on the right side of + must be a Layer instance")
        self.layers.append(layer)

    def render(self, fig=None):
        """Render all layers onto ``fig`` (``plt.gcf()`` if not given).

        Returns
        -------
        fig
            The matplotlib figure that was drawn on.
        """
        import matplotlib.pyplot as plt
        if fig is None:
            fig = plt.gcf()
        # The last TrellisGrid layer (if any) decides the layout.
        last_trellis = None
        for layer in self.layers:
            if isinstance(layer, TrellisGrid):
                last_trellis = layer
        if last_trellis is None:
            # Non-trellised case: sequence the layers, draw them all on the
            # same figure, then assemble a combined legend.
            new_layers = sequence_layers(self.layers)
            for layer in new_layers:
                layer.work(fig=fig)
            legend = {}
            for layer in new_layers:
                legend = dictionary_union(legend, layer.legend)
            patches = []
            labels = []
            # Legend keys are either (column, value) pairs or
            # (col1, val1, col2, val2) quadruples; sort by the value parts.
            if len(list(legend.keys())) == 0:
                key_function = lambda tup: tup
            elif len(list(legend.keys())[0]) == 2:
                key_function = lambda tup: (tup[1])
            else:
                key_function = lambda tup: (tup[1], tup[3])
            for key in sorted(list(legend.keys()), key=key_function):
                value = legend[key]
                patches.append(value)
                if len(key) == 2:
                    col, val = key
                    labels.append("%s" % str(val))
                elif len(key) == 4:
                    col1, val1, col2, val2 = key
                    labels.append("%s, %s" % (str(val1), str(val2)))
                else:
                    # Bug fix: corrected the "lengend" typo in the message.
                    raise ValueError("Maximum 2 categorical attributes to display a legend of")
            if len(legend):
                fig.legend(patches, labels, loc='upper right')
        else:
            # Trellised case: drop the grid layers themselves, trellis the
            # remaining layers, then draw each resulting grid of subplots.
            new_layers = []
            for layer in self.layers:
                if not isinstance(layer, TrellisGrid):
                    new_layers.append(layer)
            new_layers = sequence_layers(new_layers)
            # Now replace the old layers by their trellised versions
            new_layers = last_trellis.trellis(new_layers)
            # Prepare the subplots and draw on them
            new_layers = sequence_grids(new_layers)
            axes_grids = [work_grid(grid, fig) for grid in new_layers]
            axes_grid = axes_grids[-1]
            adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
        # And we're done
        return fig
| true | true |
1c3d18bc1bb33eb2e502018ab53a1332e73e46b0 | 834 | py | Python | h/presenters/user_json.py | jenkins-hypothesis/h | 328be7f5fa3abf3f05aba73d2311cf1eaf7b2277 | [
"BSD-2-Clause"
] | null | null | null | h/presenters/user_json.py | jenkins-hypothesis/h | 328be7f5fa3abf3f05aba73d2311cf1eaf7b2277 | [
"BSD-2-Clause"
] | null | null | null | h/presenters/user_json.py | jenkins-hypothesis/h | 328be7f5fa3abf3f05aba73d2311cf1eaf7b2277 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class UserJSONPresenter(object):
    """
    Present a user in the JSON format returned by API requests.

    Note that this presenter as of now returns some information that should
    not be publicly available, like the user's email address.  This is fine
    for now because it is only used in places where the caller has access to
    this; it would need refactoring before use in a public API.
    """

    # Attributes copied verbatim from the user object into the payload.
    _FIELDS = ("authority", "email", "userid", "username", "display_name")

    def __init__(self, user):
        self.user = user

    def asdict(self):
        """Return the user's API representation as a plain dict."""
        return {name: getattr(self.user, name) for name in self._FIELDS}
| 28.758621 | 63 | 0.640288 |
from __future__ import unicode_literals
class UserJSONPresenter(object):
    """Serialize a user object into the dict shape used by API JSON responses."""

    def __init__(self, user):
        self.user = user

    def asdict(self):
        """Return the user's fields as a plain dict."""
        user = self.user
        return dict(
            authority=user.authority,
            email=user.email,
            userid=user.userid,
            username=user.username,
            display_name=user.display_name,
        )
| true | true |
1c3d1982463790d343ae0b1b55926222d7262b7d | 122 | py | Python | tests/components/hint/test_hint.py | Crown-Commercial-Service/govuk-frontend-jinja | ddbe208a976ffa4ca330881c506c5200dfa69851 | [
"MIT"
] | 7 | 2019-09-25T13:59:35.000Z | 2021-06-30T11:13:22.000Z | tests/components/hint/test_hint.py | Crown-Commercial-Service/govuk-frontend-jinja | ddbe208a976ffa4ca330881c506c5200dfa69851 | [
"MIT"
] | 23 | 2019-08-20T10:52:49.000Z | 2021-06-02T14:21:16.000Z | tests/components/hint/test_hint.py | Crown-Commercial-Service/govuk-frontend-jinja | ddbe208a976ffa4ca330881c506c5200dfa69851 | [
"MIT"
] | 6 | 2019-08-29T14:02:25.000Z | 2021-04-10T20:20:23.000Z | def test_hint(env, template, expected):
template = env.from_string(template)
assert template.render() == expected
| 30.5 | 40 | 0.729508 | def test_hint(env, template, expected):
template = env.from_string(template)
assert template.render() == expected
| true | true |
1c3d19aca39c51b03373b7a4807c22d627923035 | 31 | py | Python | custom_components/metlocationforecast/__init__.py | toringer/home-assistant-metlocationforecast | aa336683aa944050d730b719cc691ef643c46bf9 | [
"MIT"
] | null | null | null | custom_components/metlocationforecast/__init__.py | toringer/home-assistant-metlocationforecast | aa336683aa944050d730b719cc691ef643c46bf9 | [
"MIT"
] | null | null | null | custom_components/metlocationforecast/__init__.py | toringer/home-assistant-metlocationforecast | aa336683aa944050d730b719cc691ef643c46bf9 | [
"MIT"
] | null | null | null | """met.no location forecast.""" | 31 | 31 | 0.677419 | true | true | |
1c3d19dd9cb90eb6fc36527ce8f8ba2b73de07a4 | 2,234 | py | Python | gameplay/Item.py | WilliamDASILVA/TheMysteryOfSchweitzer | f81edc2e202bd5009fc96ccfbbfcc40bc135a17a | [
"MIT"
] | null | null | null | gameplay/Item.py | WilliamDASILVA/TheMysteryOfSchweitzer | f81edc2e202bd5009fc96ccfbbfcc40bc135a17a | [
"MIT"
] | null | null | null | gameplay/Item.py | WilliamDASILVA/TheMysteryOfSchweitzer | f81edc2e202bd5009fc96ccfbbfcc40bc135a17a | [
"MIT"
] | null | null | null | from engine.render.image import Image;
class Item():
    """An inventory item with a name, a description and an optional icon.

    Callbacks registered with :meth:`onSelection` are invoked, in
    registration order, by :meth:`callFunction`.
    """

    def __init__(self, name=None, description=None, iconPath=None):
        self.setName(name)
        self.setDescription(description)
        self.functionsToCallWhenAction = []
        self.icon = None
        # Only build the drawable when a path was supplied (``is not None``
        # replaces the original non-idiomatic ``!= None`` comparison).
        if iconPath is not None:
            self.icon = Image(iconPath)

    def setName(self, name):
        """Set the item's name."""
        self.name = name

    def getName(self):
        """Return the item's name."""
        return self.name

    def setDescription(self, description):
        """Set the item's description."""
        self.description = description

    def getDescription(self):
        """Return the item's description."""
        return self.description

    def getIcon(self):
        """Return the item's icon drawable (None if no icon was set)."""
        return self.icon

    def onSelection(self, functionToCall):
        """Register a callback to run when the item is selected."""
        self.functionsToCallWhenAction.append(functionToCall)

    def callFunction(self):
        """Invoke every registered selection callback, in registration order."""
        for function in self.functionsToCallWhenAction:
            function()
class Item():
def __init__(self, name = None, description = None, iconPath = None):
self.setName(name);
self.setDescription(description);
self.functionsToCallWhenAction = [];
self.icon = None;
if iconPath != None:
drawable = Image(iconPath);
self.icon = drawable;
# Return : nil
# --------------------------------------------------- */
def setName(self, name):
self.name = name;
# --------------------------------------------------- *\
# [function] getName()
#
# * Return the item's name *
def getName(self):
return self.name;
# Return : nil
# --------------------------------------------------- */
def setDescription(self, description):
self.description = description;
# --------------------------------------------------- *\
# [function] getDescription()
#
# * Return the description of the item *
# Return : description
# --------------------------------------------------- */
def getDescription(self):
return self.description;
# --------------------------------------------------- *\
# [function] getIcon()
#
# * Return the icon of the item *
# Return : drawable
# --------------------------------------------------- */
def getIcon(self):
return self.icon;
# --------------------------------------------------- *\
# [function] onSelection(functionToCall)
#
# * Function to throw when selection is made *
# Return : nil
# --------------------------------------------------- */
def onSelection(self, functionToCall):
self.functionsToCallWhenAction.append(functionToCall);
# --------------------------------------------------- *\
# [function] callFunction()
#
# * Call the associated function *
# Return : nil
# --------------------------------------------------- */
def callFunction(self):
for function in self.functionsToCallWhenAction:
function(); | true | true |
1c3d1acc3757a398b6d132af41d24dff3cad0fb7 | 22,803 | py | Python | necrobot/match/matchroom.py | saturnin55/necrobot | 5f54634050939fbf9a0218260925d17713cd57ba | [
"MIT"
] | null | null | null | necrobot/match/matchroom.py | saturnin55/necrobot | 5f54634050939fbf9a0218260925d17713cd57ba | [
"MIT"
] | null | null | null | necrobot/match/matchroom.py | saturnin55/necrobot | 5f54634050939fbf9a0218260925d17713cd57ba | [
"MIT"
] | null | null | null | """Room for scheduling and running a "match", a series of games between a pair of racers."""
import asyncio
import datetime
import discord
import pytz
import typing
from necrobot.botbase import server
from necrobot.util import console
from necrobot.util import ordinal
from necrobot.util import timestr
from necrobot.race import cmd_race
from necrobot.match import cmd_match
from necrobot.test import cmd_test
from necrobot.user import cmd_user
from necrobot.database import ratingsdb, matchdb, racedb
from necrobot.ladder import ratingutil
from necrobot.race import raceinfo
from necrobot.botbase.botchannel import BotChannel
from necrobot.config import Config
from necrobot.match.match import Match
from necrobot.match.matchracedata import MatchRaceData
from necrobot.necroevent.necroevent import NEDispatch
from necrobot.race.raceconfig import RaceConfig
from necrobot.race.race import Race, RaceEvent
class MatchRoom(BotChannel):
    def __init__(self, match_discord_channel: discord.Channel, match: Match):
        """BotChannel where a match is taking place.
        Parameters
        ----------
        match_discord_channel: discord.Channel
            The discord channel corresponding to this BotChannel.
        match: Match
            The Match object for the match.
        """
        BotChannel.__init__(self)
        self._channel = match_discord_channel   # type: discord.Channel
        self._match = match                     # type: Match
        # Race bookkeeping: the race in progress, and the last race that sent
        # a RACE_BEGIN event (useful after the race has finished).
        self._current_race = None               # type: Race
        self._last_begun_race = None            # type: Race
        # Future running _countdown_to_match_start; canceled on room close/update.
        self._countdown_to_match_future = None  # type: asyncio.Future
        self._current_race_number = None        # type: typing.Optional[int]
        self._last_begun_race_number = None     # type: typing.Optional[int]
        self._current_race_contested = False    # type: bool
        self._match_race_data = None            # type: typing.Optional[MatchRaceData]
        # Commands available before the match begins (scheduling phase).
        self._prematch_channel_commands = [
            cmd_match.Confirm(self),
            cmd_match.GetMatchInfo(self),
            cmd_match.Suggest(self),
            cmd_match.Unconfirm(self),
            cmd_match.ForceBegin(self),
            cmd_match.ForceConfirm(self),
            cmd_match.ForceReschedule(self),
            cmd_match.Postpone(self),
            cmd_match.RebootRoom(self),
            cmd_match.SetMatchType(self),
            cmd_match.Update(self),
            cmd_test.TestMatch(self),
            cmd_user.UserInfo(self),
        ]
        # Commands available while races are being run.
        self._during_match_channel_commands = [
            cmd_match.CancelRace(self),
            cmd_match.ChangeWinner(self),
            cmd_match.Contest(self),
            cmd_match.ForceNewRace(self),
            cmd_match.ForceRecordRace(self),
            cmd_match.GetMatchInfo(self),
            cmd_match.Postpone(self),
            cmd_match.RebootRoom(self),
            cmd_match.SetMatchType(self),
            cmd_match.Update(self),
            cmd_race.Ready(self),
            cmd_race.Unready(self),
            cmd_race.Done(self),
            cmd_race.Undone(self),
            cmd_race.Time(self),
            cmd_race.Pause(self),
            cmd_race.Unpause(self),
            cmd_race.Reseed(self),
            cmd_race.ChangeRules(self),
            cmd_test.TestMatch(self),
            cmd_user.UserInfo(self),
        ]
        # Commands available after all races have been played.
        self._postmatch_channel_commands = [
            cmd_match.CancelRace(self),
            cmd_match.ChangeWinner(self),
            cmd_match.Contest(self),
            cmd_match.ForceNewRace(self),
            cmd_match.ForceRecordRace(self),
            cmd_match.GetMatchInfo(self),
            cmd_match.Postpone(self),
            cmd_match.RebootRoom(self),
            cmd_match.SetMatchType(self),
            cmd_match.Update(self),
            cmd_race.ChangeRules(self),
            cmd_test.TestMatch(self),
            cmd_user.UserInfo(self),
        ]
        self.channel_commands = self._prematch_channel_commands
    # Properties
    @property
    def channel(self) -> discord.Channel:
        """The discord channel this room is attached to."""
        return self._channel

    @property
    def match(self) -> Match:
        """The Match object this room is running."""
        return self._match

    @property
    def current_race(self) -> typing.Optional[Race]:
        """The "main" Race; the one that most commands should apply to. Not None if self.before_races is False."""
        return self._current_race

    @property
    def last_begun_race(self) -> typing.Optional[Race]:
        """The last race to begin (sent a RaceEvent.RACE_BEGIN to this room). Useful for allowing commands to apply
        to a finished race during the ready-up phase of the subsequent race.
        """
        return self._last_begun_race

    @property
    def played_all_races(self) -> bool:
        """True if the match is over."""
        # No race data yet means the room hasn't finished initializing.
        if self._match_race_data is None:
            return False
        if self.match.is_best_of:
            # Best-of-N ends when one racer has a strict majority of N.
            return self._match_race_data.leader_wins > self.match.number_of_races // 2
        else:
            return self._match_race_data.num_finished >= self.match.number_of_races
async def during_races(self) -> bool:
"""True if the match has started but not finished."""
return self.current_race is not None and not self.played_all_races
    async def contest_last_begun_race(self) -> None:
        """Mark the last begun race as contested."""
        # If the last begun race hasn't been finalized yet, just flag it
        # locally; the flag is written when the race is recorded.
        if self._last_begun_race is not None and not self._last_begun_race.final:
            self._current_race_contested = True
            return
        # No race has begun yet; nothing to contest.
        if self._last_begun_race_number == 0:
            return
        # Otherwise the race is already in the database; flag it there.
        contest_race_number = self._last_begun_race_number
        await matchdb.set_match_race_contested(
            match=self.match,
            race_number=contest_race_number,
            contested=True
        )
    async def initialize(self) -> None:
        """Async initialization method"""
        # (Re)start the countdown task; warn=True because this path also runs
        # when the bot restarts mid-match.
        if self._countdown_to_match_future is not None:
            self._countdown_to_match_future.cancel()
        self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start(warn=True))
        # Rebuild race bookkeeping from the database.
        self._match_race_data = await matchdb.get_match_race_data(self.match.match_id)
        self._current_race_number = self._match_race_data.num_finished + self._match_race_data.num_canceled
        self._last_begun_race_number = self._current_race_number
        self._set_channel_commands()
    async def send_channel_start_text(self) -> None:
        """Post the introductory message for this match channel.

        Explains the scheduling commands and, when both racers have registered
        timezones, reports their current UTC-offset difference.
        """
        msg = '\n \N{BULLET} To suggest a time, use `.suggest`. (See `.help suggest` for more info.) Give the time ' \
              'in your own local timezone (which you\'ve registered using `.timezone`).\n' \
              '\N{BULLET} Confirm a suggested time with `.confirm`. You may remove a confirmation with ' \
              '`.unconfirm`.\n' \
              '\N{BULLET} To reschedule a time both racers have confirmed, both racers must call `.unconfirm`.\n' \
              '\N{BULLET} You may alert CoNDOR staff at any time by calling `.staff`.\n'
        if self.match.racer_1.timezone is not None and self.match.racer_2.timezone is not None:
            # Compare the racers' current UTC offsets to report who is "ahead".
            utcnow = pytz.utc.localize(datetime.datetime.utcnow())
            r1off = utcnow.astimezone(self.match.racer_1.timezone).utcoffset()
            r2off = utcnow.astimezone(self.match.racer_2.timezone).utcoffset()
            if r1off > r2off:
                ahead_racer_name = self.match.racer_1.display_name
                behind_racer_name = self.match.racer_2.display_name
                diff_str = timestr.timedelta_to_str(r1off - r2off)
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} is currently {1} ahead of {2}.\n'.format(
                    ahead_racer_name, diff_str, behind_racer_name
                )
            elif r1off < r2off:
                ahead_racer_name = self.match.racer_2.display_name
                behind_racer_name = self.match.racer_1.display_name
                diff_str = timestr.timedelta_to_str(r2off - r1off)
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} is currently {1} ahead of {2}.\n'.format(
                    ahead_racer_name, diff_str, behind_racer_name
                )
            else:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} The two racers in this match currently have the same UTC offset.\n'
        else:
            # At least one racer has no registered timezone; prompt them.
            if self.match.racer_1.timezone is None and self.match.racer_2.timezone is not None:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_1.display_name
                )
            elif self.match.racer_1.timezone is not None and self.match.racer_2.timezone is None:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_2.display_name
                )
            else:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} and {1} have not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_1.display_name,
                    self.match.racer_2.display_name
                )
        # noinspection PyUnresolvedReferences
        msg += '\N{BULLET} This match is a {0}.'.format(self.match.format_str)
        await self.client.send_message(self.channel, msg)
async def update(self) -> None:
if self.match.is_scheduled and self.current_race is None:
if self._countdown_to_match_future is not None:
self._countdown_to_match_future.cancel()
self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start())
elif not self.match.is_scheduled:
if self._countdown_to_match_future is not None:
self._countdown_to_match_future.cancel()
self._current_race = None
self._set_channel_commands()
if self.played_all_races:
self._end_match()
    async def change_race_info(self, command_args: list) -> None:
        """Change the RaceInfo for this room by parsing the input args"""
        new_race_info = raceinfo.parse_args_modify(
            command_args,
            raceinfo.RaceInfo.copy(self.match.race_info)
        )
        if new_race_info:
            self.match.set_race_info(new_race_info)
            # If the current race hasn't started yet, apply the new rules to it too.
            if self.current_race.before_race:
                self.current_race.race_info = raceinfo.RaceInfo.copy(self.match.race_info)
            await self.write('Changed rules for the next race.')
            await self.update()
    async def process(self, race_event: RaceEvent) -> None:
        """Process a RaceEvent"""
        if race_event.event == RaceEvent.EventType.RACE_BEGIN:
            # Remember which race began last, for post-race commands.
            self._last_begun_race = self._current_race
            self._last_begun_race_number = self._current_race_number
        elif race_event.event == RaceEvent.EventType.RACE_BEGIN_COUNTDOWN:
            await NEDispatch().publish(event_type='begin_match_race', match=self.match)
        elif race_event.event == RaceEvent.EventType.RACE_END:
            await asyncio.sleep(1)  # Waiting for a short time feels good UI-wise
            await self.write('The race will end in {} seconds.'.format(self.current_race.race_config.finalize_time_sec))
        elif race_event.event == RaceEvent.EventType.RACE_FINALIZE:
            await NEDispatch().publish(event_type='end_match_race', match=self.match)
            # Auto-contest the race when both racers finished within the
            # configured margin of each other.
            race_winner = race_event.race.racers[0]
            race_loser = race_event.race.racers[1]
            auto_contest = (
                race_winner.is_finished
                and race_loser.is_finished
                and race_loser.time - race_winner.time <= Config.MATCH_AUTOCONTEST_IF_WITHIN_HUNDREDTHS
            )
            if auto_contest:
                self._current_race_contested = True
                await NEDispatch().publish(
                    'notify',
                    message='A race has been automatically contested in channel {0}, because the finish times were '
                            'close.'.format(self.channel.mention)
                )
            await self._record_race(race_event.race, self._race_winner(race_event.race))
            # await self._record_new_ratings(race_winner)
            # Write end-of-race message
            end_race_msg = 'The race has ended.'
            if auto_contest:
                if server.staff_role is not None:
                    end_race_msg += ' {0}:'.format(server.staff_role.mention)
                end_race_msg += ' This match has been automatically marked as contested because the finish times ' \
                                'were close.'
            await self.write(end_race_msg)
            # Begin a new race if appropriate, or end the match.
            if self.played_all_races:
                await self._end_match()
            else:
                await self._begin_new_race()
        elif race_event.event == RaceEvent.EventType.RACE_CANCEL:
            await self.write('The race has been canceled.')
            if not self.played_all_races:
                await self._begin_new_race()
    async def write(self, text: str) -> None:
        """Write text to the channel"""
        await self.client.send_message(self.channel, text)

    async def alert_racers(self) -> None:
        """Post an alert pinging both racers in the match"""
        member_1 = self.match.racer_1.member
        member_2 = self.match.racer_2.member
        alert_str = ''
        if member_1 is not None:
            alert_str += member_1.mention + ', '
        if member_2 is not None:
            alert_str += member_2.mention + ', '
        if alert_str:
            # Round seconds-until-match to the nearest minute (+30s, then floor-div).
            minutes_until_match = int((self.match.time_until_match.total_seconds() + 30) // 60)
            # alert_str[:-2] drops the trailing ', '.
            await self.write('{0}: The match is scheduled to begin in {1} minutes.'.format(
                alert_str[:-2], int(minutes_until_match))
            )
    async def force_new_race(self) -> None:
        """Begin a new race, canceling the old one if necessary"""
        if self.current_race is not None and not self.current_race.complete:
            await self.current_race.cancel()
        # Only directly call begin_new_race if cancelling the old one did not begin a new one already
        if self.current_race is None or self.current_race.complete:
            await self._begin_new_race()

    async def cancel_race(self, race_number: int) -> bool:
        """Mark a race as canceled
        Parameters
        ----------
        race_number: int
            The number of the race to cancel, counting only uncanceled races.
        """
        # Convert from "uncanceled race number" to the database's race numbering.
        race_number = race_number - self._match_race_data.num_canceled
        success = await matchdb.cancel_race(self.match, race_number)
        if success:
            self._match_race_data.num_finished -= 1
            self._match_race_data.num_canceled += 1
        return success

    async def force_record_race(self, winner: int) -> None:
        """Record a "fake" race with the given winner"""
        await matchdb.record_match_race(
            match=self.match,
            winner=winner
        )
        self._update_race_data(race_winner=winner)
    async def _countdown_to_match_start(self, warn: bool = False) -> None:
        """Does things at certain times before the match
        Posts alerts to racers in this channel, and sends NecroEvents at alert times. Begins the match
        at the appropriate time. This is stored as a future in this object, and is meant to be canceled
        if this object closes.
        """
        try:
            if not self.match.is_scheduled:
                return
            time_until_match = self.match.time_until_match
            # Begin match now if appropriate
            if time_until_match < datetime.timedelta(seconds=0):
                # Scheduled start is in the past (e.g. after a bot restart):
                # begin immediately instead of waiting.
                if not self.played_all_races:
                    if warn:
                        await self.write(
                            'I believe that I was just restarted; an error may have occurred. I am '
                            'beginning a new race and attempting to pick up this match where we left '
                            'off. If this is an error, or if there are unrecorded races, please contact '
                            'an admin.')
                    await self._begin_new_race()
                return
            # Wait until the first warning
            if time_until_match > Config.MATCH_FIRST_WARNING:
                await asyncio.sleep((time_until_match - Config.MATCH_FIRST_WARNING).total_seconds())
                await self.alert_racers()
                await NEDispatch().publish('match_alert', match=self.match, final=False)
            # Wait until the final warning
            time_until_match = self.match.time_until_match
            if time_until_match > Config.MATCH_FINAL_WARNING:
                await asyncio.sleep((time_until_match - Config.MATCH_FINAL_WARNING).total_seconds())
            # At this time, we've either just passed the FINAL_MATCH_WARNING or the function was just called
            # (happens if the call comes sometime after the FINAL_MATCH_WARNING but before the match).
            await self.alert_racers()
            await NEDispatch().publish('match_alert', match=self.match, final=True)
            await asyncio.sleep(self.match.time_until_match.total_seconds())
            await self._begin_new_race()
        except asyncio.CancelledError:
            console.info('MatchRoom._countdown_to_match_start() was cancelled.')
            raise
    async def _begin_new_race(self):
        """Begin a new race"""
        # Shift to during-match commands
        self.channel_commands = self._during_match_channel_commands
        # Make the race
        match_race_data = await matchdb.get_match_race_data(self.match.match_id)
        self._current_race = Race(self, self.match.race_info,
                                  race_config=RaceConfig(finalize_time_sec=15, auto_forfeit=1))
        self._current_race_number = match_race_data.num_races + 1
        await self._current_race.initialize()
        # Enter the racers automatically
        for racer in self.match.racers:
            await self.current_race.enter_member(racer.member, mute=True)
        # Output text
        await self.write(
            'Please input the seed ({1}) and type `.ready` when you are ready for the {0} race. '
            'When both racers `.ready`, the race will begin.'.format(
                ordinal.num_to_text(match_race_data.num_finished + 1),
                self.current_race.race_info.seed))
        # The match has begun, so the pre-match countdown task is obsolete.
        if self._countdown_to_match_future is not None:
            self._countdown_to_match_future.cancel()
    async def _end_match(self):
        """End the match"""
        self._current_race = None
        self.channel_commands = self._postmatch_channel_commands
        # Send event
        if self._match_race_data.r1_wins > self._match_race_data.r2_wins:
            winner = self.match.racer_1
            winner_wins = self._match_race_data.r1_wins
            loser_wins = self._match_race_data.r2_wins
        elif self._match_race_data.r2_wins > self._match_race_data.r1_wins:
            winner = self.match.racer_2
            winner_wins = self._match_race_data.r2_wins
            loser_wins = self._match_race_data.r1_wins
        else:
            # Tied match: winner is a string sentinel rather than a racer.
            winner = '[Tied]'
            winner_wins = self._match_race_data.r1_wins
            loser_wins = self._match_race_data.r2_wins
        self.match.set_finish_time(pytz.utc.localize(datetime.datetime.utcnow()))
        await NEDispatch().publish(
            'end_match',
            match=self.match,
            winner=winner,
            winner_wins=winner_wins,
            loser_wins=loser_wins,
            r1_wins=self._match_race_data.r1_wins,
            r2_wins=self._match_race_data.r2_wins
        )
        await self.write('Match complete.')
    async def _record_race(self, race: Race, race_winner: int) -> None:
        """Record the given race as part of this match"""
        # Record the raw race, then link it to this match with its winner
        # and the locally-tracked contested flag.
        await racedb.record_race(race)
        await matchdb.record_match_race(
            match=self.match,
            race_number=self._current_race_number,
            race_id=self.current_race.race_id,
            winner=race_winner,
            contested=self._current_race_contested,
            canceled=False
        )
        self._update_race_data(race_winner=race_winner)
    async def _record_new_ratings(self, race_winner: int) -> None:
        """Get new ratings for the racers in this match and record them"""
        racer_1 = self.match.racer_1
        racer_2 = self.match.racer_2
        rating_1 = await ratingsdb.get_rating(racer_1.discord_id)
        rating_2 = await ratingsdb.get_rating(racer_2.discord_id)
        new_ratings = ratingutil.get_new_ratings(rating_1=rating_1, rating_2=rating_2, winner=race_winner)
        await ratingsdb.set_rating(racer_1.discord_id, new_ratings[0])
        await ratingsdb.set_rating(racer_2.discord_id, new_ratings[1])
        # this isn't working
        # if Config.RATINGS_IN_NICKNAMES:
        #     for pair in [(racer_1, rating_1,), (racer_2, rating_2,)]:
        #         member = pair[0].member
        #         nick = '{0} ({1})'.format(pair[0].member.name, pair[1].displayed_rating)
        #         await self.client.change_nickname(member=member, nickname=nick)
def _set_channel_commands(self) -> None:
if self.current_race is None:
if self.played_all_races:
self.channel_commands = self._postmatch_channel_commands
else:
self.channel_commands = self._prematch_channel_commands
else:
self.channel_commands = self._during_match_channel_commands
def _race_winner(self, race: Race) -> int:
"""Get the number of the race's winner (1 or 2, for match.racer_1 or match.racer_2)"""
race_winner_id = int(race.winner.member.id)
if race_winner_id == int(self.match.racer_1.member.id):
return 1
elif race_winner_id == int(self.match.racer_2.member.id):
return 2
else:
return 0
    def _update_race_data(self, race_winner: int) -> None:
        """Update this object's MatchRaceData"""
        self._match_race_data.num_finished += 1
        if race_winner == 1:
            self._match_race_data.r1_wins += 1
        else:
            # NOTE(review): any non-1 value -- including 0, which _race_winner
            # returns when the winner is neither racer -- is counted as a win
            # for racer 2 here. Confirm callers never pass 0.
            self._match_race_data.r2_wins += 1
| 42.071956 | 120 | 0.627944 |
import asyncio
import datetime
import discord
import pytz
import typing
from necrobot.botbase import server
from necrobot.util import console
from necrobot.util import ordinal
from necrobot.util import timestr
from necrobot.race import cmd_race
from necrobot.match import cmd_match
from necrobot.test import cmd_test
from necrobot.user import cmd_user
from necrobot.database import ratingsdb, matchdb, racedb
from necrobot.ladder import ratingutil
from necrobot.race import raceinfo
from necrobot.botbase.botchannel import BotChannel
from necrobot.config import Config
from necrobot.match.match import Match
from necrobot.match.matchracedata import MatchRaceData
from necrobot.necroevent.necroevent import NEDispatch
from necrobot.race.raceconfig import RaceConfig
from necrobot.race.race import Race, RaceEvent
class MatchRoom(BotChannel):
def __init__(self, match_discord_channel: discord.Channel, match: Match):
BotChannel.__init__(self)
self._channel = match_discord_channel
self._match = match
self._current_race = None
self._last_begun_race = None
self._countdown_to_match_future = None
self._current_race_number = None
self._last_begun_race_number = None
self._current_race_contested = False
self._match_race_data = None
self._prematch_channel_commands = [
cmd_match.Confirm(self),
cmd_match.GetMatchInfo(self),
cmd_match.Suggest(self),
cmd_match.Unconfirm(self),
cmd_match.ForceBegin(self),
cmd_match.ForceConfirm(self),
cmd_match.ForceReschedule(self),
cmd_match.Postpone(self),
cmd_match.RebootRoom(self),
cmd_match.SetMatchType(self),
cmd_match.Update(self),
cmd_test.TestMatch(self),
cmd_user.UserInfo(self),
]
self._during_match_channel_commands = [
cmd_match.CancelRace(self),
cmd_match.ChangeWinner(self),
cmd_match.Contest(self),
cmd_match.ForceNewRace(self),
cmd_match.ForceRecordRace(self),
cmd_match.GetMatchInfo(self),
cmd_match.Postpone(self),
cmd_match.RebootRoom(self),
cmd_match.SetMatchType(self),
cmd_match.Update(self),
cmd_race.Ready(self),
cmd_race.Unready(self),
cmd_race.Done(self),
cmd_race.Undone(self),
cmd_race.Time(self),
cmd_race.Pause(self),
cmd_race.Unpause(self),
cmd_race.Reseed(self),
cmd_race.ChangeRules(self),
cmd_test.TestMatch(self),
cmd_user.UserInfo(self),
]
self._postmatch_channel_commands = [
cmd_match.CancelRace(self),
cmd_match.ChangeWinner(self),
cmd_match.Contest(self),
cmd_match.ForceNewRace(self),
cmd_match.ForceRecordRace(self),
cmd_match.GetMatchInfo(self),
cmd_match.Postpone(self),
cmd_match.RebootRoom(self),
cmd_match.SetMatchType(self),
cmd_match.Update(self),
cmd_race.ChangeRules(self),
cmd_test.TestMatch(self),
cmd_user.UserInfo(self),
]
self.channel_commands = self._prematch_channel_commands
@property
def channel(self) -> discord.Channel:
return self._channel
@property
def match(self) -> Match:
return self._match
@property
def current_race(self) -> typing.Optional[Race]:
return self._current_race
@property
def last_begun_race(self) -> typing.Optional[Race]:
return self._last_begun_race
@property
def played_all_races(self) -> bool:
if self._match_race_data is None:
return False
if self.match.is_best_of:
return self._match_race_data.leader_wins > self.match.number_of_races // 2
else:
return self._match_race_data.num_finished >= self.match.number_of_races
async def during_races(self) -> bool:
return self.current_race is not None and not self.played_all_races
async def contest_last_begun_race(self) -> None:
if self._last_begun_race is not None and not self._last_begun_race.final:
self._current_race_contested = True
return
if self._last_begun_race_number == 0:
return
contest_race_number = self._last_begun_race_number
await matchdb.set_match_race_contested(
match=self.match,
race_number=contest_race_number,
contested=True
)
async def initialize(self) -> None:
if self._countdown_to_match_future is not None:
self._countdown_to_match_future.cancel()
self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start(warn=True))
self._match_race_data = await matchdb.get_match_race_data(self.match.match_id)
self._current_race_number = self._match_race_data.num_finished + self._match_race_data.num_canceled
self._last_begun_race_number = self._current_race_number
self._set_channel_commands()
    async def send_channel_start_text(self) -> None:
        """Post the orientation message when this match room opens.

        Lists the scheduling commands and, when both racers have registered
        timezones, the offset between them; otherwise asks whoever is
        missing a timezone to register one.
        """
        msg = '\n \N{BULLET} To suggest a time, use `.suggest`. (See `.help suggest` for more info.) Give the time ' \
              'in your own local timezone (which you\'ve registered using `.timezone`).\n' \
              '\N{BULLET} Confirm a suggested time with `.confirm`. You may remove a confirmation with ' \
              '`.unconfirm`.\n' \
              '\N{BULLET} To reschedule a time both racers have confirmed, both racers must call `.unconfirm`.\n' \
              '\N{BULLET} You may alert CoNDOR staff at any time by calling `.staff`.\n'
        if self.match.racer_1.timezone is not None and self.match.racer_2.timezone is not None:
            # Both timezones known: report which racer is ahead and by how much.
            utcnow = pytz.utc.localize(datetime.datetime.utcnow())
            r1off = utcnow.astimezone(self.match.racer_1.timezone).utcoffset()
            r2off = utcnow.astimezone(self.match.racer_2.timezone).utcoffset()
            if r1off > r2off:
                ahead_racer_name = self.match.racer_1.display_name
                behind_racer_name = self.match.racer_2.display_name
                diff_str = timestr.timedelta_to_str(r1off - r2off)
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} is currently {1} ahead of {2}.\n'.format(
                    ahead_racer_name, diff_str, behind_racer_name
                )
            elif r1off < r2off:
                ahead_racer_name = self.match.racer_2.display_name
                behind_racer_name = self.match.racer_1.display_name
                diff_str = timestr.timedelta_to_str(r2off - r1off)
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} is currently {1} ahead of {2}.\n'.format(
                    ahead_racer_name, diff_str, behind_racer_name
                )
            else:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} The two racers in this match currently have the same UTC offset.\n'
        else:
            # At least one timezone is missing: nag the offender(s).
            if self.match.racer_1.timezone is None and self.match.racer_2.timezone is not None:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_1.display_name
                )
            elif self.match.racer_1.timezone is not None and self.match.racer_2.timezone is None:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} has not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_2.display_name
                )
            else:
                # noinspection PyUnresolvedReferences
                msg += '\N{BULLET} {0} and {1} have not registered a timezone. Please call `.timezone`.\n'.format(
                    self.match.racer_1.display_name,
                    self.match.racer_2.display_name
                )
        # noinspection PyUnresolvedReferences
        msg += '\N{BULLET} This match is a {0}.'.format(self.match.format_str)
        await self.client.send_message(self.channel, msg)
async def update(self) -> None:
if self.match.is_scheduled and self.current_race is None:
if self._countdown_to_match_future is not None:
self._countdown_to_match_future.cancel()
self._countdown_to_match_future = asyncio.ensure_future(self._countdown_to_match_start())
elif not self.match.is_scheduled:
if self._countdown_to_match_future is not None:
self._countdown_to_match_future.cancel()
self._current_race = None
self._set_channel_commands()
if self.played_all_races:
self._end_match()
async def change_race_info(self, command_args: list) -> None:
new_race_info = raceinfo.parse_args_modify(
command_args,
raceinfo.RaceInfo.copy(self.match.race_info)
)
if new_race_info:
self.match.set_race_info(new_race_info)
if self.current_race.before_race:
self.current_race.race_info = raceinfo.RaceInfo.copy(self.match.race_info)
await self.write('Changed rules for the next race.')
await self.update()
    async def process(self, race_event: RaceEvent) -> None:
        """Handle a RaceEvent raised by the current race.

        Records finalized races, auto-contests finishes closer than
        Config.MATCH_AUTOCONTEST_IF_WITHIN_HUNDREDTHS, and begins the next
        race or ends the match as appropriate.
        """
        if race_event.event == RaceEvent.EventType.RACE_BEGIN:
            self._last_begun_race = self._current_race
            self._last_begun_race_number = self._current_race_number
        elif race_event.event == RaceEvent.EventType.RACE_BEGIN_COUNTDOWN:
            await NEDispatch().publish(event_type='begin_match_race', match=self.match)
        elif race_event.event == RaceEvent.EventType.RACE_END:
            await asyncio.sleep(1)  # Waiting for a short time feels good UI-wise
            await self.write('The race will end in {} seconds.'.format(self.current_race.race_config.finalize_time_sec))
        elif race_event.event == RaceEvent.EventType.RACE_FINALIZE:
            await NEDispatch().publish(event_type='end_match_race', match=self.match)
            # NOTE(review): assumes racers[0] is the winner and racers[1]
            # the loser -- confirm Race orders its racers by finish.
            race_winner = race_event.race.racers[0]
            race_loser = race_event.race.racers[1]
            auto_contest = (
                race_winner.is_finished
                and race_loser.is_finished
                and race_loser.time - race_winner.time <= Config.MATCH_AUTOCONTEST_IF_WITHIN_HUNDREDTHS
            )
            if auto_contest:
                self._current_race_contested = True
                await NEDispatch().publish(
                    'notify',
                    message='A race has been automatically contested in channel {0}, because the finish times were '
                            'close.'.format(self.channel.mention)
                )
            await self._record_race(race_event.race, self._race_winner(race_event.race))
            # await self._record_new_ratings(race_winner)
            # Write end-of-race message
            end_race_msg = 'The race has ended.'
            if auto_contest:
                if server.staff_role is not None:
                    end_race_msg += ' {0}:'.format(server.staff_role.mention)
                end_race_msg += ' This match has been automatically marked as contested because the finish times ' \
                                'were close.'
            await self.write(end_race_msg)
            # Begin a new race if appropriate, or end the match.
            if self.played_all_races:
                await self._end_match()
            else:
                await self._begin_new_race()
        elif race_event.event == RaceEvent.EventType.RACE_CANCEL:
            await self.write('The race has been canceled.')
            if not self.played_all_races:
                await self._begin_new_race()
    async def write(self, text: str) -> None:
        """Send *text* as a message to this room's channel."""
        await self.client.send_message(self.channel, text)
async def alert_racers(self) -> None:
member_1 = self.match.racer_1.member
member_2 = self.match.racer_2.member
alert_str = ''
if member_1 is not None:
alert_str += member_1.mention + ', '
if member_2 is not None:
alert_str += member_2.mention + ', '
if alert_str:
minutes_until_match = int((self.match.time_until_match.total_seconds() + 30) // 60)
await self.write('{0}: The match is scheduled to begin in {1} minutes.'.format(
alert_str[:-2], int(minutes_until_match))
)
async def force_new_race(self) -> None:
if self.current_race is not None and not self.current_race.complete:
await self.current_race.cancel()
# Only directly call begin_new_race if cancelling the old one did not begin a new one already
if self.current_race is None or self.current_race.complete:
await self._begin_new_race()
async def cancel_race(self, race_number: int) -> bool:
race_number = race_number - self._match_race_data.num_canceled
success = await matchdb.cancel_race(self.match, race_number)
if success:
self._match_race_data.num_finished -= 1
self._match_race_data.num_canceled += 1
return success
async def force_record_race(self, winner: int) -> None:
await matchdb.record_match_race(
match=self.match,
winner=winner
)
self._update_race_data(race_winner=winner)
    async def _countdown_to_match_start(self, warn: bool = False) -> None:
        """Sleep until match time, issuing warnings, then begin racing.

        Args:
            warn: If True and the scheduled time has already passed, post a
                restart/error notice before beginning a race.
        """
        try:
            if not self.match.is_scheduled:
                return
            time_until_match = self.match.time_until_match
            # Begin match now if appropriate
            if time_until_match < datetime.timedelta(seconds=0):
                if not self.played_all_races:
                    if warn:
                        await self.write(
                            'I believe that I was just restarted; an error may have occurred. I am '
                            'beginning a new race and attempting to pick up this match where we left '
                            'off. If this is an error, or if there are unrecorded races, please contact '
                            'an admin.')
                    await self._begin_new_race()
                return
            # Wait until the first warning
            if time_until_match > Config.MATCH_FIRST_WARNING:
                await asyncio.sleep((time_until_match - Config.MATCH_FIRST_WARNING).total_seconds())
                await self.alert_racers()
                await NEDispatch().publish('match_alert', match=self.match, final=False)
            # Wait until the final warning
            time_until_match = self.match.time_until_match
            if time_until_match > Config.MATCH_FINAL_WARNING:
                await asyncio.sleep((time_until_match - Config.MATCH_FINAL_WARNING).total_seconds())
            # At this time, we've either just passed the FINAL_MATCH_WARNING or the function was just called
            await self.alert_racers()
            await NEDispatch().publish('match_alert', match=self.match, final=True)
            await asyncio.sleep(self.match.time_until_match.total_seconds())
            await self._begin_new_race()
        except asyncio.CancelledError:
            # Cancellation is the normal way this task is stopped (see
            # initialize()/update()); re-raise so asyncio records it.
            console.info('MatchRoom._countdown_to_match_start() was cancelled.')
            raise
    async def _begin_new_race(self):
        """Create, initialize, and announce the next race of the match."""
        self.channel_commands = self._during_match_channel_commands
        match_race_data = await matchdb.get_match_race_data(self.match.match_id)
        self._current_race = Race(self, self.match.race_info,
                                  race_config=RaceConfig(finalize_time_sec=15, auto_forfeit=1))
        self._current_race_number = match_race_data.num_races + 1
        await self._current_race.initialize()
        # Automatically enter both match racers into the new race.
        for racer in self.match.racers:
            await self.current_race.enter_member(racer.member, mute=True)
        await self.write(
            'Please input the seed ({1}) and type `.ready` when you are ready for the {0} race. '
            'When both racers `.ready`, the race will begin.'.format(
                ordinal.num_to_text(match_race_data.num_finished + 1),
                self.current_race.race_info.seed))
        # The countdown task's job is done once a race is underway.
        if self._countdown_to_match_future is not None:
            self._countdown_to_match_future.cancel()
    async def _end_match(self):
        """Finalize the match: compute the result, publish it, announce it."""
        self._current_race = None
        self.channel_commands = self._postmatch_channel_commands
        if self._match_race_data.r1_wins > self._match_race_data.r2_wins:
            winner = self.match.racer_1
            winner_wins = self._match_race_data.r1_wins
            loser_wins = self._match_race_data.r2_wins
        elif self._match_race_data.r2_wins > self._match_race_data.r1_wins:
            winner = self.match.racer_2
            winner_wins = self._match_race_data.r2_wins
            loser_wins = self._match_race_data.r1_wins
        else:
            # NOTE(review): on a tie, winner is a plain string rather than a
            # racer object; 'end_match' subscribers must accept both forms.
            winner = '[Tied]'
            winner_wins = self._match_race_data.r1_wins
            loser_wins = self._match_race_data.r2_wins
        self.match.set_finish_time(pytz.utc.localize(datetime.datetime.utcnow()))
        await NEDispatch().publish(
            'end_match',
            match=self.match,
            winner=winner,
            winner_wins=winner_wins,
            loser_wins=loser_wins,
            r1_wins=self._match_race_data.r1_wins,
            r2_wins=self._match_race_data.r2_wins
        )
        await self.write('Match complete.')
    async def _record_race(self, race: Race, race_winner: int) -> None:
        """Persist a finished race and update the cached tallies.

        Args:
            race: The finished Race.
            race_winner: 1 or 2 for the winning match racer, 0 if unknown.
        """
        await racedb.record_race(race)
        await matchdb.record_match_race(
            match=self.match,
            race_number=self._current_race_number,
            race_id=self.current_race.race_id,
            winner=race_winner,
            contested=self._current_race_contested,
            canceled=False
        )
        self._update_race_data(race_winner=race_winner)
    async def _record_new_ratings(self, race_winner: int) -> None:
        """Recompute and store both racers' ratings after a race.

        Currently unused: the call site in process() is commented out.
        """
        racer_1 = self.match.racer_1
        racer_2 = self.match.racer_2
        rating_1 = await ratingsdb.get_rating(racer_1.discord_id)
        rating_2 = await ratingsdb.get_rating(racer_2.discord_id)
        new_ratings = ratingutil.get_new_ratings(rating_1=rating_1, rating_2=rating_2, winner=race_winner)
        await ratingsdb.set_rating(racer_1.discord_id, new_ratings[0])
        await ratingsdb.set_rating(racer_2.discord_id, new_ratings[1])
        # if Config.RATINGS_IN_NICKNAMES:
        #     for pair in [(racer_1, rating_1,), (racer_2, rating_2,)]:
        #         member = pair[0].member
        #         nick = '{0} ({1})'.format(pair[0].member.name, pair[1].displayed_rating)
        #         await self.client.change_nickname(member=member, nickname=nick)
def _set_channel_commands(self) -> None:
if self.current_race is None:
if self.played_all_races:
self.channel_commands = self._postmatch_channel_commands
else:
self.channel_commands = self._prematch_channel_commands
else:
self.channel_commands = self._during_match_channel_commands
def _race_winner(self, race: Race) -> int:
race_winner_id = int(race.winner.member.id)
if race_winner_id == int(self.match.racer_1.member.id):
return 1
elif race_winner_id == int(self.match.racer_2.member.id):
return 2
else:
return 0
def _update_race_data(self, race_winner: int) -> None:
self._match_race_data.num_finished += 1
if race_winner == 1:
self._match_race_data.r1_wins += 1
else:
self._match_race_data.r2_wins += 1
| true | true |
1c3d1b6042a8dc94a843134c38cb9f370cffab76 | 863 | py | Python | DQMServices/Diagnostic/scripts/Database/Python/PopulateDB.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | DQMServices/Diagnostic/scripts/Database/Python/PopulateDB.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | DQMServices/Diagnostic/scripts/Database/Python/PopulateDB.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | from __future__ import print_function
import os
class PopulateDB:
    """Renders and runs a cmsRun job that populates history-DQM data.

    Callers are expected to set these attributes before calling run():
    TemplatesDir, DetName, RunNumber, FileName, TagName, Database,
    AuthenticationPath, Dir, CMSSW_Version.
    """
    def run(self):
        # Render the per-run config by sed-substituting placeholders in the
        # detector template.
        # NOTE(review): commands are built by raw string concatenation and
        # run through the shell; attribute values are not escaped.
        os.system("cat "+self.TemplatesDir+"/template_"+self.DetName+"HistoryDQMService_cfg.py | sed -e \"s@RUNNUMBER@"+self.RunNumber+"@g\" -e \"s@FILENAME@"+self.FileName+"@\" -e \"s@TAGNAME@"+self.TagName+"@g\" -e \"s@DATABASE@"+self.Database+"@\" -e \"s@AUTHENTICATIONPATH@"+self.AuthenticationPath+"@\" > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py")
        # Echo, then execute, the cmsRun command inside the CMSSW environment.
        print("cd "+self.CMSSW_Version+"; eval `scramv1 r -sh`; cd "+self.Dir+"; cmsRun "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".log")
        os.system("cd "+self.CMSSW_Version+"; eval `scramv1 r -sh`; cd "+self.Dir+"; cmsRun "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".log")
| 95.888889 | 366 | 0.663963 | from __future__ import print_function
import os
class PopulateDB:
    """Renders and runs a cmsRun job that populates history-DQM data.

    Callers are expected to set these attributes before calling run():
    TemplatesDir, DetName, RunNumber, FileName, TagName, Database,
    AuthenticationPath, Dir, CMSSW_Version.
    """
    def run(self):
        # Render the per-run config by sed-substituting placeholders in the
        # detector template.
        # NOTE(review): commands are built by raw string concatenation and
        # run through the shell; attribute values are not escaped.
        os.system("cat "+self.TemplatesDir+"/template_"+self.DetName+"HistoryDQMService_cfg.py | sed -e \"s@RUNNUMBER@"+self.RunNumber+"@g\" -e \"s@FILENAME@"+self.FileName+"@\" -e \"s@TAGNAME@"+self.TagName+"@g\" -e \"s@DATABASE@"+self.Database+"@\" -e \"s@AUTHENTICATIONPATH@"+self.AuthenticationPath+"@\" > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py")
        # Echo, then execute, the cmsRun command inside the CMSSW environment.
        print("cd "+self.CMSSW_Version+"; eval `scramv1 r -sh`; cd "+self.Dir+"; cmsRun "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".log")
        os.system("cd "+self.CMSSW_Version+"; eval `scramv1 r -sh`; cd "+self.Dir+"; cmsRun "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".py > "+self.Dir+"Run_"+self.DetName+"_"+self.RunNumber+".log")
| true | true |
1c3d1c0cd26edabf48602a9f3c44aea4a3e97f8a | 7,275 | py | Python | AICamera/app/src/main/cpp/caffe2/python/layers/batch_distill_lr_loss.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | 1 | 2020-01-10T02:56:03.000Z | 2020-01-10T02:56:03.000Z | AICamera/app/src/main/cpp/caffe2/python/layers/batch_distill_lr_loss.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | null | null | null | AICamera/app/src/main/cpp/caffe2/python/layers/batch_distill_lr_loss.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | null | null | null | ## @package batch_distill_lr_loss
# Module caffe2.python.layers.batch_distill_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchDistillLRLoss(ModelLayer):
    """Sigmoid cross-entropy loss blending true labels with teacher labels.

    The loss is
        (1 - teacher_weight) * xent(logit, label)
            + teacher_weight * xent(logit, teacher_label),
    averaged over the batch. Optionally, rows whose teacher label is not
    strictly positive have their teacher term zeroed out
    (filter_invalid_teacher_label).
    """

    def __init__(
            self, model, input_record,
            name='batch_distill_lr_loss', teacher_weight=0.0,
            filter_invalid_teacher_label=False, **kwargs):
        """
        Args:
            model: The model helper this layer is added to.
            input_record: Struct with 'label', 'teacher_label' and 'logit'
                scalar fields (and optionally 'weight').
            teacher_weight: Weight in [0, 1] given to the teacher term.
            filter_invalid_teacher_label: If True, zero the teacher term
                for rows whose teacher label is not above the threshold.
        """
        super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert 0 <= teacher_weight <= 1, (
            'teacher_weight=%0.2f should be in [0, 1]' % teacher_weight
        )

        self._teacher_weight = teacher_weight
        self._filter_invalid_teacher_label = filter_invalid_teacher_label
        # hyper-parameter determines whether to filter out bad teacher
        # labels, i.e., teacher labels that are zero.
        if self._filter_invalid_teacher_label:
            # Bugfix: dtype used np.float, a deprecated alias of the builtin
            # float that was removed in NumPy 1.24; use float directly.
            self.threshold = model.add_global_constant(
                str(model.net.NextScopedBlob('threshold')),
                [0.0],  # threshold for filtering teacher weight.
                dtype=float
            )
            self.neg_ONE = model.add_global_constant(
                str(model.net.NextScopedBlob('neg_ONE')),
                [-1.0],
                dtype=float
            )
            self.ONE = model._GetOne()
        assert schema.is_schema_subset(
            schema.Struct(
                ('teacher_label', schema.Scalar()),
                ('label', schema.Scalar()),
                ('logit', schema.Scalar()),
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )

    def add_ops(self, net):
        """Add the distillation-loss operators to *net*."""
        # Both labels are cast to float and expanded to (N, 1) to line up
        # with the logit blob.
        label = self.input_record.label()
        if self.input_record.label.field_type() != np.float32:
            label = net.Cast(
                label,
                net.NextScopedBlob('float_label'),
                to=core.DataType.FLOAT,
            )
        # Assuming 1-D input
        label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
                               dims=[1])
        teacher_label = self.input_record.teacher_label()
        if self.input_record.teacher_label.field_type() != np.float32:
            teacher_label = net.Cast(
                teacher_label,
                net.NextScopedBlob('float_teacher_label'),
                to=core.DataType.FLOAT,
            )
        teacher_label = net.ExpandDims(
            teacher_label, net.NextScopedBlob('expanded_teacher_label'),
            dims=[1])
        true_xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), label],
            net.NextScopedBlob('cross_entropy')
        )
        teacher_xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), teacher_label],
            net.NextScopedBlob('teacher_cross_entropy')
        )
        if self._filter_invalid_teacher_label:
            squeezed_teacher_label = net.Squeeze(
                teacher_label,
                net.NextScopedBlob('squeezed_teacher_label'),
                dims=[1]
            )
            # Per-row teacher weights: teacher_weight where the teacher
            # label is valid (> threshold), zero otherwise.
            keep_weights = net.ConstantFill(
                [squeezed_teacher_label],
                net.NextScopedBlob('keep_weights'),
                value=self._teacher_weight,
                dtype=core.DataType.FLOAT
            )
            zero_weights = net.ConstantFill(
                [squeezed_teacher_label],
                net.NextScopedBlob('zero_weights'),
                value=0.0,
                dtype=core.DataType.FLOAT
            )
            # Indicates which teacher labels are valid (above threshold).
            judge = net.GT(
                [squeezed_teacher_label, self.threshold],
                net.NextScopedBlob('judge'),
                broadcast=1
            )
            screened_teacher_weights = net.Conditional(
                [judge, keep_weights, zero_weights],
                net.NextScopedBlob('screened_teacher_weights')
            )
            # Per-row true-label weights: 1 - screened teacher weight.
            neg_screened_teacher_weights = net.Mul(
                [screened_teacher_weights, self.neg_ONE],
                net.NextScopedBlob('neg_screened_teacher_weights'),
                broadcast=1
            )
            one_minus_screened_teacher_weights = net.Add(
                [neg_screened_teacher_weights, self.ONE],
                net.NextScopedBlob('one_minus_screened_teacher_weights'),
                broadcast=1
            )
            scaled_true_xent = net.Mul(
                [true_xent, one_minus_screened_teacher_weights],
                net.NextScopedBlob('scaled_cross_entropy'),
                broadcast=1
            )
            scaled_teacher_xent = net.Mul(
                [teacher_xent, screened_teacher_weights],
                net.NextScopedBlob('scaled_teacher_cross_entropy'),
                broadcast=1
            )
        else:
            # Fixed scalar blend between the two terms.
            scaled_true_xent = net.Scale(
                true_xent,
                net.NextScopedBlob('scaled_cross_entropy'),
                scale=float(1.0 - self._teacher_weight),
            )
            scaled_teacher_xent = net.Scale(
                teacher_xent,
                net.NextScopedBlob('scaled_teacher_cross_entropy'),
                scale=float(self._teacher_weight),
            )
        if 'weight' in self.input_record.fields:
            # Apply optional per-example weights to both terms; gradients
            # must not flow into the weights.
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            scaled_true_xent = net.Mul(
                [scaled_true_xent, weight_blob],
                net.NextScopedBlob('weighted_xent_label'),
            )
            scaled_teacher_xent = net.Mul(
                [scaled_teacher_xent, weight_blob],
                net.NextScopedBlob('weighted_xent_teacher'),
            )
        true_loss = net.AveragedLoss(
            scaled_true_xent,
            net.NextScopedBlob('true_loss')
        )
        teacher_loss = net.AveragedLoss(
            scaled_teacher_xent,
            net.NextScopedBlob('teacher_loss')
        )
        net.Add(
            [true_loss, teacher_loss],
            self.output_schema.field_blobs()
        )
| 37.890625 | 86 | 0.55299 | _import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchDistillLRLoss(ModelLayer):
    """Sigmoid cross-entropy loss blending true labels with teacher labels.

    The loss is
        (1 - teacher_weight) * xent(logit, label)
            + teacher_weight * xent(logit, teacher_label),
    averaged over the batch. Optionally, rows whose teacher label is not
    strictly positive have their teacher term zeroed out
    (filter_invalid_teacher_label).
    """

    def __init__(
            self, model, input_record,
            name='batch_distill_lr_loss', teacher_weight=0.0,
            filter_invalid_teacher_label=False, **kwargs):
        """
        Args:
            model: The model helper this layer is added to.
            input_record: Struct with 'label', 'teacher_label' and 'logit'
                scalar fields (and optionally 'weight').
            teacher_weight: Weight in [0, 1] given to the teacher term.
            filter_invalid_teacher_label: If True, zero the teacher term
                for rows whose teacher label is not above the threshold.
        """
        super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)

        assert 0 <= teacher_weight <= 1, (
            'teacher_weight=%0.2f should be in [0, 1]' % teacher_weight
        )

        self._teacher_weight = teacher_weight
        self._filter_invalid_teacher_label = filter_invalid_teacher_label
        # Whether to filter out bad teacher labels, i.e., labels <= 0.
        if self._filter_invalid_teacher_label:
            # Bugfix: dtype used np.float, a deprecated alias of the builtin
            # float that was removed in NumPy 1.24; use float directly.
            self.threshold = model.add_global_constant(
                str(model.net.NextScopedBlob('threshold')),
                [0.0],  # threshold for filtering teacher weight.
                dtype=float
            )
            self.neg_ONE = model.add_global_constant(
                str(model.net.NextScopedBlob('neg_ONE')),
                [-1.0],
                dtype=float
            )
            self.ONE = model._GetOne()
        assert schema.is_schema_subset(
            schema.Struct(
                ('teacher_label', schema.Scalar()),
                ('label', schema.Scalar()),
                ('logit', schema.Scalar()),
            ),
            input_record
        )
        self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
        self.output_schema = schema.Scalar(
            np.float32,
            self.get_next_blob_reference('output')
        )

    def add_ops(self, net):
        """Add the distillation-loss operators to *net*."""
        # Both labels are cast to float and expanded to (N, 1) to line up
        # with the logit blob.
        label = self.input_record.label()
        if self.input_record.label.field_type() != np.float32:
            label = net.Cast(
                label,
                net.NextScopedBlob('float_label'),
                to=core.DataType.FLOAT,
            )
        # Assuming 1-D input
        label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
                               dims=[1])
        teacher_label = self.input_record.teacher_label()
        if self.input_record.teacher_label.field_type() != np.float32:
            teacher_label = net.Cast(
                teacher_label,
                net.NextScopedBlob('float_teacher_label'),
                to=core.DataType.FLOAT,
            )
        teacher_label = net.ExpandDims(
            teacher_label, net.NextScopedBlob('expanded_teacher_label'),
            dims=[1])
        true_xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), label],
            net.NextScopedBlob('cross_entropy')
        )
        teacher_xent = net.SigmoidCrossEntropyWithLogits(
            [self.input_record.logit(), teacher_label],
            net.NextScopedBlob('teacher_cross_entropy')
        )
        if self._filter_invalid_teacher_label:
            squeezed_teacher_label = net.Squeeze(
                teacher_label,
                net.NextScopedBlob('squeezed_teacher_label'),
                dims=[1]
            )
            # Per-row teacher weights: teacher_weight where the teacher
            # label is valid (> threshold), zero otherwise.
            keep_weights = net.ConstantFill(
                [squeezed_teacher_label],
                net.NextScopedBlob('keep_weights'),
                value=self._teacher_weight,
                dtype=core.DataType.FLOAT
            )
            zero_weights = net.ConstantFill(
                [squeezed_teacher_label],
                net.NextScopedBlob('zero_weights'),
                value=0.0,
                dtype=core.DataType.FLOAT
            )
            # Indicates which teacher labels are valid (above threshold).
            judge = net.GT(
                [squeezed_teacher_label, self.threshold],
                net.NextScopedBlob('judge'),
                broadcast=1
            )
            screened_teacher_weights = net.Conditional(
                [judge, keep_weights, zero_weights],
                net.NextScopedBlob('screened_teacher_weights')
            )
            # Per-row true-label weights: 1 - screened teacher weight.
            neg_screened_teacher_weights = net.Mul(
                [screened_teacher_weights, self.neg_ONE],
                net.NextScopedBlob('neg_screened_teacher_weights'),
                broadcast=1
            )
            one_minus_screened_teacher_weights = net.Add(
                [neg_screened_teacher_weights, self.ONE],
                net.NextScopedBlob('one_minus_screened_teacher_weights'),
                broadcast=1
            )
            scaled_true_xent = net.Mul(
                [true_xent, one_minus_screened_teacher_weights],
                net.NextScopedBlob('scaled_cross_entropy'),
                broadcast=1
            )
            scaled_teacher_xent = net.Mul(
                [teacher_xent, screened_teacher_weights],
                net.NextScopedBlob('scaled_teacher_cross_entropy'),
                broadcast=1
            )
        else:
            # Fixed scalar blend between the two terms.
            scaled_true_xent = net.Scale(
                true_xent,
                net.NextScopedBlob('scaled_cross_entropy'),
                scale=float(1.0 - self._teacher_weight),
            )
            scaled_teacher_xent = net.Scale(
                teacher_xent,
                net.NextScopedBlob('scaled_teacher_cross_entropy'),
                scale=float(self._teacher_weight),
            )
        if 'weight' in self.input_record.fields:
            # Apply optional per-example weights to both terms; gradients
            # must not flow into the weights.
            weight_blob = self.input_record.weight()
            if self.input_record.weight.field_type().base != np.float32:
                weight_blob = net.Cast(
                    weight_blob,
                    weight_blob + '_float32',
                    to=core.DataType.FLOAT
                )
            weight_blob = net.StopGradient(
                [weight_blob],
                [net.NextScopedBlob('weight_stop_gradient')],
            )
            scaled_true_xent = net.Mul(
                [scaled_true_xent, weight_blob],
                net.NextScopedBlob('weighted_xent_label'),
            )
            scaled_teacher_xent = net.Mul(
                [scaled_teacher_xent, weight_blob],
                net.NextScopedBlob('weighted_xent_teacher'),
            )
        true_loss = net.AveragedLoss(
            scaled_true_xent,
            net.NextScopedBlob('true_loss')
        )
        teacher_loss = net.AveragedLoss(
            scaled_teacher_xent,
            net.NextScopedBlob('teacher_loss')
        )
        net.Add(
            [true_loss, teacher_loss],
            self.output_schema.field_blobs()
        )
| true | true |
1c3d1c2f9ec0e9adbd1e4c5d5dd86197d544fb49 | 251 | py | Python | SourceModel/SM_IfStmt.py | crossminer/CrossPuppeteer | ab99f67f9c3440752e767ad284de5049f6fd1da9 | [
"Apache-2.0"
] | 47 | 2016-02-08T08:46:17.000Z | 2021-01-17T23:56:34.000Z | SourceModel/SM_IfStmt.py | crossminer/CrossPuppeteer | ab99f67f9c3440752e767ad284de5049f6fd1da9 | [
"Apache-2.0"
] | null | null | null | SourceModel/SM_IfStmt.py | crossminer/CrossPuppeteer | ab99f67f9c3440752e767ad284de5049f6fd1da9 | [
"Apache-2.0"
] | 15 | 2016-02-09T13:34:48.000Z | 2021-05-12T14:34:26.000Z | import SourceModel.SM_Element
class SM_IfStmt(SourceModel.SM_Element.SM_Element):
def __init__(self, text):
self.resourceText = text
super().__init__(text)
def getUsedVariables(self):
return super().getUsedVariables() | 27.888889 | 51 | 0.709163 | import SourceModel.SM_Element
class SM_IfStmt(SourceModel.SM_Element.SM_Element):
    """Source-model element representing an `if` statement."""
    def __init__(self, text):
        # Keep the raw resource text for later analysis.
        self.resourceText = text
        super().__init__(text)
    def getUsedVariables(self):
        # Delegates to SM_Element's generic variable extraction.
        return super().getUsedVariables()
1c3d1cf0862dd080899f11bdb8a3ac8422160442 | 2,492 | py | Python | instagram/migrations/0001_initial.py | lilian-2021/Insta-clone | 8c9f4aa011fd5c174a1ab3f757a13a7468477cf9 | [
"Unlicense"
] | null | null | null | instagram/migrations/0001_initial.py | lilian-2021/Insta-clone | 8c9f4aa011fd5c174a1ab3f757a13a7468477cf9 | [
"Unlicense"
] | null | null | null | instagram/migrations/0001_initial.py | lilian-2021/Insta-clone | 8c9f4aa011fd5c174a1ab3f757a13a7468477cf9 | [
"Unlicense"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-06 13:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the instagram app.

    Creates the Image, Like, and Comment models, all linked to the
    project's swappable user model.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='media/')),
                ('name', models.CharField(max_length=40)),
                ('caption', models.CharField(max_length=200)),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): default=True on an IntegerField looks
                # unintended (stores 1); verify against the current model.
                ('comment', models.IntegerField(blank=True, default=True, null=True)),
                ('liked', models.ManyToManyField(blank=True, default=None, related_name='liked', to=settings.AUTH_USER_MODEL)),
                ('profile', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='user_images', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('response', models.CharField(choices=[('Like', 'Like'), ('Unlike', 'Unlike')], default='like', max_length=70)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=300)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='instagram.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 49.84 | 146 | 0.624398 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the instagram app.

    Creates the Image, Like, and Comment models, all linked to the
    project's swappable user model.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='media/')),
                ('name', models.CharField(max_length=40)),
                ('caption', models.CharField(max_length=200)),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): default=True on an IntegerField looks
                # unintended (stores 1); verify against the current model.
                ('comment', models.IntegerField(blank=True, default=True, null=True)),
                ('liked', models.ManyToManyField(blank=True, default=None, related_name='liked', to=settings.AUTH_USER_MODEL)),
                ('profile', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='user_images', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('response', models.CharField(choices=[('Like', 'Like'), ('Unlike', 'Unlike')], default='like', max_length=70)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=300)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='instagram.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
1c3d1e1475316883ec2671ef1469c5fea39d9574 | 2,809 | py | Python | characters.py | tomviner/dojo-adventure-game | c8f19f6d9fa848b77fa015d2b0920f5f18729f8f | [
"MIT"
] | null | null | null | characters.py | tomviner/dojo-adventure-game | c8f19f6d9fa848b77fa015d2b0920f5f18729f8f | [
"MIT"
] | null | null | null | characters.py | tomviner/dojo-adventure-game | c8f19f6d9fa848b77fa015d2b0920f5f18729f8f | [
"MIT"
] | null | null | null | from adventurelib import Item, Bag, when
class Man(Item):
    """Game item for a male character; supplies his pronouns."""
    subject_pronoun = 'he'
    object_pronoun = 'him'
class Woman(Item):
    """Game item for a female character; supplies her pronouns."""
    subject_pronoun = 'she'
    object_pronoun = 'her'
# Cast of characters. Each gets its aliases as constructor arguments, a
# def_name used as the default reference, and a description for inspection.
dr_black = the_victim = Man('Dr. Black', 'Dr Black', 'the victim')
dr_black.def_name = 'the victim'
dr_black.description = """\
Dr. Black was the much beloved host and owner of Albermore Manor. His untimely
death has come as a shock and surprise to most of tonight's guests.
"""
miss_scarlet = Woman('Miss Scarlet')
miss_scarlet.def_name = 'Miss Scarlet'
miss_scarlet.description = """\
Miss Scarlet is well liked by the younger gentlemen at tonight's gathering.
She is mistrusted by some and seems to have quite the salacious reputation.
"""
col_mustard = Man('Colonel Mustard', 'Col. Mustard', 'Col Mustard')
col_mustard.def_name = 'Colonel Mustard'
col_mustard.description = """\
The Colonel is a stern man who accepts no "nonsense". His long and esteemed
military career has left him with a stiff upper lip and a stiffer drinking
problem.
"""
mrs_white = Woman('Mrs. White', 'Mrs White')
mrs_white.def_name = 'Mrs. White'
mrs_white.description = """\
Mrs. White is usually found waiting on the Manor's guests. However tonight she
has been invited to dine with the others. She seems frazzled and distressed,
she is nervously glancing around the room.
"""
rev_green = Man(
    'Reverend Green', 'Rev. Green', 'Rev Green', 'Mr. Green', 'Mr Green')
rev_green.def_name = 'Reverend Green'
rev_green.description = """\
Reverend Green is a kindly, wizened old man. Rumour has it that his gambling
debts make rich men wince.
"""
mrs_peacock = Woman('Mrs. Peacock', 'Mrs Peacock')
mrs_peacock.def_name = 'Mrs. Peacock'
mrs_peacock.description = """\
Mrs. Peacock commands the respect of all she meets. She is the eldest and
wisest of tonight's guests, her fierce eyes have been known to scare the local
children.
"""
prof_plum = Man('Professor Plum', 'Prof. Plum', 'Prof Plum')
prof_plum.def_name = 'Professor Plum'
prof_plum.description = """\
Professor Plum is young for a professor, and very ambitious. His latest
academic paper was widely and loudly critised by the victim.
"""
butler = Man('Mr. Butler', 'mr butler', 'butler')
butler.def_name = 'the butler'
butler.description = """\
The butler is immaculately dressed in a smart uniform. He stands to one side
and avoids your gaze. Implausibly, his name really is Mr. Butler.
"""
# The guest list proper; note the butler is deliberately not included.
guests = Bag([
    miss_scarlet, col_mustard, mrs_white, rev_green, mrs_peacock, prof_plum
])
@when('list guests')
def list_guests():
print("A nearby guest list for tonight's gathering has the following names:")
for c in guests:
print(c)
if __name__ == '__main__':
assert prof_plum == guests.find('Prof. Plum')
assert prof_plum != guests.find('Plum')
| 31.561798 | 81 | 0.728373 | from adventurelib import Item, Bag, when
class Man(Item):
subject_pronoun = 'he'
object_pronoun = 'him'
class Woman(Item):
subject_pronoun = 'she'
object_pronoun = 'her'
dr_black = the_victim = Man('Dr. Black', 'Dr Black', 'the victim')
dr_black.def_name = 'the victim'
dr_black.description = """\
Dr. Black was the much beloved host and owner of Albermore Manor. His untimely
death has come as a shock and surprise to most of tonight's guests.
"""
miss_scarlet = Woman('Miss Scarlet')
miss_scarlet.def_name = 'Miss Scarlet'
miss_scarlet.description = """\
Miss Scarlet is well liked by the younger gentlemen at tonight's gathering.
She is mistrusted by some and seems to have quite the salacious reputation.
"""
col_mustard = Man('Colonel Mustard', 'Col. Mustard', 'Col Mustard')
col_mustard.def_name = 'Colonel Mustard'
col_mustard.description = """\
The Colonel is a stern man who accepts no "nonsense". His long and esteemed
military career has left him with a stiff upper lip and a stiffer drinking
problem.
"""
mrs_white = Woman('Mrs. White', 'Mrs White')
mrs_white.def_name = 'Mrs. White'
mrs_white.description = """\
Mrs. White is usually found waiting on the Manor's guests. However tonight she
has been invited to dine with the others. She seems frazzled and distressed,
she is nervously glancing around the room.
"""
rev_green = Man(
'Reverend Green', 'Rev. Green', 'Rev Green', 'Mr. Green', 'Mr Green')
rev_green.def_name = 'Reverend Green'
rev_green.description = """\
Reverend Green is a kindly, wizened old man. Rumour has it that his gambling
debts make rich men wince.
"""
mrs_peacock = Woman('Mrs. Peacock', 'Mrs Peacock')
mrs_peacock.def_name = 'Mrs. Peacock'
mrs_peacock.description = """\
Mrs. Peacock commands the respect of all she meets. She is the eldest and
wisest of tonight's guests, her fierce eyes have been known to scare the local
children.
"""
prof_plum = Man('Professor Plum', 'Prof. Plum', 'Prof Plum')
prof_plum.def_name = 'Professor Plum'
prof_plum.description = """\
Professor Plum is young for a professor, and very ambitious. His latest
academic paper was widely and loudly critised by the victim.
"""
butler = Man('Mr. Butler', 'mr butler', 'butler')
butler.def_name = 'the butler'
butler.description = """\
The butler is immaculately dressed in a smart uniform. He stands to one side
and avoids your gaze. Implausibly, his name really is Mr. Butler.
"""
guests = Bag([
miss_scarlet, col_mustard, mrs_white, rev_green, mrs_peacock, prof_plum
])
@when('list guests')
def list_guests():
print("A nearby guest list for tonight's gathering has the following names:")
for c in guests:
print(c)
if __name__ == '__main__':
assert prof_plum == guests.find('Prof. Plum')
assert prof_plum != guests.find('Plum')
| true | true |
1c3d1e3adf335147a41c0f14755cb1e1b82ef576 | 620 | py | Python | portal/_extensions/cookbook_gallery_generator.py | ProjectPythia/portal | 57407bfc5c38208f3a85d9a3ef8d80a4657ea4fb | [
"Apache-2.0"
] | 1 | 2020-12-10T09:57:35.000Z | 2020-12-10T09:57:35.000Z | portal/_extensions/cookbook_gallery_generator.py | ProjectPythia/portal | 57407bfc5c38208f3a85d9a3ef8d80a4657ea4fb | [
"Apache-2.0"
] | 2 | 2020-12-10T19:13:08.000Z | 2020-12-10T19:13:19.000Z | portal/_extensions/cookbook_gallery_generator.py | ProjectPythia/portal | 57407bfc5c38208f3a85d9a3ef8d80a4657ea4fb | [
"Apache-2.0"
] | 2 | 2020-12-09T22:29:43.000Z | 2020-12-10T09:57:49.000Z | import yaml
from gallery_generator import build_from_items, generate_menu
def main(app):
with open('cookbook_gallery.yaml') as fid:
all_items = yaml.safe_load(fid)
title = 'Cookbooks Gallery'
subtext = 'Pythia Cookbooks provide example workflows on more advanced and domain-specific problems developed by the Pythia community. Cookbooks build on top of skills you learn in Pythia Foundations.'
menu_html = generate_menu(all_items)
build_from_items(all_items, 'cookbook-gallery', title=title, subtext=subtext, menu_html=menu_html)
def setup(app):
app.connect('builder-inited', main)
| 34.444444 | 205 | 0.762903 | import yaml
from gallery_generator import build_from_items, generate_menu
def main(app):
with open('cookbook_gallery.yaml') as fid:
all_items = yaml.safe_load(fid)
title = 'Cookbooks Gallery'
subtext = 'Pythia Cookbooks provide example workflows on more advanced and domain-specific problems developed by the Pythia community. Cookbooks build on top of skills you learn in Pythia Foundations.'
menu_html = generate_menu(all_items)
build_from_items(all_items, 'cookbook-gallery', title=title, subtext=subtext, menu_html=menu_html)
def setup(app):
app.connect('builder-inited', main)
| true | true |
1c3d1ee3b003f80ec96564cadfc120daff279bb5 | 834 | py | Python | src/lesson_runtime_features/sys_shelve_importer_module.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | 3 | 2018-08-14T09:33:52.000Z | 2022-03-21T12:31:58.000Z | src/lesson_runtime_features/sys_shelve_importer_module.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | src/lesson_runtime_features/sys_shelve_importer_module.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | import sys
import sys_shelve_importer
def show_module_details(module):
print(' message :', module.message)
print(' __name__ :', module.__name__)
print(' __package__:', module.__package__)
print(' __file__ :', module.__file__)
print(' __path__ :', module.__path__)
print(' __loader__ :', module.__loader__)
filename = '/tmp/pymotw_import_example.shelve'
sys.path_hooks.append(sys_shelve_importer.ShelveFinder)
sys.path.insert(0, filename)
print('Import of "package.module1":')
import package.module1
print()
print('Examine package.module1 details:')
show_module_details(package.module1)
print()
print('Import of "package.subpackage.module2":')
import package.subpackage.module2
print()
print('Examine package.subpackage.module2 details:')
show_module_details(package.subpackage.module2)
| 25.272727 | 55 | 0.747002 | import sys
import sys_shelve_importer
def show_module_details(module):
print(' message :', module.message)
print(' __name__ :', module.__name__)
print(' __package__:', module.__package__)
print(' __file__ :', module.__file__)
print(' __path__ :', module.__path__)
print(' __loader__ :', module.__loader__)
filename = '/tmp/pymotw_import_example.shelve'
sys.path_hooks.append(sys_shelve_importer.ShelveFinder)
sys.path.insert(0, filename)
print('Import of "package.module1":')
import package.module1
print()
print('Examine package.module1 details:')
show_module_details(package.module1)
print()
print('Import of "package.subpackage.module2":')
import package.subpackage.module2
print()
print('Examine package.subpackage.module2 details:')
show_module_details(package.subpackage.module2)
| true | true |
1c3d1f86424ef27c1000aa797e7cb8d7760227a4 | 719 | py | Python | fingerprint/CMSeeK/VersionDetect/abda.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | null | null | null | fingerprint/CMSeeK/VersionDetect/abda.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | null | null | null | fingerprint/CMSeeK/VersionDetect/abda.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | 1 | 2021-11-28T18:01:33.000Z | 2021-11-28T18:01:33.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 - 2020 Tuhinshubhra
# Al Mubda version detection
# Rev 1
import cmseekdb.basic as cmseek
import re
def start(source):
cmseek.statement("Detecting Al Mubda version using source code [Method 1 of 1]")
regex = re.findall(r'Powered by Al Mubda version (\d.*?)</a>', source)
if regex != []:
if regex[0] != '' and regex[0] != ' ':
version = regex[0]
cmseek.success('Al Mubda version ' + cmseek.bold + cmseek.fgreen + version + cmseek.cln + ' detected')
return version
cmseek.error('Version detection failed!')
return '0'
| 31.26087 | 114 | 0.635605 |
import cmseekdb.basic as cmseek
import re
def start(source):
cmseek.statement("Detecting Al Mubda version using source code [Method 1 of 1]")
regex = re.findall(r'Powered by Al Mubda version (\d.*?)</a>', source)
if regex != []:
if regex[0] != '' and regex[0] != ' ':
version = regex[0]
cmseek.success('Al Mubda version ' + cmseek.bold + cmseek.fgreen + version + cmseek.cln + ' detected')
return version
cmseek.error('Version detection failed!')
return '0'
| true | true |
1c3d2266e24d867d18539d922e5b57ca45ce667c | 162 | py | Python | airtest/aircv/__init__.py | zhangzhao4444/Airtest | cb896ac166179f5095bfe889586e9c32d97b082e | [
"Apache-2.0"
] | 1 | 2021-05-02T08:13:00.000Z | 2021-05-02T08:13:00.000Z | airtest/aircv/__init__.py | dongxue1012/Airtest | cb896ac166179f5095bfe889586e9c32d97b082e | [
"Apache-2.0"
] | null | null | null | airtest/aircv/__init__.py | dongxue1012/Airtest | cb896ac166179f5095bfe889586e9c32d97b082e | [
"Apache-2.0"
] | 5 | 2018-03-27T09:56:04.000Z | 2022-02-06T10:28:51.000Z | from .aircv import *
from .error import *
from .sift import find_sift
from .template import find_template, find_all_template
from .template2 import find_template2 | 32.4 | 54 | 0.82716 | from .aircv import *
from .error import *
from .sift import find_sift
from .template import find_template, find_all_template
from .template2 import find_template2 | true | true |
1c3d22fcf848845a754d1a723e1d7c7d1ffa3d2b | 1,478 | py | Python | gh/__init__.py | woosal1337/omeniscient | 4b963013f21165d9ccce210588cc3009ba53857b | [
"MIT"
] | 3 | 2021-07-29T14:15:46.000Z | 2021-07-30T11:53:15.000Z | gh/__init__.py | woosal1337/omeniscient | 4b963013f21165d9ccce210588cc3009ba53857b | [
"MIT"
] | null | null | null | gh/__init__.py | woosal1337/omeniscient | 4b963013f21165d9ccce210588cc3009ba53857b | [
"MIT"
] | 1 | 2021-10-10T22:42:33.000Z | 2021-10-10T22:42:33.000Z | import requests
import json
import rich
from rich import pretty
from rich.console import Console
console = Console()
class gith():
def __init__(self):
pass
def get_info(self, username):
"""
Gets a username on GitHub as an argument, gets the information regarding that person on GitHub
:param username:
:return:
"""
try:
user = requests.get(f"https://api.github.com/users/{username}").json()
console.print(f'ID: {user["id"]}',style="green")
console.print(f'Avatar URL: {user["avatar_url"]}',style="green")
console.print(f'Name: {user["name"]}',style="green")
console.print(f'Companies: {user["company"]}', style="green")
console.print(f'Blog: {user["blog"]}', style="green")
console.print(f'Location: {user["location"]}', style="green")
console.print(f'Email: {user["email"]}', style="green")
console.print(f'Bio: {user["bio"]}', style="green")
console.print(f'Public Repos: {user["public_repos"]}', style="green")
console.print(f'Public Gists: {user["public_gists"]}', style="green")
console.print(f'Followers: {user["followers"]}', style="green")
console.print(f'Following: {user["following"]}', style="green")
return True
except:
console.print("An Error has Occurred in GitHub Requests")
return False | 32.844444 | 102 | 0.583897 | import requests
import json
import rich
from rich import pretty
from rich.console import Console
console = Console()
class gith():
def __init__(self):
pass
def get_info(self, username):
try:
user = requests.get(f"https://api.github.com/users/{username}").json()
console.print(f'ID: {user["id"]}',style="green")
console.print(f'Avatar URL: {user["avatar_url"]}',style="green")
console.print(f'Name: {user["name"]}',style="green")
console.print(f'Companies: {user["company"]}', style="green")
console.print(f'Blog: {user["blog"]}', style="green")
console.print(f'Location: {user["location"]}', style="green")
console.print(f'Email: {user["email"]}', style="green")
console.print(f'Bio: {user["bio"]}', style="green")
console.print(f'Public Repos: {user["public_repos"]}', style="green")
console.print(f'Public Gists: {user["public_gists"]}', style="green")
console.print(f'Followers: {user["followers"]}', style="green")
console.print(f'Following: {user["following"]}', style="green")
return True
except:
console.print("An Error has Occurred in GitHub Requests")
return False | true | true |
1c3d241305124a6414a95fd3d8aa6ddba59be2ec | 743 | py | Python | spotify-albums-organizer/import_images.py | savoy1211/discogs-spotify-import | 7a9687c96fbef1d1d41242de85f7aa0da12c65d8 | [
"MIT"
] | 9 | 2019-08-09T19:02:08.000Z | 2021-11-19T20:21:33.000Z | spotify-albums-organizer/import_images.py | savoy1211/discogs-spotify-import | 7a9687c96fbef1d1d41242de85f7aa0da12c65d8 | [
"MIT"
] | 8 | 2020-03-24T17:28:51.000Z | 2022-01-13T01:34:07.000Z | spotify-albums-organizer/import_images.py | savoy1211/spotify-albums-organizer | 7a9687c96fbef1d1d41242de85f7aa0da12c65d8 | [
"MIT"
] | null | null | null | """
Imports album images to the app for easy access for display_albums.py.
"""
import os.path
import os
import shutil
import urllib.request
import pymongo
from sys import stdout
from time import sleep
client = pymongo.MongoClient("localhost", 27017)
db = client.discogs_masters
all_albums = db.current_albums.find()
i = 1
for album in all_albums:
try:
url = album["items"]["album"]["images"][0]["url"]
album_id = album["items"]["album"]["id"]
filename = str(url)+".jpg"
file = album_id
urllib.request.urlretrieve(url,file)
shutil.move(file, "data/images/"+file)
i+=1
denom = '/'+str(all_albums.count())
stdout.write("\r%d" % i +denom)
stdout.flush()
except Exception:
i+=1
print(str(i)+" doesn't work.")
| 18.575 | 71 | 0.683715 |
import os.path
import os
import shutil
import urllib.request
import pymongo
from sys import stdout
from time import sleep
client = pymongo.MongoClient("localhost", 27017)
db = client.discogs_masters
all_albums = db.current_albums.find()
i = 1
for album in all_albums:
try:
url = album["items"]["album"]["images"][0]["url"]
album_id = album["items"]["album"]["id"]
filename = str(url)+".jpg"
file = album_id
urllib.request.urlretrieve(url,file)
shutil.move(file, "data/images/"+file)
i+=1
denom = '/'+str(all_albums.count())
stdout.write("\r%d" % i +denom)
stdout.flush()
except Exception:
i+=1
print(str(i)+" doesn't work.")
| true | true |
1c3d24c5e190cd2bbc826d2d7a6a832a29f23871 | 267 | py | Python | slackcat/compat.py | nficano/slackcat | 004a0e28dc8ac58cc74c68e48a3542e58009964a | [
"MIT-0"
] | 25 | 2017-08-23T15:41:17.000Z | 2019-09-07T16:26:08.000Z | slackcat/compat.py | nficano/slackcat | 004a0e28dc8ac58cc74c68e48a3542e58009964a | [
"MIT-0"
] | 45 | 2017-09-01T07:11:42.000Z | 2022-01-12T17:51:22.000Z | slackcat/compat.py | nficano/slackcat | 004a0e28dc8ac58cc74c68e48a3542e58009964a | [
"MIT-0"
] | 4 | 2017-09-01T22:50:33.000Z | 2018-06-14T14:41:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
# Python 3.x
from urllib.parse import urlencode
import urllib.request as urlrequest
except ImportError:
# Python 2.x
from urllib import urlencode # noqa
import urllib2 as urlrequest # noqa
| 22.25 | 40 | 0.674157 |
try:
from urllib.parse import urlencode
import urllib.request as urlrequest
except ImportError:
from urllib import urlencode
import urllib2 as urlrequest
| true | true |
1c3d2561e8dc9562b8fbb9576367da0c5b1fc15a | 28,296 | py | Python | success_metrics.py | michaelmworthington/iq-success-metrics | 861743926fc4750731d5e1577ef9e0d2b3126221 | [
"Apache-2.0"
] | null | null | null | success_metrics.py | michaelmworthington/iq-success-metrics | 861743926fc4750731d5e1577ef9e0d2b3126221 | [
"Apache-2.0"
] | null | null | null | success_metrics.py | michaelmworthington/iq-success-metrics | 861743926fc4750731d5e1577ef9e0d2b3126221 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# Copyright 2019 Sonatype Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import sys
import argparse
import requests
import os
#---------------------------------
iq_session = requests.Session()
config = {
"VulDisTime" : 2, "FixManTime" : 2, "FixAutoTime" : 0.3, "WaiManTime" : 7, "WaiAutoTime" : 0.3, "ProductiveHoursDay" : 7, "AvgHourCost" : 100,
"risk" : ["LOW", "MODERATE", "SEVERE", "CRITICAL"], "category" : ["SECURITY", "LICENSE", "QUALITY", "OTHER"],
"status" : ["discoveredCounts", "fixedCounts", "waivedCounts", "openCountsAtTimePeriodEnd"],
"mttr" : ["mttrLowThreat", "mttrModerateThreat", "mttrSevereThreat", "mttrCriticalThreat"],
"statRates": ["FixRate", "WaiveRate", "DealtRate", "FixPercent", "WaiPercent"],
"rates": ["fixedRate","waivedRate","dealtRate"]
}
def main():
parser = argparse.ArgumentParser(description='get some Success Metrics')
parser.add_argument('-a','--auth', help='', default="admin:admin123", required=False)
parser.add_argument('-s','--scope', help='', type=int, default="6", required=False)
parser.add_argument('-u','--url', help='', default="http://localhost:8070", required=False)
parser.add_argument('-k','--insecure', help='', action='store_true', required=False)
parser.add_argument('-i','--appId', help='', required=False)
parser.add_argument('-o','--orgId', help='', required=False)
parser.add_argument('-p','--pretty', help='', action='store_true', required=False)
parser.add_argument('-r','--reports', help='', action='store_true',required=False)
parser.add_argument('-rs','--reportsSec', help='', action='store_true',required=False)
parser.add_argument('-rl','--reportsLic', help='', action='store_true',required=False)
args = vars(parser.parse_args())
creds = args["auth"].split(":",1)
iq_session.auth = requests.auth.HTTPBasicAuth(str(creds[0]), str(creds[1]) )
if args["insecure"] == True:
print("WARNING: Ignoring SSL Certificate Validation")
iq_session.verify = False
if not os.path.exists("output"):
os.mkdir("output")
#search for applicationId
appId = searchApps(args["appId"], args["url"])
#search for organizationId
orgId = searchOrgs(args["orgId"], args["url"])
# get success metrics
data = get_metrics(args["url"], args["scope"], appId, orgId ) #collects data with or without filters according to appId and orgId
if data is None:
print("No results found.")
raise SystemExit
#-----------------------------------------------------------------------------------
#reportCounts is used to aggregate totals from the filtered set of applications.
#reportAverages will calculate averages for MTTR.
#reportSummary will return the final results.
#reportLic will return the final results for Licence vulnerabilities only
#reportSec will return the final results for security vulnerabilities only
reportAverages, reportCounts, reportSummary = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
reportAveragesLic, reportCountsLic, reportLic = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
reportAveragesSec, reportCountsSec, reportSec = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
# set the weeks range in the report summary for the required scope.
for recency in range(args["scope"], 0, -1):
reportSummary["timePeriodStart"].append( get_week_start( recency ) )
reportLic["timePeriodStart"].append( get_week_start( recency ) )
reportSec["timePeriodStart"].append( get_week_start( recency ) )
reportSummary["weeks"].append( get_week_only( recency ) )
reportLic["weeks"].append( get_week_only( recency ) )
reportSec["weeks"].append( get_week_only( recency ) )
# building aggregated set of fields for MTTR
for mttr in config["mttr"]:
reportAverages.update({mttr: empties(reportSummary["weeks"]) })
reportAveragesLic.update({mttr: empties(reportLic["weeks"]) })
reportAveragesSec.update({mttr: empties(reportSec["weeks"]) })
# set empty range for scope
for fields in ["appNumberScan", "appOnboard", "weeklyScans","riskRatioCritical","riskRatioSevere","riskRatioModerate","riskRatioLow"]:
reportCounts.update({ fields : zeros(reportSummary["weeks"]) })
reportCountsLic.update({ fields : zeros(reportLic["weeks"]) })
reportCountsSec.update({ fields : zeros(reportSec["weeks"]) })
# building aggregated set of fields.
for status in config["status"]:
reportCounts.update({ status: {} })
reportCountsLic.update({ status: {} })
reportCountsSec.update({ status: {} })
for risk in config["risk"]:
reportCounts[status].update({ risk: zeros(reportSummary["weeks"]) })
reportCountsLic[status].update({ risk: zeros(reportLic["weeks"]) })
reportCountsSec[status].update({ risk: zeros(reportSec["weeks"]) })
reportCounts[status].update({ "TOTAL" : zeros(reportSummary["weeks"]) })
reportCountsLic[status].update({ "TOTAL" : zeros(reportLic["weeks"]) })
reportCountsSec[status].update({ "TOTAL" : zeros(reportSec["weeks"]) })
#-----------------------------------------------------------------------------------
# loop through applications in success metric data.
for app in data:
reportSummary['appNames'].append( app["applicationName"] )
reportLic['appNames'].append( app["applicationName"] )
reportSec['appNames'].append( app["applicationName"] )
reportSummary['orgNames'].append( app["organizationName"] )
reportLic['orgNames'].append( app["organizationName"] )
reportSec['orgNames'].append( app["organizationName"] )
app_summary = get_aggs_list() # zeroed summary template.
for aggregation in app["aggregations"]:
# process the weekly reports for application.
process_week(aggregation, app_summary)
compute_summary(app_summary)
app.update( {"summary": app_summary} )
app.update( {"licences": app_summary} )
app.update( {"security": app_summary} )
for week_no in app_summary["weeks"]:
position = app_summary["weeks"].index(week_no)
reportCounts["appOnboard"][week_no] += 1
reportCountsLic["appOnboard"][week_no] += 1
reportCountsSec["appOnboard"][week_no] += 1
# only include the app's week when they have a value
for mttr in config["mttr"]:
value = app_summary[mttr]["rng"][position]
if not value is None:
reportAverages[mttr][week_no].append( value )
reportAveragesLic[mttr][week_no].append( value )
reportAveragesSec[mttr][week_no].append( value )
if app_summary["evaluationCount"]["rng"][position] != 0:
reportCounts["appNumberScan"][week_no] += 1
reportCountsLic["appNumberScan"][week_no] += 1
reportCountsSec["appNumberScan"][week_no] += 1
reportCounts["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
reportCountsLic["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
reportCountsSec["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
for status in config["status"]:
for risk in config["risk"]:
reportCounts[status][risk][week_no] += app_summary[status]["TOTAL"][risk]["rng"][position]
reportCountsLic[status][risk][week_no] += app_summary[status]["LICENSE"][risk]["rng"][position]
reportCountsSec[status][risk][week_no] += app_summary[status]["SECURITY"][risk]["rng"][position]
reportCounts[status]["TOTAL"][week_no] += app_summary[status]["TOTAL"]["rng"][position]
reportCountsLic[status]["TOTAL"][week_no] += app_summary[status]["LICENSE"]["TOTAL"]["rng"][position]
reportCountsSec[status]["TOTAL"][week_no] += app_summary[status]["SECURITY"]["TOTAL"]["rng"][position]
#for rates in config["rates"]:
# for risk in config["risk"]:
# reportCounts[rates][risk][week_no] += app_summary[rates]["TOTAL"][risk]["rng"][position]
# reportCounts[rates]["TOTAL"][week_no] += app_summary[rates]["TOTAL"]["rng"][position]
#-----------------------------------------------------------------------------------
#convert the dicts to arrays.
for fields in ["appNumberScan", "appOnboard", "weeklyScans"]:
reportSummary.update({ fields : list( reportCounts[fields].values() ) })
reportLic.update({ fields : list( reportCountsLic[fields].values() ) })
reportSec.update({ fields : list( reportCountsSec[fields].values() ) })
# calculate the averages for each week. Returns None when no values are available for a given week.
for mttr in config["mttr"]:
reportSummary.update({ mttr: list( avg(value) for value in reportAverages[mttr].values()) })
reportLic.update({ mttr: list( avg(value) for value in reportAveragesLic[mttr].values()) })
reportSec.update({ mttr: list( avg(value) for value in reportAveragesSec[mttr].values()) })
for status in config["status"]:
reportSummary.update({ status: {} })
reportLic.update({ status: {} })
reportSec.update({ status: {} })
for risk in config["risk"]:
reportSummary[status].update({ risk: list( reportCounts[status][risk].values() ) })
reportLic[status].update({ risk: list( reportCountsLic[status][risk].values() ) })
reportSec[status].update({ risk: list( reportCountsSec[status][risk].values() ) })
reportSummary[status].update({ "LIST" : list( reportSummary[status].values() ) })
reportLic[status].update({ "LIST" : list( reportLic[status].values() ) })
reportSec[status].update({ "LIST" : list( reportSec[status].values() ) })
reportSummary[status].update({ "TOTAL" : list( reportCounts[status]["TOTAL"].values() ) })
reportLic[status].update({ "TOTAL" : list( reportCountsLic[status]["TOTAL"].values() ) })
reportSec[status].update({ "TOTAL" : list( reportCountsSec[status]["TOTAL"].values() ) })
#for rates in config["rates"]:
# reportSummary.update({ rates: {} })
#
# for risk in config["risk"]:
# reportSummary[rates].update({ risk: list( reportCounts[rates][risk].values() ) })
#
# reportSummary[rates].update({ "LIST" : list( reportSummary[rates].values() ) })
# reportSummary[rates].update({ "TOTAL" : list( reportCounts[rates]["TOTAL"].values() ) })
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportSummary['weeks'])):
if reportSummary['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportSummary.update({'riskRatioCritical' : riskRatioCri})
reportSummary.update({'riskRatioSevere' : riskRatioSev})
reportSummary.update({'riskRatioModerate' : riskRatioMod})
reportSummary.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportLic['weeks'])):
if reportLic['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportLic['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportLic['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportLic['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportLic['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportLic['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportLic.update({'riskRatioCritical' : riskRatioCri})
reportLic.update({'riskRatioSevere' : riskRatioSev})
reportLic.update({'riskRatioModerate' : riskRatioMod})
reportLic.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportSec['weeks'])):
if reportSec['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportSec['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportSec['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportSec['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportSec['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportSec['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportSec.update({'riskRatioCritical' : riskRatioCri})
reportSec.update({'riskRatioSevere' : riskRatioSev})
reportSec.update({'riskRatioModerate' : riskRatioMod})
reportSec.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
# Final report with summary and data objects.
report = {"summary": reportSummary, "apps": data, "licences": reportLic, "security": reportSec}
#-----------------------------------------------------------------------------------
# Setting the default to output to json file with the option to format it to human readable.
## make an output directory
os.makedirs("output", exist_ok=True)
print("Generating successmetrics.json")
with open("output/successmetrics.json",'w') as f:
if args["pretty"]:
f.write(json.dumps(report, indent=4))
else:
json.dump(report, f)
print( "saved to output/successmetrics.json" )
#-----------------------------------------------------------------------------------
# one more thing...
if args["reports"] == True:
print("Generating the Executive report")
os.system('python3 ./reports.py -e')
print("Generating the Table report")
os.system('python3 ./reports.py -t')
if args["reportsSec"] == True:
print("Generating the Executive report just for Security violations")
os.system('python3 ./reports.py -es')
print("Generating the Table report just for Security violations")
os.system('python3 ./reports.py -ts')
if args["reportsLic"] == True:
print("Generating the Executive report just for Licensing violations")
os.system('python3 ./reports.py -el')
print("Generating the Table report just for Licensing violations")
os.system('python3 ./reports.py -tl')
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
def searchApps(search, iq_url):
appId = []
if search is not None and len(search) > 0:
search = search.split(",")
url = '{}/api/v2/applications'.format(iq_url)
response = iq_session.get(url).json()
for app in response["applications"]:
for item in search:
if item in [app["name"], app["id"], app["publicId"]]:
appId.append(app["id"]) #if app "name", "id" or "publicId" in arguments, then creates array of app IDs in appId
return appId
def searchOrgs(search, iq_url):
orgId = []
if search is not None and len(search) > 0:
search = search.split(",")
url = '{}/api/v2/organizations'.format(iq_url)
response = iq_session.get(url).json()
for org in response["organizations"]:
for item in search:
if item in [org["name"], org["id"]]:
orgId.append(org["id"]) #if org "name", "id" or "publicId" in arguments, then creates array of org IDs in orgId
return orgId
def get_week_start(recency = 0):
d = datetime.date.today()
d = d - datetime.timedelta(days=d.weekday()+(recency*7) )
period = '{}'.format( d.isoformat() )
return period
def get_week_only(recency = 0):
d = datetime.date.today() - datetime.timedelta(days=(recency*7))
period = '{}'.format(d.isocalendar()[1])
return period
def get_week(recency = 0): # recency is number of weeks prior to current week.
d = datetime.date.today() - datetime.timedelta(days=(recency*7))
period = '{}-W{}'.format(d.year , d.isocalendar()[1])
return period
def get_week_date(s):
d = datetime.datetime.strptime(s, "%Y-%m-%d")
period = '{}'.format(d.isocalendar()[1])
return period
def get_metrics(iq_url, scope = 6, appId = [], orgId = []): # scope is number of week prior to current week.
url = "{}/api/v2/reports/metrics".format(iq_url)
iq_header = {'Content-Type':'application/json', 'Accept':'application/json'}
r_body = {"timePeriod": "WEEK", "firstTimePeriod": get_week(scope) ,"lastTimePeriod": get_week(1), #use get_week(0) instead if looking for Year-To-Date data instead of fully completed weeks
"applicationIds": appId, "organizationIds": orgId}
response = iq_session.post( url, json=r_body, headers=iq_header)
return response.json()
def rnd(n): return round(n,2)
def avg(n): return 0 if len(n) == 0 else rnd(sum(n)/len(n))
def rate(n, d): return 0 if d == 0 else (n/d)
def percent(n, d): return rnd(rate(n, d)*100)
def zeros(n): return dict.fromkeys( n, 0)
def empties(keys): return { key : list([]) for key in keys }
def ms_days(v): #convert ms to days
if v is None: return 0
else: return round(v/86400000)
def get_aggs_list():
s = {"weeks":[], "dates": [], "fixedRate":[], "waivedRate":[], "dealtRate":[]}
s.update(zeros(config["statRates"]))
s.update({"evaluationCount":{"rng":[]}})
for m in config["mttr"]:
s.update({m:{"avg":0,"rng":[]}})
for t in config["status"]:
g = {"TOTAL":{"avg":0,"rng":[]}}
for c in config["category"]:
k = {"TOTAL":{"avg":0,"rng":[]}}
for r in config["risk"]:
k.update({r:{"avg":0,"rng":[]}})
g.update({c:k})
for r in config["risk"]:
g["TOTAL"].update({r:{"avg":0,"rng":[]}})
s.update({t:g})
#for rates in config["rates"]:
# g = {"TOTAL":{"avg":0,"rng":[]}}
# for c in config["category"]:
# k = {"TOTAL":{"avg":0,"rng":[]}}
# for r in config["risk"]:
# k.update({r:{"avg":0,"rng":[]}})
# g.update({c:k})
# for r in config["risk"]:
# g["TOTAL"].update({r:{"avg":0,"rng":[]}})
# s.update({rates:g})
return s
#----------------------------------
# Helpers
def get_dCnt(d): return d["discoveredCounts"]["TOTAL"]["rng"]
def get_oCnt(d): return d["openCountsAtTimePeriodEnd"]["TOTAL"]["rng"]
def get_fCnt(d): return d["fixedCounts"]["TOTAL"]["rng"]
def get_wCnt(d): return d["waivedCounts"]["TOTAL"]["rng"]
def calc_FixedRate(d, last=True):
f, o = get_fCnt(d), get_oCnt(d)
if last: f, o = f[-1], o[-1]
else: f, o = sum(f), sum(o)
return percent(f, o)
def calc_WaivedRate(d, last=True):
w, o = get_wCnt(d), get_oCnt(d)
if last: w, o = w[-1], o[-1]
else: w, o = sum(w), sum(o)
return percent(w, o)
def calc_DealtRate(d, last=True):
f, w, o = get_fCnt(d), get_wCnt(d), get_oCnt(d)
if last: f, w, o = f[-1], w[-1], o[-1]
else: f, w, o = sum(f), sum(w), sum(o)
return percent(f+w, o)
def calc_FixPercent(d):
f, w = sum(get_fCnt(d)), sum(get_wCnt(d))
return 0 if (f+w) == 0 else (f/(f+w))
def calc_WaiPercent(d):
f, w = sum( get_fCnt(d)), sum(get_wCnt(d))
return 0 if (f+w) == 0 else (w/(f+w))
def calc_DisManCost(d):
return sum(get_dCnt(d)) * config["AvgHourCost"] * config["VulDisTime"]
def calc_DebtManCost(d):
return sum(get_oCnt(d)) * config["AvgHourCost"] * ( (calc_FixPercent(d) * config["FixManTime"]) + ( calc_WaiPercent(d) * config["WaiManTime"] ) )
def calc_DebtAutoCost(d):
return sum(get_oCnt(d)) * config["AvgHourCost"] * ( (calc_FixPercent(d) * config["FixAutoTime"]) + ( calc_WaiPercent(d) * config["WaiAutoTime"] ) )
def calc_TotalSonatypeValue(d):
return calc_DisManCost(d) + ( calc_DebtManCost(d) - calc_DebtAutoCost(d) )
#------------------------------------------------------------------------------------
def process_week(a, s):
for mttr in config["mttr"]:
if mttr in a:
value = a[mttr]
if not value is None:
value = ms_days(value)
s[mttr]["rng"].append(value)
for status in config["status"]:
for category in config["category"]:
Totals = 0
for risk in config["risk"]:
if status in a and category in a[status] and risk in a[status][category]:
value = a[status][category][risk]
s[status][category][risk]["rng"].append(value)
Totals += value
s[status][category]["TOTAL"]["rng"].append(Totals)
# Totals for status including risk levels
Totals = 0
for risk in config["risk"]:
value = 0
for category in config["category"]:
value += s[status][category][risk]["rng"][-1]
s[status]["TOTAL"][risk]["rng"].append(value)
Totals += value
s[status]["TOTAL"]["rng"].append(Totals)
#INCLUDE fixedRate, waivedRate, dealtRate loop here?
s["evaluationCount"]["rng"].append( a["evaluationCount"] )
s["weeks"].append( get_week_date( a["timePeriodStart"]) ) #set week list for images
s["dates"].append(a["timePeriodStart"]) #set dates list for images
s["fixedRate"].append( calc_FixedRate(s, True) )
s["waivedRate"].append( calc_WaivedRate(s, True) )
s["dealtRate"].append( calc_DealtRate(s, True) )
def compute_summary(s):
for mttr in config["mttr"]:
t = []
for w in s[mttr]["rng"]:
if not w is None:
t.append(w)
s[mttr]["avg"] = avg(t)
for status in config["status"]:
for category in config["category"]:
for risk in config["risk"]:
s[status][category][risk]["avg"] = avg(s[status][category][risk]["rng"])
s[status][category]["TOTAL"]["avg"] = avg(s[status][category]["TOTAL"]["rng"])
for risk in config["risk"]:
s[status]["TOTAL"][risk]["avg"] = avg(s[status]["TOTAL"][risk]["rng"])
s[status]["TOTAL"]["avg"] = avg(s[status]["TOTAL"]["rng"])
#INCLUDE fixedRate, waivedRate, dealtRate loop here?
s["SonatypeValue"] = calc_TotalSonatypeValue(s)
s["FixRate"] = calc_FixedRate(s, False)
s["WaiveRate"] = calc_WaivedRate(s, False)
s["DealtRate"] = calc_DealtRate(s, False)
s["FixPercent"] = calc_FixPercent(s)
s["WaiPercent"] = calc_WaiPercent(s)
#------------------------------------------------------------------------------------
if __name__ == "__main__":
main()
#raise SystemExit
| 54.731141 | 197 | 0.531418 |
import datetime
import json
import sys
import argparse
import requests
import os
iq_session = requests.Session()
config = {
"VulDisTime" : 2, "FixManTime" : 2, "FixAutoTime" : 0.3, "WaiManTime" : 7, "WaiAutoTime" : 0.3, "ProductiveHoursDay" : 7, "AvgHourCost" : 100,
"risk" : ["LOW", "MODERATE", "SEVERE", "CRITICAL"], "category" : ["SECURITY", "LICENSE", "QUALITY", "OTHER"],
"status" : ["discoveredCounts", "fixedCounts", "waivedCounts", "openCountsAtTimePeriodEnd"],
"mttr" : ["mttrLowThreat", "mttrModerateThreat", "mttrSevereThreat", "mttrCriticalThreat"],
"statRates": ["FixRate", "WaiveRate", "DealtRate", "FixPercent", "WaiPercent"],
"rates": ["fixedRate","waivedRate","dealtRate"]
}
def main():
parser = argparse.ArgumentParser(description='get some Success Metrics')
parser.add_argument('-a','--auth', help='', default="admin:admin123", required=False)
parser.add_argument('-s','--scope', help='', type=int, default="6", required=False)
parser.add_argument('-u','--url', help='', default="http://localhost:8070", required=False)
parser.add_argument('-k','--insecure', help='', action='store_true', required=False)
parser.add_argument('-i','--appId', help='', required=False)
parser.add_argument('-o','--orgId', help='', required=False)
parser.add_argument('-p','--pretty', help='', action='store_true', required=False)
parser.add_argument('-r','--reports', help='', action='store_true',required=False)
parser.add_argument('-rs','--reportsSec', help='', action='store_true',required=False)
parser.add_argument('-rl','--reportsLic', help='', action='store_true',required=False)
args = vars(parser.parse_args())
creds = args["auth"].split(":",1)
iq_session.auth = requests.auth.HTTPBasicAuth(str(creds[0]), str(creds[1]) )
if args["insecure"] == True:
print("WARNING: Ignoring SSL Certificate Validation")
iq_session.verify = False
if not os.path.exists("output"):
os.mkdir("output")
appId = searchApps(args["appId"], args["url"])
orgId = searchOrgs(args["orgId"], args["url"])
data = get_metrics(args["url"], args["scope"], appId, orgId )
if data is None:
print("No results found.")
raise SystemExit
reportAverages, reportCounts, reportSummary = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
reportAveragesLic, reportCountsLic, reportLic = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
reportAveragesSec, reportCountsSec, reportSec = {}, {}, {"appNames":[], "orgNames":[], "weeks":[], "dates":[], "timePeriodStart" : []}
for recency in range(args["scope"], 0, -1):
reportSummary["timePeriodStart"].append( get_week_start( recency ) )
reportLic["timePeriodStart"].append( get_week_start( recency ) )
reportSec["timePeriodStart"].append( get_week_start( recency ) )
reportSummary["weeks"].append( get_week_only( recency ) )
reportLic["weeks"].append( get_week_only( recency ) )
reportSec["weeks"].append( get_week_only( recency ) )
for mttr in config["mttr"]:
reportAverages.update({mttr: empties(reportSummary["weeks"]) })
reportAveragesLic.update({mttr: empties(reportLic["weeks"]) })
reportAveragesSec.update({mttr: empties(reportSec["weeks"]) })
for fields in ["appNumberScan", "appOnboard", "weeklyScans","riskRatioCritical","riskRatioSevere","riskRatioModerate","riskRatioLow"]:
reportCounts.update({ fields : zeros(reportSummary["weeks"]) })
reportCountsLic.update({ fields : zeros(reportLic["weeks"]) })
reportCountsSec.update({ fields : zeros(reportSec["weeks"]) })
for status in config["status"]:
reportCounts.update({ status: {} })
reportCountsLic.update({ status: {} })
reportCountsSec.update({ status: {} })
for risk in config["risk"]:
reportCounts[status].update({ risk: zeros(reportSummary["weeks"]) })
reportCountsLic[status].update({ risk: zeros(reportLic["weeks"]) })
reportCountsSec[status].update({ risk: zeros(reportSec["weeks"]) })
reportCounts[status].update({ "TOTAL" : zeros(reportSummary["weeks"]) })
reportCountsLic[status].update({ "TOTAL" : zeros(reportLic["weeks"]) })
reportCountsSec[status].update({ "TOTAL" : zeros(reportSec["weeks"]) })
for app in data:
reportSummary['appNames'].append( app["applicationName"] )
reportLic['appNames'].append( app["applicationName"] )
reportSec['appNames'].append( app["applicationName"] )
reportSummary['orgNames'].append( app["organizationName"] )
reportLic['orgNames'].append( app["organizationName"] )
reportSec['orgNames'].append( app["organizationName"] )
app_summary = get_aggs_list()
for aggregation in app["aggregations"]:
process_week(aggregation, app_summary)
compute_summary(app_summary)
app.update( {"summary": app_summary} )
app.update( {"licences": app_summary} )
app.update( {"security": app_summary} )
for week_no in app_summary["weeks"]:
position = app_summary["weeks"].index(week_no)
reportCounts["appOnboard"][week_no] += 1
reportCountsLic["appOnboard"][week_no] += 1
reportCountsSec["appOnboard"][week_no] += 1
for mttr in config["mttr"]:
value = app_summary[mttr]["rng"][position]
if not value is None:
reportAverages[mttr][week_no].append( value )
reportAveragesLic[mttr][week_no].append( value )
reportAveragesSec[mttr][week_no].append( value )
if app_summary["evaluationCount"]["rng"][position] != 0:
reportCounts["appNumberScan"][week_no] += 1
reportCountsLic["appNumberScan"][week_no] += 1
reportCountsSec["appNumberScan"][week_no] += 1
reportCounts["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
reportCountsLic["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
reportCountsSec["weeklyScans"][week_no] += app_summary["evaluationCount"]["rng"][position]
for status in config["status"]:
for risk in config["risk"]:
reportCounts[status][risk][week_no] += app_summary[status]["TOTAL"][risk]["rng"][position]
reportCountsLic[status][risk][week_no] += app_summary[status]["LICENSE"][risk]["rng"][position]
reportCountsSec[status][risk][week_no] += app_summary[status]["SECURITY"][risk]["rng"][position]
reportCounts[status]["TOTAL"][week_no] += app_summary[status]["TOTAL"]["rng"][position]
reportCountsLic[status]["TOTAL"][week_no] += app_summary[status]["LICENSE"]["TOTAL"]["rng"][position]
reportCountsSec[status]["TOTAL"][week_no] += app_summary[status]["SECURITY"]["TOTAL"]["rng"][position]
#for rates in config["rates"]:
# for risk in config["risk"]:
# reportCounts[rates][risk][week_no] += app_summary[rates]["TOTAL"][risk]["rng"][position]
# reportCounts[rates]["TOTAL"][week_no] += app_summary[rates]["TOTAL"]["rng"][position]
#-----------------------------------------------------------------------------------
#convert the dicts to arrays.
for fields in ["appNumberScan", "appOnboard", "weeklyScans"]:
reportSummary.update({ fields : list( reportCounts[fields].values() ) })
reportLic.update({ fields : list( reportCountsLic[fields].values() ) })
reportSec.update({ fields : list( reportCountsSec[fields].values() ) })
# calculate the averages for each week. Returns None when no values are available for a given week.
for mttr in config["mttr"]:
reportSummary.update({ mttr: list( avg(value) for value in reportAverages[mttr].values()) })
reportLic.update({ mttr: list( avg(value) for value in reportAveragesLic[mttr].values()) })
reportSec.update({ mttr: list( avg(value) for value in reportAveragesSec[mttr].values()) })
for status in config["status"]:
reportSummary.update({ status: {} })
reportLic.update({ status: {} })
reportSec.update({ status: {} })
for risk in config["risk"]:
reportSummary[status].update({ risk: list( reportCounts[status][risk].values() ) })
reportLic[status].update({ risk: list( reportCountsLic[status][risk].values() ) })
reportSec[status].update({ risk: list( reportCountsSec[status][risk].values() ) })
reportSummary[status].update({ "LIST" : list( reportSummary[status].values() ) })
reportLic[status].update({ "LIST" : list( reportLic[status].values() ) })
reportSec[status].update({ "LIST" : list( reportSec[status].values() ) })
reportSummary[status].update({ "TOTAL" : list( reportCounts[status]["TOTAL"].values() ) })
reportLic[status].update({ "TOTAL" : list( reportCountsLic[status]["TOTAL"].values() ) })
reportSec[status].update({ "TOTAL" : list( reportCountsSec[status]["TOTAL"].values() ) })
#for rates in config["rates"]:
# reportSummary.update({ rates: {} })
#
# for risk in config["risk"]:
# reportSummary[rates].update({ risk: list( reportCounts[rates][risk].values() ) })
#
# reportSummary[rates].update({ "LIST" : list( reportSummary[rates].values() ) })
# reportSummary[rates].update({ "TOTAL" : list( reportCounts[rates]["TOTAL"].values() ) })
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportSummary['weeks'])):
if reportSummary['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportSummary['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportSummary['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportSummary.update({'riskRatioCritical' : riskRatioCri})
reportSummary.update({'riskRatioSevere' : riskRatioSev})
reportSummary.update({'riskRatioModerate' : riskRatioMod})
reportSummary.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportLic['weeks'])):
if reportLic['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportLic['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportLic['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportLic['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportLic['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportLic['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportLic['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportLic.update({'riskRatioCritical' : riskRatioCri})
reportLic.update({'riskRatioSevere' : riskRatioSev})
reportLic.update({'riskRatioModerate' : riskRatioMod})
reportLic.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
riskRatioCri, riskRatioSev, riskRatioMod, riskRatioLow = [],[],[],[]
for week_no in range(0,len(reportSec['weeks'])):
if reportSec['appOnboard'][week_no] != 0:
riskRatioCri.append(str(round((reportSec['openCountsAtTimePeriodEnd']['CRITICAL'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioSev.append(str(round((reportSec['openCountsAtTimePeriodEnd']['SEVERE'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioMod.append(str(round((reportSec['openCountsAtTimePeriodEnd']['MODERATE'][week_no])/(reportSec['appOnboard'][week_no]),2)))
riskRatioLow.append(str(round((reportSec['openCountsAtTimePeriodEnd']['LOW'][week_no])/(reportSec['appOnboard'][week_no]),2)))
else:
riskRatioCri.append(str(0))
riskRatioSev.append(str(0))
riskRatioMod.append(str(0))
riskRatioLow.append(str(0))
reportSec.update({'riskRatioCritical' : riskRatioCri})
reportSec.update({'riskRatioSevere' : riskRatioSev})
reportSec.update({'riskRatioModerate' : riskRatioMod})
reportSec.update({'riskRatioLow' : riskRatioLow})
#-----------------------------------------------------------------------------------------
# Final report with summary and data objects.
report = {"summary": reportSummary, "apps": data, "licences": reportLic, "security": reportSec}
#-----------------------------------------------------------------------------------
# Setting the default to output to json file with the option to format it to human readable.
## make an output directory
os.makedirs("output", exist_ok=True)
print("Generating successmetrics.json")
with open("output/successmetrics.json",'w') as f:
if args["pretty"]:
f.write(json.dumps(report, indent=4))
else:
json.dump(report, f)
print( "saved to output/successmetrics.json" )
#-----------------------------------------------------------------------------------
# one more thing...
if args["reports"] == True:
print("Generating the Executive report")
os.system('python3 ./reports.py -e')
print("Generating the Table report")
os.system('python3 ./reports.py -t')
if args["reportsSec"] == True:
print("Generating the Executive report just for Security violations")
os.system('python3 ./reports.py -es')
print("Generating the Table report just for Security violations")
os.system('python3 ./reports.py -ts')
if args["reportsLic"] == True:
print("Generating the Executive report just for Licensing violations")
os.system('python3 ./reports.py -el')
print("Generating the Table report just for Licensing violations")
os.system('python3 ./reports.py -tl')
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
def searchApps(search, iq_url):
appId = []
if search is not None and len(search) > 0:
search = search.split(",")
url = '{}/api/v2/applications'.format(iq_url)
response = iq_session.get(url).json()
for app in response["applications"]:
for item in search:
if item in [app["name"], app["id"], app["publicId"]]:
appId.append(app["id"]) #if app "name", "id" or "publicId" in arguments, then creates array of app IDs in appId
return appId
def searchOrgs(search, iq_url):
orgId = []
if search is not None and len(search) > 0:
search = search.split(",")
url = '{}/api/v2/organizations'.format(iq_url)
response = iq_session.get(url).json()
for org in response["organizations"]:
for item in search:
if item in [org["name"], org["id"]]:
orgId.append(org["id"]) #if org "name", "id" or "publicId" in arguments, then creates array of org IDs in orgId
return orgId
def get_week_start(recency = 0):
d = datetime.date.today()
d = d - datetime.timedelta(days=d.weekday()+(recency*7) )
period = '{}'.format( d.isoformat() )
return period
def get_week_only(recency = 0):
d = datetime.date.today() - datetime.timedelta(days=(recency*7))
period = '{}'.format(d.isocalendar()[1])
return period
def get_week(recency = 0): # recency is number of weeks prior to current week.
d = datetime.date.today() - datetime.timedelta(days=(recency*7))
period = '{}-W{}'.format(d.year , d.isocalendar()[1])
return period
def get_week_date(s):
d = datetime.datetime.strptime(s, "%Y-%m-%d")
period = '{}'.format(d.isocalendar()[1])
return period
def get_metrics(iq_url, scope = 6, appId = [], orgId = []): # scope is number of week prior to current week.
url = "{}/api/v2/reports/metrics".format(iq_url)
iq_header = {'Content-Type':'application/json', 'Accept':'application/json'}
r_body = {"timePeriod": "WEEK", "firstTimePeriod": get_week(scope) ,"lastTimePeriod": get_week(1), #use get_week(0) instead if looking for Year-To-Date data instead of fully completed weeks
"applicationIds": appId, "organizationIds": orgId}
response = iq_session.post( url, json=r_body, headers=iq_header)
return response.json()
def rnd(n): return round(n,2)
def avg(n): return 0 if len(n) == 0 else rnd(sum(n)/len(n))
def rate(n, d): return 0 if d == 0 else (n/d)
def percent(n, d): return rnd(rate(n, d)*100)
def zeros(n): return dict.fromkeys( n, 0)
def empties(keys): return { key : list([]) for key in keys }
def ms_days(v): #convert ms to days
if v is None: return 0
else: return round(v/86400000)
def get_aggs_list():
s = {"weeks":[], "dates": [], "fixedRate":[], "waivedRate":[], "dealtRate":[]}
s.update(zeros(config["statRates"]))
s.update({"evaluationCount":{"rng":[]}})
for m in config["mttr"]:
s.update({m:{"avg":0,"rng":[]}})
for t in config["status"]:
g = {"TOTAL":{"avg":0,"rng":[]}}
for c in config["category"]:
k = {"TOTAL":{"avg":0,"rng":[]}}
for r in config["risk"]:
k.update({r:{"avg":0,"rng":[]}})
g.update({c:k})
for r in config["risk"]:
g["TOTAL"].update({r:{"avg":0,"rng":[]}})
s.update({t:g})
#for rates in config["rates"]:
# g = {"TOTAL":{"avg":0,"rng":[]}}
# for c in config["category"]:
# k = {"TOTAL":{"avg":0,"rng":[]}}
# for r in config["risk"]:
# k.update({r:{"avg":0,"rng":[]}})
# g.update({c:k})
# for r in config["risk"]:
# g["TOTAL"].update({r:{"avg":0,"rng":[]}})
# s.update({rates:g})
return s
#----------------------------------
# Helpers
def get_dCnt(d): return d["discoveredCounts"]["TOTAL"]["rng"]
def get_oCnt(d): return d["openCountsAtTimePeriodEnd"]["TOTAL"]["rng"]
def get_fCnt(d): return d["fixedCounts"]["TOTAL"]["rng"]
def get_wCnt(d): return d["waivedCounts"]["TOTAL"]["rng"]
def calc_FixedRate(d, last=True):
f, o = get_fCnt(d), get_oCnt(d)
if last: f, o = f[-1], o[-1]
else: f, o = sum(f), sum(o)
return percent(f, o)
def calc_WaivedRate(d, last=True):
w, o = get_wCnt(d), get_oCnt(d)
if last: w, o = w[-1], o[-1]
else: w, o = sum(w), sum(o)
return percent(w, o)
def calc_DealtRate(d, last=True):
f, w, o = get_fCnt(d), get_wCnt(d), get_oCnt(d)
if last: f, w, o = f[-1], w[-1], o[-1]
else: f, w, o = sum(f), sum(w), sum(o)
return percent(f+w, o)
def calc_FixPercent(d):
f, w = sum(get_fCnt(d)), sum(get_wCnt(d))
return 0 if (f+w) == 0 else (f/(f+w))
def calc_WaiPercent(d):
f, w = sum( get_fCnt(d)), sum(get_wCnt(d))
return 0 if (f+w) == 0 else (w/(f+w))
def calc_DisManCost(d):
return sum(get_dCnt(d)) * config["AvgHourCost"] * config["VulDisTime"]
def calc_DebtManCost(d):
return sum(get_oCnt(d)) * config["AvgHourCost"] * ( (calc_FixPercent(d) * config["FixManTime"]) + ( calc_WaiPercent(d) * config["WaiManTime"] ) )
def calc_DebtAutoCost(d):
return sum(get_oCnt(d)) * config["AvgHourCost"] * ( (calc_FixPercent(d) * config["FixAutoTime"]) + ( calc_WaiPercent(d) * config["WaiAutoTime"] ) )
def calc_TotalSonatypeValue(d):
return calc_DisManCost(d) + ( calc_DebtManCost(d) - calc_DebtAutoCost(d) )
#------------------------------------------------------------------------------------
def process_week(a, s):
for mttr in config["mttr"]:
if mttr in a:
value = a[mttr]
if not value is None:
value = ms_days(value)
s[mttr]["rng"].append(value)
for status in config["status"]:
for category in config["category"]:
Totals = 0
for risk in config["risk"]:
if status in a and category in a[status] and risk in a[status][category]:
value = a[status][category][risk]
s[status][category][risk]["rng"].append(value)
Totals += value
s[status][category]["TOTAL"]["rng"].append(Totals)
# Totals for status including risk levels
Totals = 0
for risk in config["risk"]:
value = 0
for category in config["category"]:
value += s[status][category][risk]["rng"][-1]
s[status]["TOTAL"][risk]["rng"].append(value)
Totals += value
s[status]["TOTAL"]["rng"].append(Totals)
#INCLUDE fixedRate, waivedRate, dealtRate loop here?
s["evaluationCount"]["rng"].append( a["evaluationCount"] )
s["weeks"].append( get_week_date( a["timePeriodStart"]) ) #set week list for images
s["dates"].append(a["timePeriodStart"]) #set dates list for images
s["fixedRate"].append( calc_FixedRate(s, True) )
s["waivedRate"].append( calc_WaivedRate(s, True) )
s["dealtRate"].append( calc_DealtRate(s, True) )
def compute_summary(s):
for mttr in config["mttr"]:
t = []
for w in s[mttr]["rng"]:
if not w is None:
t.append(w)
s[mttr]["avg"] = avg(t)
for status in config["status"]:
for category in config["category"]:
for risk in config["risk"]:
s[status][category][risk]["avg"] = avg(s[status][category][risk]["rng"])
s[status][category]["TOTAL"]["avg"] = avg(s[status][category]["TOTAL"]["rng"])
for risk in config["risk"]:
s[status]["TOTAL"][risk]["avg"] = avg(s[status]["TOTAL"][risk]["rng"])
s[status]["TOTAL"]["avg"] = avg(s[status]["TOTAL"]["rng"])
#INCLUDE fixedRate, waivedRate, dealtRate loop here?
s["SonatypeValue"] = calc_TotalSonatypeValue(s)
s["FixRate"] = calc_FixedRate(s, False)
s["WaiveRate"] = calc_WaivedRate(s, False)
s["DealtRate"] = calc_DealtRate(s, False)
s["FixPercent"] = calc_FixPercent(s)
s["WaiPercent"] = calc_WaiPercent(s)
#------------------------------------------------------------------------------------
if __name__ == "__main__":
main()
#raise SystemExit
| true | true |
1c3d27c9bc4be19e0abd13182b9e4310bd52a96c | 385 | py | Python | dora/structures/migrations/0005_structure_slug.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 1 | 2022-01-03T22:12:45.000Z | 2022-01-03T22:12:45.000Z | dora/structures/migrations/0005_structure_slug.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 2 | 2022-03-17T18:04:11.000Z | 2022-03-18T14:55:27.000Z | dora/structures/migrations/0005_structure_slug.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 1 | 2022-01-03T09:02:54.000Z | 2022-01-03T09:02:54.000Z | # Generated by Django 3.2.5 on 2021-08-12 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("structures", "0004_auto_20210804_1530"),
]
operations = [
migrations.AddField(
model_name="structure",
name="slug",
field=models.SlugField(blank=True),
),
]
| 20.263158 | 50 | 0.597403 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("structures", "0004_auto_20210804_1530"),
]
operations = [
migrations.AddField(
model_name="structure",
name="slug",
field=models.SlugField(blank=True),
),
]
| true | true |
1c3d27d5320a5cd8917c51edaf96e3cbe4469868 | 45 | py | Python | Modulo_1/semana2/Estructura-de-Datos/set/conjunto-iterar.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana2/Estructura-de-Datos/set/conjunto-iterar.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana2/Estructura-de-Datos/set/conjunto-iterar.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | s = {1, 2, 3, 4}
while s:
print(s.pop())
| 11.25 | 18 | 0.444444 | s = {1, 2, 3, 4}
while s:
print(s.pop())
| true | true |
1c3d27d82b4baef89b2eb442ebb64f581c0e95d6 | 10,450 | py | Python | src/filemanagement.py | moustachio-belvedere/ScopeControl | 2608122fa89d32adf369b9b8fa4c1b5b7962e4df | [
"MIT"
] | 1 | 2020-06-24T04:10:22.000Z | 2020-06-24T04:10:22.000Z | src/filemanagement.py | moustachio-belvedere/ScopeControl | 2608122fa89d32adf369b9b8fa4c1b5b7962e4df | [
"MIT"
] | null | null | null | src/filemanagement.py | moustachio-belvedere/ScopeControl | 2608122fa89d32adf369b9b8fa4c1b5b7962e4df | [
"MIT"
] | 1 | 2018-10-22T17:55:36.000Z | 2018-10-22T17:55:36.000Z | from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from os.path import isdir
from pathlib import Path
from src.filemanagementadvanced import AdvancedSettingsWindow
class FileDirInput(QLineEdit):
# signal to send to apply button
apply_state = pyqtSignal(bool, str)
def __init__(self, parent, fset):
super(FileDirInput, self).__init__(fset.savedir, parent)
# announce fset
self.fset = fset
# connect to function on change of text
self.textChanged.connect(self.ontextchange)
@pyqtSlot(str)
def ontextchange(self, textbox_in):
# check whether text in box different to current save directory
# and make apply button active/inactive as appropriate
same = (textbox_in == self.fset.savedir)
if same:
self.apply_state.emit(False, 'FileDirInput')
elif not same:
self.apply_state.emit(True, 'FileDirInput')
@pyqtSlot()
def applydirchange(self):
# get text from box
textbox_mod = self.text()
# ensure directory string ends and starts with '/' if not append
if len(textbox_mod)>0 and textbox_mod[-1]!='/':
textbox_mod = textbox_mod + '/'
if len(textbox_mod)>0 and textbox_mod[0]!='/':
textbox_mod = '/' + textbox_mod
if len(textbox_mod)==0:
textbox_mod = '/'
# check directory exists, if not raise question box
if isdir(textbox_mod):
self.fset.savedir = textbox_mod
elif not isdir(textbox_mod):
dir_question = QMessageBox.question(self, 'Non-existent Directory',
'Directory: "' + textbox_mod + '" does not exist.\nDo you want to create it?',
QMessageBox.Cancel | QMessageBox.Yes, QMessageBox.Yes)
if dir_question == QMessageBox.Yes:
Path(textbox_mod).mkdir(parents=True, exist_ok=True)
self.fset.savedir = textbox_mod
# update text in box
self.setText(textbox_mod)
# make sure apply button updates
self.ontextchange(textbox_mod)
class SetFileFormat(QComboBox):
# signal to send to apply button
apply_state = pyqtSignal(bool, str)
def __init__(self, parent, fset):
super(SetFileFormat, self).__init__(parent)
# announce fset handle
self.fset = fset
# initialise user interface
self.initUI()
def initUI(self):
# add all formats
self.addItem('jpeg')
self.addItem('png')
self.addItem('gif')
self.addItem('bmp')
self.addItem('yuv')
self.addItem('rgb')
self.addItem('rgba')
self.addItem('bgr')
self.addItem('bgra')
# set to initial file format
self.setCurrentText(self.fset.FileFormat)
# connect changed to tester function for apply
self.currentTextChanged.connect(self.ontextchange)
@pyqtSlot(str)
def ontextchange(self, textbox_in):
# check whether text in box different to current save directory
# and make apply button active/inactive as appropriate
same = (textbox_in == self.fset.FileFormat)
if same:
self.apply_state.emit(False, 'SetFileFormat')
elif not same:
self.apply_state.emit(True, 'SetFileFormat')
@pyqtSlot()
def applyformatchange(self):
# apply file format change
self.fset.filenameSetFormat(self.currentText())
class NameFormatPrefix(QLineEdit):
    """Line-edit widget for the saved-file name prefix.

    Emits ``apply_state`` whenever its text starts/stops differing from
    the prefix stored in the settings object, so the apply button can be
    enabled/disabled accordingly.
    """
    # signal consumed by the apply button: (enable?, originating widget name)
    apply_state = pyqtSignal(bool, str)

    def __init__(self, parent, fset):
        # pre-fill the box with the currently configured prefix
        super(NameFormatPrefix, self).__init__(fset.NamePrefix, parent)
        # keep a handle on the shared file-settings object
        self.fset = fset
        # re-check the apply button every time the user edits the text
        self.textChanged.connect(self.ontextchange)

    @pyqtSlot(str)
    def ontextchange(self, textbox_in):
        """Enable the apply button iff the box differs from the stored prefix."""
        if textbox_in == self.fset.NamePrefix:
            self.apply_state.emit(False, 'NameFormatPrefix')
        else:
            self.apply_state.emit(True, 'NameFormatPrefix')

    @pyqtSlot()
    def applyprefixchange(self):
        """Commit the typed prefix to the settings and refresh the button."""
        new_prefix = self.text()
        self.fset.filenameSetPrefix(new_prefix)
        # re-check so the apply button disables itself after a commit
        self.ontextchange(new_prefix)
class NameFormatStamper(QWidget):
    """Pair of check boxes toggling date/time stamps in generated file names."""
    # emitted towards the apply button: (has unapplied change?, widget key)
    apply_state = pyqtSignal(bool, str)

    def __init__(self, parent, fset):
        super(NameFormatStamper, self).__init__(parent)
        # announce fset
        self.fset = fset
        # init UI
        self.initUI()
        # connect check boxes; whenever either toggles, re-evaluate apply state
        self.checkboxdate.stateChanged.connect(self.ondatestampchange)
        self.checkboxtime.stateChanged.connect(self.ontimestampchange)

    def initUI(self):
        # section layout
        sublayout_namestamp = QHBoxLayout()
        # initialise widgets
        self.checkboxdate = QCheckBox('Date', self)
        self.checkboxtime = QCheckBox('Time', self)
        # set check state from the stored flags (two-state boxes only)
        self.checkboxdate.setCheckState(int(self.fset.DateStamp))
        self.checkboxdate.setTristate(False)
        self.checkboxtime.setCheckState(int(self.fset.TimeStamp))
        self.checkboxtime.setTristate(False)
        # add widgets to the horizontal layout
        sublayout_namestamp.addWidget(self.checkboxdate)
        sublayout_namestamp.addWidget(self.checkboxtime)
        # set sublayout as widget layout
        self.setLayout(sublayout_namestamp)
        # reduce automatic height
        self.setFixedHeight(35)

    def _refresh_apply_state(self):
        # Single source of truth for the previously-triplicated check:
        # apply is enabled while either box differs from the stored flags.
        # (When a stateChanged slot fires the widget state is already updated,
        # so reading isChecked() is equivalent to inspecting the signal arg.)
        same = (self.fset.DateStamp == self.checkboxdate.isChecked()
                and self.fset.TimeStamp == self.checkboxtime.isChecked())
        self.apply_state.emit(not same, 'NameFormatStamper')

    @pyqtSlot(int)
    def ondatestampchange(self, datecheckbox_in):
        # date box toggled; re-evaluate the apply button
        self._refresh_apply_state()

    @pyqtSlot(int)
    def ontimestampchange(self, timecheckbox_in):
        # time box toggled; re-evaluate the apply button
        self._refresh_apply_state()

    @pyqtSlot()
    def applystampchange(self):
        # commit both flags, then re-evaluate (normally disables the button)
        self.fset.filenameSetDate(self.checkboxdate.isChecked())
        self.fset.filenameSetTime(self.checkboxtime.isChecked())
        self._refresh_apply_state()
class ApplyButton(QPushButton):
    """'Apply' button, enabled only while at least one setting is dirty."""
    def __init__(self, parent, fset):
        super(ApplyButton, self).__init__(QIcon('resources/done-all.svg'), 'Apply', parent)
        # announce fset handle
        self.fset = fset
        # inactive initially: every box still shows its stored value
        self.setEnabled(False)
        # per-widget dirty flags, keyed by the widget's apply_state key
        self.changedict = {'FileDirInput': False, 'SetFileFormat': False,
                           'NameFormatPrefix': False, 'NameFormatStamper': False}

    @pyqtSlot(bool, str)
    def individualSetEnable(self, inbool, inkey):
        # record the reporting widget's dirty state, then re-evaluate
        self.changedict[inkey] = inbool
        self.checkallstates()

    def checkallstates(self):
        # Enabled iff any widget reports an unapplied change.  Replaces the
        # redundant all(v == False)/any(v == True) two-branch check: the dict
        # is non-empty, so the two conditions are exact complements.
        self.setEnabled(any(self.changedict.values()))
class AdvancedSettingsButton(QPushButton):
    """Button that opens the advanced file-settings dialog."""
    def __init__(self, parent, fset):
        super(AdvancedSettingsButton, self).__init__(QIcon('resources/cog.svg'), 'Advanced', parent)
        # announce main window parent and fset
        # NOTE(review): assigning self.parent shadows QWidget.parent() on this
        # instance — appears intentional (only read locally), but worth confirming.
        self.parent = parent
        self.fset = fset
        # clicking the button opens the dialog
        self.clicked.connect(self.open_settings)
    def open_settings(self):
        # create and open file settings window dialog box, with handle on
        # parent and fset object; keeping the window on self also prevents it
        # from being garbage-collected while shown
        self.window = AdvancedSettingsWindow(self.parent, self.fset)
        self.window.show()
class FileManagementSection(QGroupBox):
    """Group box bundling all file-management controls and their wiring."""
    def __init__(self, parent, camera):
        super(FileManagementSection, self).__init__(parent)
        # keep a handle on the camera's file-naming settings object
        self.fset = camera.fn
        self.initUI()
        self.makeconnections()

    def initUI(self):
        self.setTitle('File Settings')
        layout = QFormLayout()
        # build the individual controls, all sharing the same fset handle
        self.dirinput = FileDirInput(self, self.fset)
        self.fileformat = SetFileFormat(self, self.fset)
        self.nameformat = NameFormatPrefix(self, self.fset)
        self.namestamper = NameFormatStamper(self, self.fset)
        self.applybutton = ApplyButton(self, self.fset)
        self.adsetbutton = AdvancedSettingsButton(self, self.fset)
        # lay the editors out as labelled rows, buttons on the final row
        for caption, widget in (('Save Directory:', self.dirinput),
                                ('File Format:', self.fileformat),
                                ('File Name Prefix:', self.nameformat),
                                ('Include Stamps:', self.namestamper)):
            layout.addRow(QLabel(caption), widget)
        layout.addRow(self.adsetbutton, self.applybutton)
        self.setLayout(layout)

    def makeconnections(self):
        # each editor reports its dirty state to the apply button, and the
        # apply button triggers each editor's commit slot when clicked
        wiring = ((self.dirinput, self.dirinput.applydirchange),
                  (self.fileformat, self.fileformat.applyformatchange),
                  (self.nameformat, self.nameformat.applyprefixchange),
                  (self.namestamper, self.namestamper.applystampchange))
        for widget, commit_slot in wiring:
            widget.apply_state.connect(self.applybutton.individualSetEnable)
            self.applybutton.clicked.connect(commit_slot)
| 29.027778 | 94 | 0.738278 | from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from os.path import isdir
from pathlib import Path
from src.filemanagementadvanced import AdvancedSettingsWindow
class FileDirInput(QLineEdit):
    """Line edit holding the save directory; validates/creates it on apply."""
    # emitted towards the apply button: (has unapplied change?, widget key)
    apply_state = pyqtSignal(bool, str)
    def __init__(self, parent, fset):
        # seed the box with the currently configured directory
        super(FileDirInput, self).__init__(fset.savedir, parent)
        # handle on the file-settings object
        self.fset = fset
        # every edit re-evaluates the apply button state
        self.textChanged.connect(self.ontextchange)
    @pyqtSlot(str)
    def ontextchange(self, textbox_in):
        # enable apply only while the box differs from the stored directory
        same = (textbox_in == self.fset.savedir)
        if same:
            self.apply_state.emit(False, 'FileDirInput')
        elif not same:
            self.apply_state.emit(True, 'FileDirInput')
    @pyqtSlot()
    def applydirchange(self):
        # normalise the typed path so it both starts and ends with '/'
        textbox_mod = self.text()
        if len(textbox_mod)>0 and textbox_mod[-1]!='/':
            textbox_mod = textbox_mod + '/'
        if len(textbox_mod)>0 and textbox_mod[0]!='/':
            textbox_mod = '/' + textbox_mod
        if len(textbox_mod)==0:
            textbox_mod = '/'
        # store directly if the directory exists, otherwise offer to create it
        if isdir(textbox_mod):
            self.fset.savedir = textbox_mod
        elif not isdir(textbox_mod):
            dir_question = QMessageBox.question(self, 'Non-existent Directory',
                'Directory: "' + textbox_mod + '" does not exist.\nDo you want to create it?',
                QMessageBox.Cancel | QMessageBox.Yes, QMessageBox.Yes)
            if dir_question == QMessageBox.Yes:
                Path(textbox_mod).mkdir(parents=True, exist_ok=True)
                self.fset.savedir = textbox_mod
        # reflect the normalised text and refresh the apply button state
        self.setText(textbox_mod)
        self.ontextchange(textbox_mod)
class SetFileFormat(QComboBox):
    """Drop-down for choosing the image file format stored on ``fset``."""
    # emitted towards the apply button: (has unapplied change?, widget key)
    apply_state = pyqtSignal(bool, str)
    def __init__(self, parent, fset):
        super(SetFileFormat, self).__init__(parent)
        # handle on the file-settings object
        self.fset = fset
        self.initUI()
    def initUI(self):
        # populate the selectable formats, in display order
        self.addItem('jpeg')
        self.addItem('png')
        self.addItem('gif')
        self.addItem('bmp')
        self.addItem('yuv')
        self.addItem('rgb')
        self.addItem('rgba')
        self.addItem('bgr')
        self.addItem('bgra')
        # start from the currently configured format
        self.setCurrentText(self.fset.FileFormat)
        # every selection change re-evaluates the apply button state
        self.currentTextChanged.connect(self.ontextchange)
    @pyqtSlot(str)
    def ontextchange(self, textbox_in):
        # enable apply only while the selection differs from the stored format
        same = (textbox_in == self.fset.FileFormat)
        if same:
            self.apply_state.emit(False, 'SetFileFormat')
        elif not same:
            self.apply_state.emit(True, 'SetFileFormat')
    @pyqtSlot()
    def applyformatchange(self):
        # commit the currently selected format to the file settings
        self.fset.filenameSetFormat(self.currentText())
class NameFormatPrefix(QLineEdit):
    """Line edit for the file-name prefix stored on ``fset``."""
    # emitted towards the apply button: (has unapplied change?, widget key)
    apply_state = pyqtSignal(bool, str)
    def __init__(self, parent, fset):
        # seed the box with the currently configured prefix
        super(NameFormatPrefix, self).__init__(fset.NamePrefix, parent)
        self.fset = fset
        # every edit re-evaluates the apply button state
        self.textChanged.connect(self.ontextchange)
    @pyqtSlot(str)
    def ontextchange(self, textbox_in):
        # enable apply only while the box differs from the stored prefix
        same = (textbox_in == self.fset.NamePrefix)
        if same:
            self.apply_state.emit(False, 'NameFormatPrefix')
        elif not same:
            self.apply_state.emit(True, 'NameFormatPrefix')
    @pyqtSlot()
    def applyprefixchange(self):
        # commit the prefix, then refresh the apply button (now unchanged)
        self.fset.filenameSetPrefix(self.text())
        self.ontextchange(self.text())
class NameFormatStamper(QWidget):
    """Pair of check boxes toggling date/time stamps in generated file names."""
    # emitted towards the apply button: (has unapplied change?, widget key)
    apply_state = pyqtSignal(bool, str)
    def __init__(self, parent, fset):
        super(NameFormatStamper, self).__init__(parent)
        # handle on the file-settings object
        self.fset = fset
        self.initUI()
        # whenever either box toggles, re-evaluate the apply button state
        self.checkboxdate.stateChanged.connect(self.ondatestampchange)
        self.checkboxtime.stateChanged.connect(self.ontimestampchange)
    def initUI(self):
        # horizontal layout holding both check boxes
        sublayout_namestamp = QHBoxLayout()
        self.checkboxdate = QCheckBox('Date', self)
        self.checkboxtime = QCheckBox('Time', self)
        # seed each box from the stored flags (two-state boxes only)
        self.checkboxdate.setCheckState(int(self.fset.DateStamp))
        self.checkboxdate.setTristate(False)
        self.checkboxtime.setCheckState(int(self.fset.TimeStamp))
        self.checkboxtime.setTristate(False)
        sublayout_namestamp.addWidget(self.checkboxdate)
        sublayout_namestamp.addWidget(self.checkboxtime)
        self.setLayout(sublayout_namestamp)
        # reduce the automatic widget height
        self.setFixedHeight(35)
    @pyqtSlot(int)
    def ondatestampchange(self, datecheckbox_in):
        # enable apply only while either box differs from the stored flags
        datesame = (bool(datecheckbox_in) == self.fset.DateStamp)
        timesame = (self.fset.TimeStamp == self.checkboxtime.isChecked())
        bothsame = datesame and timesame
        if bothsame:
            self.apply_state.emit(False, 'NameFormatStamper')
        elif not bothsame:
            self.apply_state.emit(True, 'NameFormatStamper')
    @pyqtSlot(int)
    def ontimestampchange(self, timecheckbox_in):
        # mirror of ondatestampchange for the time box
        timesame = (bool(timecheckbox_in) == self.fset.TimeStamp)
        datesame = (self.fset.DateStamp == self.checkboxdate.isChecked())
        bothsame = datesame and timesame
        if bothsame:
            self.apply_state.emit(False, 'NameFormatStamper')
        elif not bothsame:
            self.apply_state.emit(True, 'NameFormatStamper')
    @pyqtSlot()
    def applystampchange(self):
        # commit both flags to the file settings
        self.fset.filenameSetDate(self.checkboxdate.isChecked())
        self.fset.filenameSetTime(self.checkboxtime.isChecked())
        # re-evaluate; after a successful commit both comparisons hold
        samedate = (self.fset.DateStamp == self.checkboxdate.isChecked())
        sametime = (self.fset.TimeStamp == self.checkboxtime.isChecked())
        bothsame = samedate and sametime
        if bothsame:
            self.apply_state.emit(False, 'NameFormatStamper')
        elif not bothsame:
            self.apply_state.emit(True, 'NameFormatStamper')
class ApplyButton(QPushButton):
    """'Apply' button, enabled only while at least one setting is dirty."""
    def __init__(self, parent, fset):
        super(ApplyButton, self).__init__(QIcon('resources/done-all.svg'), 'Apply', parent)
        # handle on the file-settings object
        self.fset = fset
        # inactive initially: every box still shows its stored value
        self.setEnabled(False)
        # per-widget dirty flags, keyed by the widget's apply_state key
        self.changedict = {'FileDirInput':False, 'SetFileFormat':False,
        'NameFormatPrefix':False, 'NameFormatStamper':False}
    @pyqtSlot(bool, str)
    def individualSetEnable(self, inbool, inkey):
        # record the reporting widget's dirty state, then re-evaluate
        self.changedict[inkey] = inbool
        self.checkallstates()
    def checkallstates(self):
        # disable when nothing is dirty; enable when anything is
        if all(value == False for value in self.changedict.values()):
            self.setEnabled(False)
        elif any(value == True for value in self.changedict.values()):
            self.setEnabled(True)
class AdvancedSettingsButton(QPushButton):
    """Button that opens the advanced file-settings dialog."""
    def __init__(self, parent, fset):
        super(AdvancedSettingsButton, self).__init__(QIcon('resources/cog.svg'), 'Advanced', parent)
        # NOTE(review): assigning self.parent shadows QWidget.parent() on this
        # instance — appears intentional (only read locally), but worth confirming.
        self.parent = parent
        self.fset = fset
        # clicking the button opens the dialog
        self.clicked.connect(self.open_settings)
    def open_settings(self):
        # keep the window on self so it is not garbage-collected while shown
        self.window = AdvancedSettingsWindow(self.parent, self.fset)
        self.window.show()
class FileManagementSection(QGroupBox):
    """Group box bundling all file-management controls and their wiring."""
    def __init__(self, parent, camera):
        super(FileManagementSection, self).__init__(parent)
        # keep a handle on the camera's file-naming settings object
        self.fset = camera.fn
        self.initUI()
        self.makeconnections()
    def initUI(self):
        self.setTitle('File Settings')
        # form layout: one labelled row per editor, buttons on the final row
        sublayout_fileman = QFormLayout()
        # build the individual controls, all sharing the same fset handle
        self.dirinput = FileDirInput(self, self.fset)
        self.fileformat = SetFileFormat(self, self.fset)
        self.nameformat = NameFormatPrefix(self, self.fset)
        self.namestamper = NameFormatStamper(self, self.fset)
        self.applybutton = ApplyButton(self, self.fset)
        self.adsetbutton = AdvancedSettingsButton(self, self.fset)
        sublayout_fileman.addRow(QLabel('Save Directory:'), self.dirinput)
        sublayout_fileman.addRow(QLabel('File Format:'), self.fileformat)
        sublayout_fileman.addRow(QLabel('File Name Prefix:'), self.nameformat)
        sublayout_fileman.addRow(QLabel('Include Stamps:'), self.namestamper)
        sublayout_fileman.addRow(self.adsetbutton, self.applybutton)
        self.setLayout(sublayout_fileman)
    def makeconnections(self):
        # each editor reports its dirty state to the apply button, and the
        # apply button triggers each editor's commit slot when clicked
        self.dirinput.apply_state.connect(self.applybutton.individualSetEnable)
        self.applybutton.clicked.connect(self.dirinput.applydirchange)
        self.fileformat.apply_state.connect(self.applybutton.individualSetEnable)
        self.applybutton.clicked.connect(self.fileformat.applyformatchange)
        self.nameformat.apply_state.connect(self.applybutton.individualSetEnable)
        self.applybutton.clicked.connect(self.nameformat.applyprefixchange)
        self.namestamper.apply_state.connect(self.applybutton.individualSetEnable)
        self.applybutton.clicked.connect(self.namestamper.applystampchange)
| true | true |
1c3d282e45666b6555b0ff1951bf6c7be0db77d6 | 1,194 | py | Python | tunnel/tunnel.py | zrthxn/tunnel-worker | 591f63c02ddd6d503ddf4cab0077cea2611b7a21 | [
"MIT"
] | null | null | null | tunnel/tunnel.py | zrthxn/tunnel-worker | 591f63c02ddd6d503ddf4cab0077cea2611b7a21 | [
"MIT"
] | null | null | null | tunnel/tunnel.py | zrthxn/tunnel-worker | 591f63c02ddd6d503ddf4cab0077cea2611b7a21 | [
"MIT"
] | null | null | null | from time import time
from threading import Thread, Event
from os import environ
from typing import List
from config import build_satellites
from satellite import Satellite
class setInterval:
    """Repeatedly invoke ``action(**kwargs)`` every ``interval`` seconds.

    The worker thread starts immediately on construction; call
    :meth:`cancel` to stop it.
    """
    def __init__(self, interval, action, **kwargs):
        self.interval = interval
        self.action = action
        self.kwargs = kwargs
        self.stopEvent = Event()
        Thread(target=self.__setInterval).start()

    def __setInterval(self):
        # schedule against absolute deadlines so per-call drift does not accumulate
        deadline = time() + self.interval
        while True:
            if self.stopEvent.wait(deadline - time()):
                break  # cancel() was called
            deadline += self.interval
            self.action(**self.kwargs)

    def cancel(self):
        # wake the worker's wait() so its loop exits
        self.stopEvent.set()
def main(satellites: List[Satellite]):
    """Health-check every satellite and relaunch the ones that report failure."""
    print("Checking all satellites...")
    # ping each satellite exactly once, in order, relaunching failures as found
    for sat in (s for s in satellites if s.ping() == s.FAIL_STATUS):
        sat.relaunch()
    return 0
# Build the fleet at import time so `satellites` exists for the scheduled
# main() callback below.
print("Building satellites...")
satellites = build_satellites()
print(f"Built {len(satellites)} satellites")
if __name__ == "__main__":
    # ping interval in seconds; overridable via the PING_INTERVAL env var
    __time = 3600
    # PEP 8: compare against None with `is not`, never `!=`
    if environ.get("PING_INTERVAL") is not None:
        __time = int(environ.get("PING_INTERVAL"))
    if len(satellites) > 0:
        # launch everything once, then health-check on a fixed interval
        for sat in satellites:
            sat.launch()
        interval = setInterval(__time, main, satellites=satellites)
else:
raise RuntimeError("No Satellites Built") | 23.88 | 61 | 0.727806 | from time import time
from threading import Thread, Event
from os import environ
from typing import List
from config import build_satellites
from satellite import Satellite
class setInterval:
    """Call ``action(**kwargs)`` every ``interval`` seconds on a worker thread."""
    def __init__(self, interval, action, **kwargs):
        self.interval = interval
        self.action = action
        self.kwargs = kwargs
        self.stopEvent = Event()
        # worker starts immediately; stopped via cancel()
        Thread(target=self.__setInterval).start()
    def __setInterval(self):
        # absolute-deadline scheduling: per-call drift does not accumulate
        nextTime = time() + self.interval
        while not self.stopEvent.wait(nextTime - time()):
            nextTime += self.interval
            self.action(**self.kwargs)
    def cancel(self):
        # wake the worker's wait() so its loop exits
        self.stopEvent.set()
def main(satellites: List[Satellite]):
    """Ping every satellite and relaunch those reporting failure."""
    print("Checking all satellites...")
    for sat in satellites:
        if sat.ping() == sat.FAIL_STATUS:
            sat.relaunch()
    return 0
# Module-level build: `satellites` must exist for the scheduled main() callback.
print("Building satellites...")
satellites = build_satellites()
print(f"Built {len(satellites)} satellites")
if __name__ == "__main__":
    # ping interval in seconds; PING_INTERVAL env var overrides the default
    __time = 3600
    if environ.get("PING_INTERVAL") != None:
        __time = int(environ.get("PING_INTERVAL"))
    if len(satellites) > 0:
        # launch all satellites, then health-check them periodically
        for sat in satellites:
            sat.launch()
        interval = setInterval(__time, main, satellites=satellites)
else:
raise RuntimeError("No Satellites Built") | true | true |
1c3d286c13699acf3e34f81944822817b7af2050 | 957 | py | Python | hmsclient/genthrift/fb303/ttypes.py | itamarla/hmsclient | 96e14c93b9cc0e274602d44c5bf9bb054023aecd | [
"Apache-2.0"
] | 1 | 2020-11-23T02:48:23.000Z | 2020-11-23T02:48:23.000Z | hmsclient/genthrift/fb303/ttypes.py | itamarla/hmsclient | 96e14c93b9cc0e274602d44c5bf9bb054023aecd | [
"Apache-2.0"
] | null | null | null | hmsclient/genthrift/fb303/ttypes.py | itamarla/hmsclient | 96e14c93b9cc0e274602d44c5bf9bb054023aecd | [
"Apache-2.0"
] | null | null | null | #
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class fb_status(object):
    """
    Common status reporting mechanism across all services
    """
    DEAD = 0
    STARTING = 1
    ALIVE = 2
    STOPPING = 3
    STOPPED = 4
    WARNING = 5

    # canonical value -> name table; the reverse lookup is derived from it
    _VALUES_TO_NAMES = {
        0: "DEAD",
        1: "STARTING",
        2: "ALIVE",
        3: "STOPPING",
        4: "STOPPED",
        5: "WARNING",
    }
    # name -> value, kept mechanically in sync with the table above
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
# Resolve deferred type specs for the structs collected above (all_structs is
# empty in this module, so this is effectively a no-op), then drop the helper.
fix_spec(all_structs)
del all_structs
| 19.14 | 93 | 0.613375 |
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class fb_status(object):
    """Common status reporting mechanism across all services."""
    # service status codes
    DEAD = 0
    STARTING = 1
    ALIVE = 2
    STOPPING = 3
    STOPPED = 4
    WARNING = 5
    # numeric status code -> symbolic name
    _VALUES_TO_NAMES = {
        0: "DEAD",
        1: "STARTING",
        2: "ALIVE",
        3: "STOPPING",
        4: "STOPPED",
        5: "WARNING",
    }
    # symbolic name -> numeric status code (inverse of the table above)
    _NAMES_TO_VALUES = {
        "DEAD": 0,
        "STARTING": 1,
        "ALIVE": 2,
        "STOPPING": 3,
        "STOPPED": 4,
        "WARNING": 5,
    }
# resolve deferred type specs (no structs are defined here) and drop the helper
fix_spec(all_structs)
del all_structs
| true | true |
1c3d29c7ad2cc7b5a92ad738564c4b14a24a613f | 24,918 | py | Python | src/oci/database/models/external_database_base.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/database/models/external_database_base.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/database/models/external_database_base.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalDatabaseBase(object):
    """
    A resource that allows you to manage an Oracle Database located outside of Oracle Cloud using Oracle Cloud Infrastructure's Console and APIs.

    Instances are plain data holders: attributes are populated from keyword
    arguments (see ``__init__``) and exposed through simple properties.
    """
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "PROVISIONING"
LIFECYCLE_STATE_PROVISIONING = "PROVISIONING"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "NOT_CONNECTED"
LIFECYCLE_STATE_NOT_CONNECTED = "NOT_CONNECTED"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "AVAILABLE"
LIFECYCLE_STATE_AVAILABLE = "AVAILABLE"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "TERMINATING"
LIFECYCLE_STATE_TERMINATING = "TERMINATING"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "TERMINATED"
LIFECYCLE_STATE_TERMINATED = "TERMINATED"
#: A constant which can be used with the lifecycle_state property of a ExternalDatabaseBase.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the database_edition property of a ExternalDatabaseBase.
#: This constant has a value of "STANDARD_EDITION"
DATABASE_EDITION_STANDARD_EDITION = "STANDARD_EDITION"
#: A constant which can be used with the database_edition property of a ExternalDatabaseBase.
#: This constant has a value of "ENTERPRISE_EDITION"
DATABASE_EDITION_ENTERPRISE_EDITION = "ENTERPRISE_EDITION"
#: A constant which can be used with the database_edition property of a ExternalDatabaseBase.
#: This constant has a value of "ENTERPRISE_EDITION_HIGH_PERFORMANCE"
DATABASE_EDITION_ENTERPRISE_EDITION_HIGH_PERFORMANCE = "ENTERPRISE_EDITION_HIGH_PERFORMANCE"
#: A constant which can be used with the database_edition property of a ExternalDatabaseBase.
#: This constant has a value of "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"
DATABASE_EDITION_ENTERPRISE_EDITION_EXTREME_PERFORMANCE = "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"
#: A constant which can be used with the database_configuration property of a ExternalDatabaseBase.
#: This constant has a value of "RAC"
DATABASE_CONFIGURATION_RAC = "RAC"
#: A constant which can be used with the database_configuration property of a ExternalDatabaseBase.
#: This constant has a value of "SINGLE_INSTANCE"
DATABASE_CONFIGURATION_SINGLE_INSTANCE = "SINGLE_INSTANCE"
def __init__(self, **kwargs):
"""
Initializes a new ExternalDatabaseBase object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this ExternalDatabaseBase.
:type compartment_id: str
:param freeform_tags:
The value to assign to the freeform_tags property of this ExternalDatabaseBase.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this ExternalDatabaseBase.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this ExternalDatabaseBase.
:type display_name: str
:param id:
The value to assign to the id property of this ExternalDatabaseBase.
:type id: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this ExternalDatabaseBase.
:type lifecycle_details: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ExternalDatabaseBase.
Allowed values for this property are: "PROVISIONING", "NOT_CONNECTED", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED"
:type lifecycle_state: str
:param time_created:
The value to assign to the time_created property of this ExternalDatabaseBase.
:type time_created: datetime
:param db_unique_name:
The value to assign to the db_unique_name property of this ExternalDatabaseBase.
:type db_unique_name: str
:param db_id:
The value to assign to the db_id property of this ExternalDatabaseBase.
:type db_id: str
:param database_version:
The value to assign to the database_version property of this ExternalDatabaseBase.
:type database_version: str
:param database_edition:
The value to assign to the database_edition property of this ExternalDatabaseBase.
Allowed values for this property are: "STANDARD_EDITION", "ENTERPRISE_EDITION", "ENTERPRISE_EDITION_HIGH_PERFORMANCE", "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"
:type database_edition: str
:param time_zone:
The value to assign to the time_zone property of this ExternalDatabaseBase.
:type time_zone: str
:param character_set:
The value to assign to the character_set property of this ExternalDatabaseBase.
:type character_set: str
:param ncharacter_set:
The value to assign to the ncharacter_set property of this ExternalDatabaseBase.
:type ncharacter_set: str
:param db_packs:
The value to assign to the db_packs property of this ExternalDatabaseBase.
:type db_packs: str
:param database_configuration:
The value to assign to the database_configuration property of this ExternalDatabaseBase.
Allowed values for this property are: "RAC", "SINGLE_INSTANCE"
:type database_configuration: str
:param database_management_config:
The value to assign to the database_management_config property of this ExternalDatabaseBase.
:type database_management_config: oci.database.models.DatabaseManagementConfig
"""
self.swagger_types = {
'compartment_id': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'id': 'str',
'lifecycle_details': 'str',
'lifecycle_state': 'str',
'time_created': 'datetime',
'db_unique_name': 'str',
'db_id': 'str',
'database_version': 'str',
'database_edition': 'str',
'time_zone': 'str',
'character_set': 'str',
'ncharacter_set': 'str',
'db_packs': 'str',
'database_configuration': 'str',
'database_management_config': 'DatabaseManagementConfig'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'id': 'id',
'lifecycle_details': 'lifecycleDetails',
'lifecycle_state': 'lifecycleState',
'time_created': 'timeCreated',
'db_unique_name': 'dbUniqueName',
'db_id': 'dbId',
'database_version': 'databaseVersion',
'database_edition': 'databaseEdition',
'time_zone': 'timeZone',
'character_set': 'characterSet',
'ncharacter_set': 'ncharacterSet',
'db_packs': 'dbPacks',
'database_configuration': 'databaseConfiguration',
'database_management_config': 'databaseManagementConfig'
}
self._compartment_id = None
self._freeform_tags = None
self._defined_tags = None
self._display_name = None
self._id = None
self._lifecycle_details = None
self._lifecycle_state = None
self._time_created = None
self._db_unique_name = None
self._db_id = None
self._database_version = None
self._database_edition = None
self._time_zone = None
self._character_set = None
self._ncharacter_set = None
self._db_packs = None
self._database_configuration = None
self._database_management_config = None
    @property
    def compartment_id(self):
        """
        **[Required]** Gets the compartment_id of this ExternalDatabaseBase.
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :return: The compartment_id of this ExternalDatabaseBase.
        :rtype: str
        """
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        """
        Sets the compartment_id of this ExternalDatabaseBase.
        The `OCID`__ of the compartment. No validation is performed.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :param compartment_id: The compartment_id of this ExternalDatabaseBase.
        :type: str
        """
        self._compartment_id = compartment_id

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this ExternalDatabaseBase.
        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
        For more information, see `Resource Tags`__.

        Example: `{\"Department\": \"Finance\"}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :return: The freeform_tags of this ExternalDatabaseBase.
        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this ExternalDatabaseBase.
        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
        For more information, see `Resource Tags`__.

        Example: `{\"Department\": \"Finance\"}`

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :param freeform_tags: The freeform_tags of this ExternalDatabaseBase.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this ExternalDatabaseBase.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        For more information, see `Resource Tags`__.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :return: The defined_tags of this ExternalDatabaseBase.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this ExternalDatabaseBase.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        For more information, see `Resource Tags`__.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm

        :param defined_tags: The defined_tags of this ExternalDatabaseBase.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags
    @property
    def display_name(self):
        """
        **[Required]** Gets the display_name of this ExternalDatabaseBase.
        The user-friendly name for the external database. The name does not have to be unique.

        :return: The display_name of this ExternalDatabaseBase.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this ExternalDatabaseBase.
        The user-friendly name for the external database. The name does not have to be unique.

        :param display_name: The display_name of this ExternalDatabaseBase.
        :type: str
        """
        self._display_name = display_name

    @property
    def id(self):
        """
        **[Required]** Gets the id of this ExternalDatabaseBase.
        The `OCID`__ of the Oracle Cloud Infrastructure external database resource.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :return: The id of this ExternalDatabaseBase.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this ExternalDatabaseBase.
        The `OCID`__ of the Oracle Cloud Infrastructure external database resource.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :param id: The id of this ExternalDatabaseBase.
        :type: str
        """
        self._id = id

    @property
    def lifecycle_details(self):
        """
        Gets the lifecycle_details of this ExternalDatabaseBase.
        Additional information about the current lifecycle state.

        :return: The lifecycle_details of this ExternalDatabaseBase.
        :rtype: str
        """
        return self._lifecycle_details

    @lifecycle_details.setter
    def lifecycle_details(self, lifecycle_details):
        """
        Sets the lifecycle_details of this ExternalDatabaseBase.
        Additional information about the current lifecycle state.

        :param lifecycle_details: The lifecycle_details of this ExternalDatabaseBase.
        :type: str
        """
        self._lifecycle_details = lifecycle_details

    @property
    def lifecycle_state(self):
        """
        **[Required]** Gets the lifecycle_state of this ExternalDatabaseBase.
        The current state of the Oracle Cloud Infrastructure external database resource.

        Allowed values for this property are: "PROVISIONING", "NOT_CONNECTED", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED"

        :return: The lifecycle_state of this ExternalDatabaseBase.
        :rtype: str
        """
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this ExternalDatabaseBase.
        The current state of the Oracle Cloud Infrastructure external database resource.

        :param lifecycle_state: The lifecycle_state of this ExternalDatabaseBase.
        :type: str
        :raises ValueError: if the value is neither None nor one of the allowed states
        """
        # validate against the closed set of lifecycle states
        allowed_values = ["PROVISIONING", "NOT_CONNECTED", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._lifecycle_state = lifecycle_state
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this ExternalDatabaseBase.
The date and time the database was created.
:return: The time_created of this ExternalDatabaseBase.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this ExternalDatabaseBase.
The date and time the database was created.
:param time_created: The time_created of this ExternalDatabaseBase.
:type: datetime
"""
self._time_created = time_created
@property
def db_unique_name(self):
"""
Gets the db_unique_name of this ExternalDatabaseBase.
The `DB_UNIQUE_NAME` of the external database.
:return: The db_unique_name of this ExternalDatabaseBase.
:rtype: str
"""
return self._db_unique_name
@db_unique_name.setter
def db_unique_name(self, db_unique_name):
"""
Sets the db_unique_name of this ExternalDatabaseBase.
The `DB_UNIQUE_NAME` of the external database.
:param db_unique_name: The db_unique_name of this ExternalDatabaseBase.
:type: str
"""
self._db_unique_name = db_unique_name
@property
def db_id(self):
"""
Gets the db_id of this ExternalDatabaseBase.
The Oracle Database ID, which identifies an Oracle Database located outside of Oracle Cloud.
:return: The db_id of this ExternalDatabaseBase.
:rtype: str
"""
return self._db_id
@db_id.setter
def db_id(self, db_id):
"""
Sets the db_id of this ExternalDatabaseBase.
The Oracle Database ID, which identifies an Oracle Database located outside of Oracle Cloud.
:param db_id: The db_id of this ExternalDatabaseBase.
:type: str
"""
self._db_id = db_id
@property
def database_version(self):
"""
Gets the database_version of this ExternalDatabaseBase.
The Oracle Database version.
:return: The database_version of this ExternalDatabaseBase.
:rtype: str
"""
return self._database_version
@database_version.setter
def database_version(self, database_version):
"""
Sets the database_version of this ExternalDatabaseBase.
The Oracle Database version.
:param database_version: The database_version of this ExternalDatabaseBase.
:type: str
"""
self._database_version = database_version
@property
def database_edition(self):
"""
Gets the database_edition of this ExternalDatabaseBase.
The Oracle Database edition.
Allowed values for this property are: "STANDARD_EDITION", "ENTERPRISE_EDITION", "ENTERPRISE_EDITION_HIGH_PERFORMANCE", "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"
:return: The database_edition of this ExternalDatabaseBase.
:rtype: str
"""
return self._database_edition
@database_edition.setter
def database_edition(self, database_edition):
"""
Sets the database_edition of this ExternalDatabaseBase.
The Oracle Database edition.
:param database_edition: The database_edition of this ExternalDatabaseBase.
:type: str
"""
allowed_values = ["STANDARD_EDITION", "ENTERPRISE_EDITION", "ENTERPRISE_EDITION_HIGH_PERFORMANCE", "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"]
if not value_allowed_none_or_none_sentinel(database_edition, allowed_values):
raise ValueError(
"Invalid value for `database_edition`, must be None or one of {0}"
.format(allowed_values)
)
self._database_edition = database_edition
@property
def time_zone(self):
"""
Gets the time_zone of this ExternalDatabaseBase.
The time zone of the external database.
It is a time zone offset (a character type in the format '[+|-]TZH:TZM') or a time zone region name,
depending on how the time zone value was specified when the database was created / last altered.
:return: The time_zone of this ExternalDatabaseBase.
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone of this ExternalDatabaseBase.
The time zone of the external database.
It is a time zone offset (a character type in the format '[+|-]TZH:TZM') or a time zone region name,
depending on how the time zone value was specified when the database was created / last altered.
:param time_zone: The time_zone of this ExternalDatabaseBase.
:type: str
"""
self._time_zone = time_zone
@property
def character_set(self):
"""
Gets the character_set of this ExternalDatabaseBase.
The character set of the external database.
:return: The character_set of this ExternalDatabaseBase.
:rtype: str
"""
return self._character_set
@character_set.setter
def character_set(self, character_set):
"""
Sets the character_set of this ExternalDatabaseBase.
The character set of the external database.
:param character_set: The character_set of this ExternalDatabaseBase.
:type: str
"""
self._character_set = character_set
@property
def ncharacter_set(self):
"""
Gets the ncharacter_set of this ExternalDatabaseBase.
The national character of the external database.
:return: The ncharacter_set of this ExternalDatabaseBase.
:rtype: str
"""
return self._ncharacter_set
@ncharacter_set.setter
def ncharacter_set(self, ncharacter_set):
"""
Sets the ncharacter_set of this ExternalDatabaseBase.
The national character of the external database.
:param ncharacter_set: The ncharacter_set of this ExternalDatabaseBase.
:type: str
"""
self._ncharacter_set = ncharacter_set
@property
def db_packs(self):
"""
Gets the db_packs of this ExternalDatabaseBase.
The database packs licensed for the external Oracle Database.
:return: The db_packs of this ExternalDatabaseBase.
:rtype: str
"""
return self._db_packs
@db_packs.setter
def db_packs(self, db_packs):
"""
Sets the db_packs of this ExternalDatabaseBase.
The database packs licensed for the external Oracle Database.
:param db_packs: The db_packs of this ExternalDatabaseBase.
:type: str
"""
self._db_packs = db_packs
@property
def database_configuration(self):
"""
Gets the database_configuration of this ExternalDatabaseBase.
The Oracle Database configuration
Allowed values for this property are: "RAC", "SINGLE_INSTANCE"
:return: The database_configuration of this ExternalDatabaseBase.
:rtype: str
"""
return self._database_configuration
@database_configuration.setter
def database_configuration(self, database_configuration):
"""
Sets the database_configuration of this ExternalDatabaseBase.
The Oracle Database configuration
:param database_configuration: The database_configuration of this ExternalDatabaseBase.
:type: str
"""
allowed_values = ["RAC", "SINGLE_INSTANCE"]
if not value_allowed_none_or_none_sentinel(database_configuration, allowed_values):
raise ValueError(
"Invalid value for `database_configuration`, must be None or one of {0}"
.format(allowed_values)
)
self._database_configuration = database_configuration
@property
def database_management_config(self):
"""
Gets the database_management_config of this ExternalDatabaseBase.
:return: The database_management_config of this ExternalDatabaseBase.
:rtype: oci.database.models.DatabaseManagementConfig
"""
return self._database_management_config
@database_management_config.setter
def database_management_config(self, database_management_config):
"""
Sets the database_management_config of this ExternalDatabaseBase.
:param database_management_config: The database_management_config of this ExternalDatabaseBase.
:type: oci.database.models.DatabaseManagementConfig
"""
self._database_management_config = database_management_config
    def __repr__(self):
        # Readable key/value dump of all model attributes via the shared
        # OCI formatting helper imported at the top of the file.
        return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Negation of ``==``; explicit definition kept (Python 3 would
        # derive it from ``__eq__`` automatically).
        return not self == other
| 35.546362 | 245 | 0.670319 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalDatabaseBase(object):
    """Base model for an Oracle database located outside Oracle Cloud.

    Generated-SDK-style model: ``swagger_types`` and ``attribute_map`` drive
    JSON (de)serialization, every field is backed by a ``_name`` attribute
    exposed through a property pair, and the string constants below list the
    server-side allowed values for the restricted fields.
    NOTE(review): structure appears machine-generated — keep it in sync with
    the API spec rather than hand-editing field lists.
    """

    # Allowed values for the ``lifecycle_state`` property.
    LIFECYCLE_STATE_PROVISIONING = "PROVISIONING"
    LIFECYCLE_STATE_NOT_CONNECTED = "NOT_CONNECTED"
    LIFECYCLE_STATE_AVAILABLE = "AVAILABLE"
    LIFECYCLE_STATE_UPDATING = "UPDATING"
    LIFECYCLE_STATE_TERMINATING = "TERMINATING"
    LIFECYCLE_STATE_TERMINATED = "TERMINATED"
    LIFECYCLE_STATE_FAILED = "FAILED"
    # Allowed values for the ``database_edition`` property.
    DATABASE_EDITION_STANDARD_EDITION = "STANDARD_EDITION"
    DATABASE_EDITION_ENTERPRISE_EDITION = "ENTERPRISE_EDITION"
    DATABASE_EDITION_ENTERPRISE_EDITION_HIGH_PERFORMANCE = "ENTERPRISE_EDITION_HIGH_PERFORMANCE"
    DATABASE_EDITION_ENTERPRISE_EDITION_EXTREME_PERFORMANCE = "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"
    # Allowed values for the ``database_configuration`` property.
    DATABASE_CONFIGURATION_RAC = "RAC"
    DATABASE_CONFIGURATION_SINGLE_INSTANCE = "SINGLE_INSTANCE"

    def __init__(self, **kwargs):
        """Initialize a new ExternalDatabaseBase; the
        ``init_model_state_from_kwargs`` class decorator copies recognized
        keyword arguments onto the matching attributes."""
        # Attribute name -> declared type, consumed by the SDK serializer.
        self.swagger_types = {
            'compartment_id': 'str',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))',
            'display_name': 'str',
            'id': 'str',
            'lifecycle_details': 'str',
            'lifecycle_state': 'str',
            'time_created': 'datetime',
            'db_unique_name': 'str',
            'db_id': 'str',
            'database_version': 'str',
            'database_edition': 'str',
            'time_zone': 'str',
            'character_set': 'str',
            'ncharacter_set': 'str',
            'db_packs': 'str',
            'database_configuration': 'str',
            'database_management_config': 'DatabaseManagementConfig'
        }
        # Python attribute name -> JSON/REST field name.
        self.attribute_map = {
            'compartment_id': 'compartmentId',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags',
            'display_name': 'displayName',
            'id': 'id',
            'lifecycle_details': 'lifecycleDetails',
            'lifecycle_state': 'lifecycleState',
            'time_created': 'timeCreated',
            'db_unique_name': 'dbUniqueName',
            'db_id': 'dbId',
            'database_version': 'databaseVersion',
            'database_edition': 'databaseEdition',
            'time_zone': 'timeZone',
            'character_set': 'characterSet',
            'ncharacter_set': 'ncharacterSet',
            'db_packs': 'dbPacks',
            'database_configuration': 'databaseConfiguration',
            'database_management_config': 'databaseManagementConfig'
        }
        # Backing fields for the properties below; all start unset.
        self._compartment_id = None
        self._freeform_tags = None
        self._defined_tags = None
        self._display_name = None
        self._id = None
        self._lifecycle_details = None
        self._lifecycle_state = None
        self._time_created = None
        self._db_unique_name = None
        self._db_id = None
        self._database_version = None
        self._database_edition = None
        self._time_zone = None
        self._character_set = None
        self._ncharacter_set = None
        self._db_packs = None
        self._database_configuration = None
        self._database_management_config = None

    # --- Plain generated accessors: each property simply reads/writes its
    # --- backing ``_name`` field. Only lifecycle_state, database_edition and
    # --- database_configuration (below) validate against allowed values.
    @property
    def compartment_id(self):
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        self._compartment_id = compartment_id

    @property
    def freeform_tags(self):
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        self._defined_tags = defined_tags

    @property
    def display_name(self):
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        self._display_name = display_name

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def lifecycle_details(self):
        return self._lifecycle_details

    @lifecycle_details.setter
    def lifecycle_details(self, lifecycle_details):
        self._lifecycle_details = lifecycle_details

    @property
    def lifecycle_state(self):
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """Set ``lifecycle_state``; must be ``None`` or one of the
        ``LIFECYCLE_STATE_*`` constants, otherwise ``ValueError`` is raised."""
        allowed_values = ["PROVISIONING", "NOT_CONNECTED", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._lifecycle_state = lifecycle_state

    @property
    def time_created(self):
        return self._time_created

    @time_created.setter
    def time_created(self, time_created):
        self._time_created = time_created

    @property
    def db_unique_name(self):
        return self._db_unique_name

    @db_unique_name.setter
    def db_unique_name(self, db_unique_name):
        self._db_unique_name = db_unique_name

    @property
    def db_id(self):
        return self._db_id

    @db_id.setter
    def db_id(self, db_id):
        self._db_id = db_id

    @property
    def database_version(self):
        return self._database_version

    @database_version.setter
    def database_version(self, database_version):
        self._database_version = database_version

    @property
    def database_edition(self):
        return self._database_edition

    @database_edition.setter
    def database_edition(self, database_edition):
        """Set ``database_edition``; must be ``None`` or one of the
        ``DATABASE_EDITION_*`` constants, otherwise ``ValueError`` is raised."""
        allowed_values = ["STANDARD_EDITION", "ENTERPRISE_EDITION", "ENTERPRISE_EDITION_HIGH_PERFORMANCE", "ENTERPRISE_EDITION_EXTREME_PERFORMANCE"]
        if not value_allowed_none_or_none_sentinel(database_edition, allowed_values):
            raise ValueError(
                "Invalid value for `database_edition`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._database_edition = database_edition

    @property
    def time_zone(self):
        return self._time_zone

    @time_zone.setter
    def time_zone(self, time_zone):
        self._time_zone = time_zone

    @property
    def character_set(self):
        return self._character_set

    @character_set.setter
    def character_set(self, character_set):
        self._character_set = character_set

    @property
    def ncharacter_set(self):
        return self._ncharacter_set

    @ncharacter_set.setter
    def ncharacter_set(self, ncharacter_set):
        self._ncharacter_set = ncharacter_set

    @property
    def db_packs(self):
        return self._db_packs

    @db_packs.setter
    def db_packs(self, db_packs):
        self._db_packs = db_packs

    @property
    def database_configuration(self):
        return self._database_configuration

    @database_configuration.setter
    def database_configuration(self, database_configuration):
        """Set ``database_configuration``; must be ``None`` or one of the
        ``DATABASE_CONFIGURATION_*`` constants, otherwise ``ValueError`` is raised."""
        allowed_values = ["RAC", "SINGLE_INSTANCE"]
        if not value_allowed_none_or_none_sentinel(database_configuration, allowed_values):
            raise ValueError(
                "Invalid value for `database_configuration`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._database_configuration = database_configuration

    @property
    def database_management_config(self):
        return self._database_management_config

    @database_management_config.setter
    def database_management_config(self, database_management_config):
        self._database_management_config = database_management_config

    def __repr__(self):
        # Readable key/value dump via the shared OCI formatting helper.
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Attribute-wise equality; ``None`` never compares equal.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Explicit for Python 2 compatibility; Python 3 would derive it.
        return not self == other
| true | true |
1c3d2a6725b8f8f4bf2562d430c72f187cac0af2 | 1,914 | py | Python | algorithm/oripy/hidden-markov.py | thautwarm/flowlibs | 4b70280e05380a6c5acf591095bcc490a9214e53 | [
"MIT"
] | 2 | 2017-08-24T16:48:56.000Z | 2017-08-24T19:33:10.000Z | algorithm/oripy/hidden-markov.py | thautwarm/flowlibs | 4b70280e05380a6c5acf591095bcc490a9214e53 | [
"MIT"
] | null | null | null | algorithm/oripy/hidden-markov.py | thautwarm/flowlibs | 4b70280e05380a6c5acf591095bcc490a9214e53 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 10:38:50 2017
@author: misakawa
"""
from collections import defaultdict
# Σ :: status -> observation -> probability -
# Φ :: status -> status -> probability
class HMM:
    """Tuple factory: ``HMM(Σ, Φ, F)`` evaluates to the plain tuple ``(Σ, Φ, F)``.

    ``__new__`` returns a non-``HMM`` object, so no instance is ever created
    (and ``__init__`` is consequently never invoked).
    """
    __slots__ = []

    def __new__(cls, Σ, Φ, F):
        # Package emission table, transition table and observation factory
        # together; callers unpack the triple positionally.
        return (Σ, Φ, F)
def cal_status_transfrom(Seqs, gram, decide_status, gap = 1):
    """Estimate HMM emission (Σ) and transition (Φ) tables from sequences.

    Slides a window of length ``gram`` (step ``gap``) over every sequence,
    counts status/observation and status/status co-occurrences, then
    normalizes the counts in place into conditional probabilities.

    :param Seqs: iterable of sequences; each element of a sequence must be
        indexable (``x[0]`` is read below) — presumably (token, tag)-like
        pairs; verify against callers.
    :param gram: window length
    :param decide_status: maps a window's items to a hidden-status label
    :param gap: step between consecutive windows
    :return: ``HMM(Σ, Φ, F)`` — i.e. the plain tuple ``(Σ, Φ, F)``
    """
    Σ = defaultdict(lambda :defaultdict(float))
    Φ = defaultdict(lambda :defaultdict(float))
    for Seq in Seqs:
        # ``None`` acts as the pre-sequence sentinel status, so Φ[None][s]
        # accumulates sequence-initial status counts across all sequences.
        last = None
        for i in range(0, len(Seq) - gram +1, gap):
            seq = Seq[i:i+gram]
            # NOTE(review): both lines below read x[0]; if sequence items are
            # (observation, status) pairs, one of them likely meant x[1] —
            # confirm intent before changing.
            observation = "".join(map(lambda x:x[0], seq))
            status = decide_status(list(map(lambda x:x[0], seq)))
            Σ[status][observation] += 1
            Φ[last][status] += 1
            last = status
    def normalize(X):
        # Convert raw counts to conditional probabilities, in place.
        for X1 in X:
            count = sum(X[X1].values())
            for X2 in X[X1]:
                X[X1][X2] /= count
    normalize(Σ)
    normalize(Φ)
    def F(seq):
        # Observation-window generator matching the training windowing above
        # (closes over ``gram`` and ``gap``).
        for i in range(0, len(seq) - gram +1, gap):
            yield seq[i:i+gram]
    return HMM(Σ, Φ, F)
def forward(Σ, Φ, F):
    """Build a forward-algorithm evaluator for an HMM.

    :param Σ: emission probabilities, status -> observation -> probability
    :param Φ: transition probabilities, status -> status -> probability;
        may contain a ``None`` key for the artificial initial status,
        which is skipped during the recursion.
    :param F: callable turning a raw sequence into an iterator/generator
        of observations (``next()`` is called on its result).
    :return: function mapping a sequence to a list of dicts — one per
        observation — holding the forward probability of each status.
    """
    def _f(seq):
        # Initial step: weight each status by its emission probability alone.
        prob = [dict()]
        observations = F(seq)
        observation = next(observations)
        for status in Σ:
            try:
                prob[-1][status] = Σ[status][observation]
            # Narrowed from a bare ``except:``: only a missing emission row
            # is tolerated, and only for the artificial initial status.
            except KeyError:
                assert status is None
        prob.append(dict())
        # Recursion: alpha_t(s) = sum_q alpha_{t-1}(q) * Φ[q][s] * Σ[s][o_t]
        for observation in observations:
            for status in Σ:
                prob[-1][status] = \
                    sum( ( prob[-2][ϕ]*Φ[ϕ][status] for ϕ in Φ if ϕ is not None) )*Σ[status][observation]
            prob.append(dict())
        prob.pop()  # drop the empty dict appended after the final step
        return prob
    return _f
| 24.538462 | 105 | 0.482759 |
from collections import defaultdict
class HMM:
__slots__ = []
def __new__(self, Σ, Φ, F):
return (Σ, Φ, F)
def cal_status_transfrom(Seqs, gram, decide_status, gap = 1):
Σ = defaultdict(lambda :defaultdict(float))
Φ = defaultdict(lambda :defaultdict(float))
for Seq in Seqs:
last = None
for i in range(0, len(Seq) - gram +1, gap):
seq = Seq[i:i+gram]
observation = "".join(map(lambda x:x[0], seq))
status = decide_status(list(map(lambda x:x[0], seq)))
Σ[status][observation] += 1
Φ[last][status] += 1
last = status
def normalize(X):
for X1 in X:
count = sum(X[X1].values())
for X2 in X[X1]:
X[X1][X2] /= count
normalize(Σ)
normalize(Φ)
def F(seq):
for i in range(0, len(seq) - gram +1, gap):
yield seq[i:i+gram]
return HMM(Σ, Φ, F)
def forward(Σ, Φ, F):
def _f(seq):
prob = [dict()]
observations = F(seq)
observation = next(observations)
for status in Σ:
try:
prob[-1][status] = Σ[status][observation]
except:
assert status == None
prob.append(dict())
for observation in observations:
for status in Σ:
prob[-1][status] = \
sum( ( prob[-2][ϕ]*Φ[ϕ][status] for ϕ in Φ if ϕ is not None) )*Σ[status][observation]
prob.append(dict())
prob.pop()
return prob
return _f
| true | true |
1c3d2a737848c70e7b11695521b5c70999b42c5b | 2,008 | py | Python | django/db/backends/sqlite3/features.py | beniwohli/django | 514b2c989a948e3c59bda0da0c9427acf643cf5b | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/sqlite3/features.py | beniwohli/django | 514b2c989a948e3c59bda0da0c9427acf643cf5b | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/sqlite3/features.py | beniwohli/django | 514b2c989a948e3c59bda0da0c9427acf643cf5b | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-10-23T22:25:45.000Z | 2017-10-23T22:25:45.000Z | from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
    """SQLite-specific feature flags consumed by Django's ORM and test suite."""

    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    supports_timezones = False
    # SQLite's historical default limit on bound parameters per statement.
    max_query_params = 999
    supports_mixed_date_datetime_comparisons = False
    supports_column_check_constraints = False
    autocommits_when_autocommit_is_off = True
    # Introspection limits: SQLite's loose typing hides some column kinds.
    can_introspect_decimal_field = False
    can_introspect_positive_integer_field = True
    can_introspect_small_integer_field = True
    supports_transactions = True
    atomic_transactions = False
    can_rollback_ddl = True
    supports_paramstyle_pyformat = False
    supports_sequence_reset = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    ignores_table_name_case = True
    supports_cast_with_precision = False
    uses_savepoints = True
    can_release_savepoints = True

    @cached_property
    def supports_stddev(self):
        """
        Confirm support for STDDEV and related stats functions.

        SQLite supports STDDEV as an extension package; so
        connection.ops.check_expression_support() can't unilaterally
        rule out support for STDDEV. Manually check whether the call works.
        """
        # Probe with a throwaway table; cached_property runs this only once
        # per connection-features instance.
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
            try:
                cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
                has_support = True
            except utils.DatabaseError:
                has_support = False
            cursor.execute('DROP TABLE STDDEV_TEST')
        return has_support
| 38.615385 | 79 | 0.726594 | from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
max_query_params = 999
supports_mixed_date_datetime_comparisons = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
can_clone_databases = True
supports_temporal_subtraction = True
ignores_table_name_case = True
supports_cast_with_precision = False
uses_savepoints = True
can_release_savepoints = True
@cached_property
def supports_stddev(self):
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
| true | true |
1c3d2af11592b23293e1b67e8a4a20bc495a69bc | 4,695 | py | Python | services/web/server/src/simcore_service_webserver/reverse_proxy/handlers/jupyter.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/web/server/src/simcore_service_webserver/reverse_proxy/handlers/jupyter.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/web/server/src/simcore_service_webserver/reverse_proxy/handlers/jupyter.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | """ Reverse-proxy customized for jupyter notebooks
"""
import asyncio
import logging
import pprint
import aiohttp
from aiohttp import client, web
from yarl import URL
APP_SOCKETS_KEY = "simcore_service_webserver.reverse_proxy.settings.sockets"
#FIXME: Image tag should determine the handler instead of the opposite!!!
SUPPORTED_IMAGE_NAME = ["simcore/services/dynamic/jupyter-base-notebook",
"simcore/services/dynamic/jupyter-scipy-notebook",
"simcore/services/dynamic/jupyter-r-notebook",
"simcore/services/dynamic/kember-viewer",
"simcore/services/dynamic/mattward-viewer",
"simcore/services/dynamic/cc-2d-viewer",
"simcore/services/dynamic/cc-1d-viewer",
"simcore/services/dynamic/cc-0d-viewer",
"simcore/services/dynamic/spat-an-app-nb",
"simcore/services/dynamic/raw-graphs"
]
SUPPORTED_IMAGE_TAG = ">=1.5.0"
logger = logging.getLogger(__name__)
async def handler(req: web.Request, service_url: str, **_kwargs):
    """ Redirects communication to jupyter notebook in the backend

    (e.g. front-end) client <---> proxy <-----> server (e.g. backend dynamic service)

    Upgrades to a bidirectional websocket bridge when the client requests
    one; otherwise forwards the plain HTTP request and relays the response.

    :param req: aiohttp request
    :type req: web.Request
    :param service_url: Resolved url pointing to backend jupyter service. Typically http:hostname:port/x/12345/.
    :type service_url: str
    :raises ValueError: Unexpected web-socket message
    """
    # FIXME: hash of statics somehow get do not work. then neeed to be strip away
    # Removing query ... which not sure is a good idea
    target_url = URL(service_url).origin() / req.path.lstrip('/')

    reqH = req.headers.copy()
    if reqH.get('connection', '').lower() == 'upgrade' and reqH.get('upgrade', '').lower() == 'websocket' and req.method == 'GET':
        ws_server = web.WebSocketResponse()
        # NOTE(review): when can_prepare() fails this falls through and the
        # coroutine returns None — confirm whether an error response is wanted.
        available = ws_server.can_prepare(req)
        if available:
            await ws_server.prepare(req)
            logger.info('##### WS_SERVER %s', pprint.pformat(ws_server))

            try:
                req.app[APP_SOCKETS_KEY].append(ws_server)
                # ``async with`` guarantees the client session is closed even
                # if the bridge fails; previously one session leaked per
                # websocket connection.
                async with aiohttp.ClientSession(cookies=req.cookies) as client_session:
                    async with client_session.ws_connect(target_url) as ws_client:
                        logger.info('##### WS_CLIENT %s', pprint.pformat(ws_client))

                        async def ws_forward(ws_from, ws_to):
                            # Pump messages one way, mirroring each frame type.
                            async for msg in ws_from:
                                mt = msg.type
                                md = msg.data
                                if mt == aiohttp.WSMsgType.TEXT:
                                    await ws_to.send_str(md)
                                elif mt == aiohttp.WSMsgType.BINARY:
                                    await ws_to.send_bytes(md)
                                elif mt == aiohttp.WSMsgType.PING:
                                    await ws_to.ping()
                                elif mt == aiohttp.WSMsgType.PONG:
                                    await ws_to.pong()
                                elif ws_to.closed:
                                    await ws_to.close(code=ws_to.close_code, message=msg.extra)
                                else:
                                    raise ValueError(
                                        'unexpected message type: %s' % pprint.pformat(msg))

                        # Bridge both directions; stop as soon as either side
                        # finishes (peer disconnect or error).
                        await asyncio.wait([ws_forward(ws_server, ws_client),
                                            ws_forward(ws_client, ws_server)],
                                           return_when=asyncio.FIRST_COMPLETED)

                        return ws_server
            finally:
                # Runs on normal return as well: deregister the socket.
                req.app[APP_SOCKETS_KEY].remove(ws_server)

    else:
        # Plain HTTP: replay method, headers, query and body to the backend.
        target_url = URL(service_url).origin().with_path(req.path).with_query(req.query)
        async with client.request(
                req.method, target_url,
                headers=reqH,
                allow_redirects=False,
                data=await req.read()
        ) as res:
            body = await res.read()
            response = web.Response(
                headers=res.headers.copy(),
                status=res.status,
                body=body
            )
            return response
if __name__ == "__main__":
    # dummies for manual testing
    BASE_URL = 'http://0.0.0.0:8888'
    MOUNT_POINT = '/x/12345'

    def adapter(req: web.Request):
        # Bind the generic proxy handler to the local test backend.
        return handler(req, service_url=BASE_URL)

    # Catch-all route under the mount point, any HTTP method.
    app = web.Application()
    app[APP_SOCKETS_KEY] = list()
    app.router.add_route('*', MOUNT_POINT + '/{proxyPath:.*}', adapter)
    web.run_app(app, port=3984)
| 40.474138 | 130 | 0.553781 |
import asyncio
import logging
import pprint
import aiohttp
from aiohttp import client, web
from yarl import URL
APP_SOCKETS_KEY = "simcore_service_webserver.reverse_proxy.settings.sockets"
SUPPORTED_IMAGE_NAME = ["simcore/services/dynamic/jupyter-base-notebook",
"simcore/services/dynamic/jupyter-scipy-notebook",
"simcore/services/dynamic/jupyter-r-notebook",
"simcore/services/dynamic/kember-viewer",
"simcore/services/dynamic/mattward-viewer",
"simcore/services/dynamic/cc-2d-viewer",
"simcore/services/dynamic/cc-1d-viewer",
"simcore/services/dynamic/cc-0d-viewer",
"simcore/services/dynamic/spat-an-app-nb",
"simcore/services/dynamic/raw-graphs"
]
SUPPORTED_IMAGE_TAG = ">=1.5.0"
logger = logging.getLogger(__name__)
async def handler(req: web.Request, service_url: str, **_kwargs):
target_url = URL(service_url).origin() / req.path.lstrip('/')
reqH = req.headers.copy()
if reqH.get('connection', '').lower() == 'upgrade' and reqH.get('upgrade', '').lower() == 'websocket' and req.method == 'GET':
ws_server = web.WebSocketResponse()
available = ws_server.can_prepare(req)
if available:
await ws_server.prepare(req)
logger.info('##### WS_SERVER %s', pprint.pformat(ws_server))
try:
req.app[APP_SOCKETS_KEY].append(ws_server)
client_session = aiohttp.ClientSession(cookies=req.cookies)
async with client_session.ws_connect(target_url) as ws_client:
logger.info('##### WS_CLIENT %s', pprint.pformat(ws_client))
async def ws_forward(ws_from, ws_to):
async for msg in ws_from:
mt = msg.type
md = msg.data
if mt == aiohttp.WSMsgType.TEXT:
await ws_to.send_str(md)
elif mt == aiohttp.WSMsgType.BINARY:
await ws_to.send_bytes(md)
elif mt == aiohttp.WSMsgType.PING:
await ws_to.ping()
elif mt == aiohttp.WSMsgType.PONG:
await ws_to.pong()
elif ws_to.closed:
await ws_to.close(code=ws_to.close_code, message=msg.extra)
else:
raise ValueError(
'unexpected message type: %s' % pprint.pformat(msg))
await asyncio.wait([ws_forward(ws_server, ws_client),
ws_forward(ws_client, ws_server)],
return_when=asyncio.FIRST_COMPLETED)
return ws_server
finally:
req.app[APP_SOCKETS_KEY].remove(ws_server)
else:
target_url = URL(service_url).origin().with_path(req.path).with_query(req.query)
async with client.request(
req.method, target_url,
headers=reqH,
allow_redirects=False,
data=await req.read()
) as res:
body = await res.read()
response = web.Response(
headers=res.headers.copy(),
status=res.status,
body=body
)
return response
if __name__ == "__main__":
BASE_URL = 'http://0.0.0.0:8888'
MOUNT_POINT = '/x/12345'
def adapter(req: web.Request):
return handler(req, service_url=BASE_URL)
app = web.Application()
app[APP_SOCKETS_KEY] = list()
app.router.add_route('*', MOUNT_POINT + '/{proxyPath:.*}', adapter)
web.run_app(app, port=3984)
| true | true |
1c3d2b6217b94095a760d431548d3e5c51b33f82 | 341 | py | Python | src/handler.py | caulagi/serverless-python-rds-cron | 452e3baff8dfb1b1a078894da6142a3c2372415a | [
"MIT"
] | null | null | null | src/handler.py | caulagi/serverless-python-rds-cron | 452e3baff8dfb1b1a078894da6142a3c2372415a | [
"MIT"
] | null | null | null | src/handler.py | caulagi/serverless-python-rds-cron | 452e3baff8dfb1b1a078894da6142a3c2372415a | [
"MIT"
] | 1 | 2019-08-05T09:50:19.000Z | 2019-08-05T09:50:19.000Z | """
Just a wrapper around the actual script (cleanup) for serverless,
but adds sentry support
"""
import os
import sentry_sdk
from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration
import cleanup
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[AwsLambdaIntegration()])
def run(event, context):
    """AWS Lambda entry point: delegate to the cleanup script.

    :param event: Lambda trigger payload (unused)
    :param context: Lambda runtime context (unused)
    """
    cleanup.run()
| 20.058824 | 83 | 0.782991 | import os
import sentry_sdk
from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration
import cleanup
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"), integrations=[AwsLambdaIntegration()])
def run(event, context):
cleanup.run()
| true | true |
1c3d2def7bbd1c0a2936a78f94d3508c9b97b156 | 1,095 | py | Python | cases/old cases/case2.py | massovercharge/lorenzattractor | f9b1712d5f38be3a84afee037ac7051efbe1f4c6 | [
"MIT"
] | null | null | null | cases/old cases/case2.py | massovercharge/lorenzattractor | f9b1712d5f38be3a84afee037ac7051efbe1f4c6 | [
"MIT"
] | null | null | null | cases/old cases/case2.py | massovercharge/lorenzattractor | f9b1712d5f38be3a84afee037ac7051efbe1f4c6 | [
"MIT"
] | 1 | 2021-07-07T14:09:23.000Z | 2021-07-07T14:09:23.000Z | """
This file could contain the necessary calls to make plots etc for
case 2
"""
import os
import sys
os.chdir('C:\\Users\\dwp\\OneDrive - Novozymes A S\\PhD\\Kurser\\scipro\\'+
'project\\lorenz\\cases')
sys.path.append(os.path.abspath('../'))
from lorenz import run
from lorenz import solver
def case2():
    """Run the Lorenz solver for case 2, persist it, and reload the result.

    Parameters: sigma=10, beta=8/3, rho=16 from start point (0.1, 0.1, 0.1),
    integrated with 10000 Euler steps of size 0.02. The solver object is
    saved under results/case2 and then loaded back.

    :return: the reloaded ``lorenz_solver`` object
    """
    sigma = 10
    beta = 8/3
    rho = 16
    init = [0.1,0.1,0.1]
    N = 10000
    t = 0.02
    # Build paths with os.path.join so the case also runs on POSIX systems
    # (previously hard-coded '\\' separators produced literal filenames
    # outside Windows; on Windows the resulting strings are identical).
    results_dir = os.path.join('..', 'results', 'case2')
    filename = os.path.join(results_dir, 'case2')
    os.makedirs(results_dir, exist_ok=True)  # idempotent across re-runs
    solver_obj = solver.lorenz_solver(sigma, beta, rho, init, N, t)
    print('Running lorenz solver for case2 given a starting point of '+
          '{}, a stepcount of {} and a stepsize of {}.'.format(init, N, t))
    solver_obj.euler()
    run.save_case(solver_obj, filename)
    print('Loading lorenz_solver class object binary file into variable'+
          ' "case2" and plotting path.')
    case2 = run.load_case(filename=filename)
    return case2
if __name__ == '__main__':
case2 = case2() | 26.071429 | 76 | 0.605479 | import os
import sys
os.chdir('C:\\Users\\dwp\\OneDrive - Novozymes A S\\PhD\\Kurser\\scipro\\'+
'project\\lorenz\\cases')
sys.path.append(os.path.abspath('../'))
from lorenz import run
from lorenz import solver
def case2():
sigma = 10
beta = 8/3
rho = 16
init = [0.1,0.1,0.1]
N = 10000
t = 0.02
filename = '..\\results\\case2\\case2'
if not os.path.exists('..\\results\\case2'):
os.mkdir('..\\results\\case2')
solver_obj = solver.lorenz_solver(sigma, beta, rho, init, N, t)
print('Running lorenz solver for case2 given a starting point of '+
'{}, a stepcount of {} and a stepsize of {}.'.format(init, N, t))
solver_obj.euler()
run.save_case(solver_obj, filename)
print('Loading lorenz_solver class object binary file into variable'+
' "case2" and plotting path.')
case2 = run.load_case(filename=filename)
return case2
if __name__ == '__main__':
case2 = case2() | true | true |
1c3d2f13ba02324e1993545338734814396d3da4 | 769 | py | Python | helpers/yake_helper.py | thisishardik/forum-posts-clustering | cadc304143f431b3e94ba138dc2c8e0999814c8a | [
"MIT"
] | null | null | null | helpers/yake_helper.py | thisishardik/forum-posts-clustering | cadc304143f431b3e94ba138dc2c8e0999814c8a | [
"MIT"
] | null | null | null | helpers/yake_helper.py | thisishardik/forum-posts-clustering | cadc304143f431b3e94ba138dc2c8e0999814c8a | [
"MIT"
] | null | null | null | import os
# HACK: installs yake at import time via pip. This mutates the running
# environment and the os.system() return code is ignored, so failures are
# silent. NOTE(review): prefer declaring yake in requirements instead.
os.system("pip install git+https://github.com/LIAAD/yake")
import pandas as pd
from nltk.tokenize import RegexpTokenizer
import yake
def keywords_yake(sample_posts):
    """Reduce each post to a single string of its YAKE keywords.

    Each returned entry is the post's extracted keywords concatenated in
    extraction order, each followed by a trailing space (keyword scores
    are discarded).
    """
    extractor = yake.KeywordExtractor()
    reduced = []
    for post in sample_posts:
        keywords = extractor.extract_keywords(post)
        reduced.append("".join(term + " " for term, _score in keywords))
    return reduced
def tokenizing_after_YAKE(sentences):
    # Lowercase each keyword string, then split it into word tokens
    # (maximal runs of \w characters); returns a list of token lists.
    tokenizer = RegexpTokenizer(r'\w+')
    tokenized_data = [w.lower() for w in sentences]
    tokenized_data = [tokenizer.tokenize(i) for i in tokenized_data]
    return(tokenized_data) | 26.517241 | 68 | 0.695709 | import os
os.system("pip install git+https://github.com/LIAAD/yake")
import pandas as pd
from nltk.tokenize import RegexpTokenizer
import yake
def keywords_yake(sample_posts):
simple_kwextractor = yake.KeywordExtractor()
sentences = []
for post in sample_posts:
post_keywords = simple_kwextractor.extract_keywords(post)
sentence_output = ""
for word, number in post_keywords:
sentence_output += word + " "
sentences.append(sentence_output)
return(sentences)
def tokenizing_after_YAKE(sentences):
tokenizer = RegexpTokenizer(r'\w+')
tokenized_data = [w.lower() for w in sentences]
tokenized_data = [tokenizer.tokenize(i) for i in tokenized_data]
return(tokenized_data) | true | true |
1c3d2ff7435e1846ca3949259991207ef79e55e4 | 6,600 | py | Python | InvenTree/order/migrations/0064_purchaseorderextraline_salesorderextraline.py | rkalman/InvenTree | 8ceff063f86394cd1be9f5c57e6302b79782a526 | [
"MIT"
] | null | null | null | InvenTree/order/migrations/0064_purchaseorderextraline_salesorderextraline.py | rkalman/InvenTree | 8ceff063f86394cd1be9f5c57e6302b79782a526 | [
"MIT"
] | null | null | null | InvenTree/order/migrations/0064_purchaseorderextraline_salesorderextraline.py | rkalman/InvenTree | 8ceff063f86394cd1be9f5c57e6302b79782a526 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-27 01:11
import InvenTree.fields
import django.core.validators
from django.core import serializers
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
import djmoney.models.validators
def _convert_model(apps, line_item_ref, extra_line_ref, price_ref):
    """Convert the OrderLineItem instances if applicable to new ExtraLine instances"""
    # apps: the migration app registry supplied by migrations.RunPython.
    # line_item_ref / extra_line_ref: model names within the 'order' app.
    # price_ref: attribute name of the price field on the line item model.
    OrderLineItem = apps.get_model('order', line_item_ref)
    OrderExtraLine = apps.get_model('order', extra_line_ref)
    # Only line items with no associated part become 'extra' lines.
    items_to_change = OrderLineItem.objects.filter(part=None)
    if items_to_change.count() == 0:
        return
    print(f'\nFound {items_to_change.count()} old {line_item_ref} instance(s)')
    print(f'Starting to convert - currently at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
    for lineItem in items_to_change:
        newitem = OrderExtraLine(
            order=lineItem.order,
            notes=lineItem.notes,
            price=getattr(lineItem, price_ref),
            quantity=lineItem.quantity,
            reference=lineItem.reference,
        )
        # Keep a full serialized snapshot in the JSON context field so the
        # reverse migration can reconstruct the original line item.
        newitem.context = {'migration': serializers.serialize('json', [lineItem, ])}
        newitem.save()
        lineItem.delete()
    print(f'Done converting line items - now at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
def _reconvert_model(apps, line_item_ref, extra_line_ref): # pragma: no cover
    """Convert ExtraLine instances back to OrderLineItem instances.

    Reverse of ``_convert_model``: every ExtraLine carrying a serialized
    snapshot of its original line item (stored under the 'migration' key of
    the JSON ``context`` field) is deserialized and re-saved, and the
    ExtraLine itself is deleted.

    Arguments:
        apps: the migration app registry supplied by migrations.RunPython.
        line_item_ref: model name of the original line item class.
        extra_line_ref: model name of the ExtraLine class to convert back.
    """
    OrderLineItem = apps.get_model('order', line_item_ref)
    OrderExtraLine = apps.get_model('order', extra_line_ref)
    print(f'\nStarting to convert - currently at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
    for extra_line in OrderExtraLine.objects.all():
        # Regenerate the original line item from its serialized snapshot.
        if extra_line.context:
            # The context is a JSON dict, so look the key up with .get();
            # the previous getattr(extra_line.context, 'migration') raised
            # AttributeError on dict instances.
            context_string = extra_line.context.get('migration')
            if not context_string:
                continue
            for item in serializers.deserialize('json', context_string):
                item.save()
        extra_line.delete()
    print(f'Done converting line items - now at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
def convert_line_items(apps, schema_editor):
    """Forward migration: move part-less line items into ExtraLine models."""
    conversions = (
        ('PurchaseOrderLineItem', 'PurchaseOrderExtraLine', 'purchase_price'),
        ('SalesOrderLineItem', 'SalesOrderExtraLine', 'sale_price'),
    )
    for line_item_ref, extra_line_ref, price_ref in conversions:
        _convert_model(apps, line_item_ref, extra_line_ref, price_ref)
def nunconvert_line_items(apps, schema_editor): # pragma: no cover
    """reconvert line items"""
    # Reverse migration entry point for RunPython.
    # NOTE(review): the name is a typo for 'unconvert_line_items', but it is
    # referenced by Migration.operations below, so it is kept as-is.
    _reconvert_model(apps, 'PurchaseOrderLineItem', 'PurchaseOrderExtraLine')
    _reconvert_model(apps, 'SalesOrderLineItem', 'SalesOrderExtraLine')
class Migration(migrations.Migration):
    # Creates the two ExtraLine tables (sales + purchase, identical field
    # layouts) and then migrates existing part-less line items into them.
    dependencies = [
        ('order', '0063_alter_purchaseorderlineitem_unique_together'),
    ]
    operations = [
        migrations.CreateModel(
            name='SalesOrderExtraLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', InvenTree.fields.RoundingDecimalField(decimal_places=5, default=1, help_text='Item quantity', max_digits=15, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Quantity')),
                ('reference', models.CharField(blank=True, help_text='Line item reference', max_length=100, verbose_name='Reference')),
                ('notes', models.CharField(blank=True, help_text='Line item notes', max_length=500, verbose_name='Notes')),
                ('target_date', models.DateField(blank=True, help_text='Target shipping date for this line item', null=True, verbose_name='Target Date')),
                ('context', models.JSONField(blank=True, help_text='Additional context for this line', null=True, verbose_name='Context')),
                ('price_currency', djmoney.models.fields.CurrencyField(choices=[], default='', editable=False, max_length=3)),
                ('price', InvenTree.fields.InvenTreeModelMoneyField(blank=True, currency_choices=[], decimal_places=4, default_currency='', help_text='Unit price', max_digits=19, null=True, validators=[djmoney.models.validators.MinMoneyValidator(0)], verbose_name='Price')),
                ('order', models.ForeignKey(help_text='Sales Order', on_delete=django.db.models.deletion.CASCADE, related_name='extra_lines', to='order.salesorder', verbose_name='Order')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PurchaseOrderExtraLine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', InvenTree.fields.RoundingDecimalField(decimal_places=5, default=1, help_text='Item quantity', max_digits=15, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Quantity')),
                ('reference', models.CharField(blank=True, help_text='Line item reference', max_length=100, verbose_name='Reference')),
                ('notes', models.CharField(blank=True, help_text='Line item notes', max_length=500, verbose_name='Notes')),
                ('target_date', models.DateField(blank=True, help_text='Target shipping date for this line item', null=True, verbose_name='Target Date')),
                ('context', models.JSONField(blank=True, help_text='Additional context for this line', null=True, verbose_name='Context')),
                ('price_currency', djmoney.models.fields.CurrencyField(choices=[], default='', editable=False, max_length=3)),
                ('price', InvenTree.fields.InvenTreeModelMoneyField(blank=True, currency_choices=[], decimal_places=4, default_currency='', help_text='Unit price', max_digits=19, null=True, validators=[djmoney.models.validators.MinMoneyValidator(0)], verbose_name='Price')),
                ('order', models.ForeignKey(help_text='Purchase Order', on_delete=django.db.models.deletion.CASCADE, related_name='extra_lines', to='order.purchaseorder', verbose_name='Order')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Data migration: forward converts part-less line items, reverse
        # restores them from the serialized snapshots (see helpers above).
        migrations.RunPython(convert_line_items, reverse_code=nunconvert_line_items),
    ]
| 60 | 274 | 0.690455 |
import InvenTree.fields
import django.core.validators
from django.core import serializers
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
import djmoney.models.validators
def _convert_model(apps, line_item_ref, extra_line_ref, price_ref):
OrderLineItem = apps.get_model('order', line_item_ref)
OrderExtraLine = apps.get_model('order', extra_line_ref)
items_to_change = OrderLineItem.objects.filter(part=None)
if items_to_change.count() == 0:
return
print(f'\nFound {items_to_change.count()} old {line_item_ref} instance(s)')
print(f'Starting to convert - currently at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
for lineItem in items_to_change:
newitem = OrderExtraLine(
order=lineItem.order,
notes=lineItem.notes,
price=getattr(lineItem, price_ref),
quantity=lineItem.quantity,
reference=lineItem.reference,
)
newitem.context = {'migration': serializers.serialize('json', [lineItem, ])}
newitem.save()
lineItem.delete()
print(f'Done converting line items - now at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
def _reconvert_model(apps, line_item_ref, extra_line_ref):
OrderLineItem = apps.get_model('order', line_item_ref)
OrderExtraLine = apps.get_model('order', extra_line_ref)
print(f'\nStarting to convert - currently at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
for extra_line in OrderExtraLine.objects.all():
if extra_line.context:
context_string = getattr(extra_line.context, 'migration')
if not context_string:
continue
[item.save() for item in serializers.deserialize('json', context_string)]
extra_line.delete()
print(f'Done converting line items - now at {OrderExtraLine.objects.all().count()} {extra_line_ref} / {OrderLineItem.objects.all().count()} {line_item_ref} instance(s)')
def convert_line_items(apps, schema_editor):
_convert_model(apps, 'PurchaseOrderLineItem', 'PurchaseOrderExtraLine', 'purchase_price')
_convert_model(apps, 'SalesOrderLineItem', 'SalesOrderExtraLine', 'sale_price')
def nunconvert_line_items(apps, schema_editor):
_reconvert_model(apps, 'PurchaseOrderLineItem', 'PurchaseOrderExtraLine')
_reconvert_model(apps, 'SalesOrderLineItem', 'SalesOrderExtraLine')
class Migration(migrations.Migration):
dependencies = [
('order', '0063_alter_purchaseorderlineitem_unique_together'),
]
operations = [
migrations.CreateModel(
name='SalesOrderExtraLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', InvenTree.fields.RoundingDecimalField(decimal_places=5, default=1, help_text='Item quantity', max_digits=15, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Quantity')),
('reference', models.CharField(blank=True, help_text='Line item reference', max_length=100, verbose_name='Reference')),
('notes', models.CharField(blank=True, help_text='Line item notes', max_length=500, verbose_name='Notes')),
('target_date', models.DateField(blank=True, help_text='Target shipping date for this line item', null=True, verbose_name='Target Date')),
('context', models.JSONField(blank=True, help_text='Additional context for this line', null=True, verbose_name='Context')),
('price_currency', djmoney.models.fields.CurrencyField(choices=[], default='', editable=False, max_length=3)),
('price', InvenTree.fields.InvenTreeModelMoneyField(blank=True, currency_choices=[], decimal_places=4, default_currency='', help_text='Unit price', max_digits=19, null=True, validators=[djmoney.models.validators.MinMoneyValidator(0)], verbose_name='Price')),
('order', models.ForeignKey(help_text='Sales Order', on_delete=django.db.models.deletion.CASCADE, related_name='extra_lines', to='order.salesorder', verbose_name='Order')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PurchaseOrderExtraLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', InvenTree.fields.RoundingDecimalField(decimal_places=5, default=1, help_text='Item quantity', max_digits=15, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Quantity')),
('reference', models.CharField(blank=True, help_text='Line item reference', max_length=100, verbose_name='Reference')),
('notes', models.CharField(blank=True, help_text='Line item notes', max_length=500, verbose_name='Notes')),
('target_date', models.DateField(blank=True, help_text='Target shipping date for this line item', null=True, verbose_name='Target Date')),
('context', models.JSONField(blank=True, help_text='Additional context for this line', null=True, verbose_name='Context')),
('price_currency', djmoney.models.fields.CurrencyField(choices=[], default='', editable=False, max_length=3)),
('price', InvenTree.fields.InvenTreeModelMoneyField(blank=True, currency_choices=[], decimal_places=4, default_currency='', help_text='Unit price', max_digits=19, null=True, validators=[djmoney.models.validators.MinMoneyValidator(0)], verbose_name='Price')),
('order', models.ForeignKey(help_text='Purchase Order', on_delete=django.db.models.deletion.CASCADE, related_name='extra_lines', to='order.purchaseorder', verbose_name='Order')),
],
options={
'abstract': False,
},
),
migrations.RunPython(convert_line_items, reverse_code=nunconvert_line_items),
]
| true | true |
1c3d3062ce19698532e267a7eb8ba2beff97559c | 54,252 | py | Python | tests/test_scenic_quality.py | davemfish/invest | c97a0ce58c1ecd248326691ca9d36660c744fa90 | [
"BSD-3-Clause"
] | null | null | null | tests/test_scenic_quality.py | davemfish/invest | c97a0ce58c1ecd248326691ca9d36660c744fa90 | [
"BSD-3-Clause"
] | 1 | 2021-12-08T19:49:56.000Z | 2021-12-11T01:59:55.000Z | tests/test_scenic_quality.py | emlys/invest | 5b0391fd456df5a6afd2fdfbaed542a090f58f17 | [
"BSD-3-Clause"
] | null | null | null | """Module for Regression Testing the InVEST Scenic Quality module."""
import unittest
import tempfile
import shutil
import os
import glob
from osgeo import gdal
from osgeo import osr
import pygeoprocessing.testing
from pygeoprocessing.testing import sampledata
from shapely.geometry import Polygon, Point
import numpy
# All rasters/vectors created by these tests share one projection,
# exported once here as WKT.
_SRS = osr.SpatialReference()
_SRS.ImportFromEPSG(32731)  # WGS84 / UTM zone 31s
WKT = _SRS.ExportToWkt()
class ScenicQualityTests(unittest.TestCase):
"""Tests for the InVEST Scenic Quality model."""
    def setUp(self):
        """Create a temporary workspace."""
        # A fresh directory per test; removed again in tearDown.
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the temporary workspace after a test."""
        # Recursively deletes the directory created in setUp.
        shutil.rmtree(self.workspace_dir)
@staticmethod
def create_dem(dem_path):
"""Create a known DEM at the given path.
Parameters:
dem_path (string): Where to store the DEM.
Returns:
``None``
"""
from pygeoprocessing.testing import create_raster_on_disk
dem_matrix = numpy.array(
[[10, 2, 2, 2, 10],
[2, 10, 2, 10, 2],
[2, 2, 10, 2, 2],
[2, 10, 2, 10, 2],
[10, 2, 2, 2, 10]], dtype=numpy.int8)
create_raster_on_disk(
[dem_matrix],
origin=(2, -2),
projection_wkt=WKT,
nodata=255, # byte nodata value
pixel_size=(2, -2),
raster_driver_creation_tuple=(
'GTIFF', ['TILED=YES',
'BIGTIFF=YES',
'COMPRESS=LZW']),
filename=dem_path)
@staticmethod
def create_aoi(aoi_path):
"""Create a known bounding box that overlaps the DEM.
The envelope of the AOI perfectly overlaps the outside edge of the DEM.
Parameters:
aoi_path (string): The filepath where the AOI should be written.
Returns:
``None``
"""
sampledata.create_vector_on_disk(
[Polygon([(2, -2), (2, -12), (12, -12), (12, -2), (2, -2)])],
WKT, filename=aoi_path)
@staticmethod
def create_viewpoints(viewpoints_path, fields=None, attributes=None):
"""Create a known set of viewpoints for this DEM.
This vector will contain 4 viewpoints in the WGS84/UTM31S projection.
The second viewpoint is off the edge of the DEM and will therefore not
be included in the Scenic Quality analysis.
Parameters:
viewpoints_path (string): The filepath where the viewpoints vector
should be saved.
fields=None (dict): If provided, this must be a dict mapping
fieldnames to datatypes, as expected by
``pygeoprocessing.create_vector_on_disk``.
attributes=None (dict): If provided, this must be a list of dicts
mapping fieldnames (which match the keys in ``fields``) to
values that will be used as the column value for each feature
in sequence.
Returns:
``None``
"""
sampledata.create_vector_on_disk(
[Point(7.0, -3.0),
Point(1.0, -7.0), # off the edge of DEM, won't be included.
Point(7.0, -11.0),
Point(11.0, -7.0)],
projection=WKT,
fields=fields,
attributes=attributes,
filename=viewpoints_path)
    def test_exception_when_no_structures_aoi_overlap(self):
        """SQ: model raises exception when AOI does not overlap structures."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        # AOI DEFINITELY doesn't overlap with the viewpoints.
        # (All y coordinates are positive; the DEM/viewpoints span y in
        # [-12, -2], so there can be no intersection.)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        sampledata.create_vector_on_disk(
            [Polygon([(2, 2), (2, 12), (12, 12), (12, 2), (2, 2)])],
            WKT, filename=aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            # Valuation parameter defaults to False, so leaving it off here.
            'n_workers': -1,
        }
        with self.assertRaises(ValueError) as cm:
            scenic_quality.execute(args)
        # The error message substring is part of the model's contract for
        # this failure mode.
        self.assertTrue('found no intersection between' in str(cm.exception))
    def test_no_valuation(self):
        """SQ: model works as expected without valuation."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        # Using weighted viewpoints here to make the visual quality output more
        # interesting.
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(
            viewpoints_path,
            fields={'RADIUS': 'real',
                    'HEIGHT': 'real',
                    'WEIGHT': 'real'},
            attributes=[
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5}])
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            # Valuation parameter defaults to False, so leaving it off here.
            'n_workers': -1,
        }
        scenic_quality.execute(args)
        # vshed.tif and vshed_qual.tif are still created by the model,
        # vshed_value.tif is not when we are not doing valuation.
        for output_filename, should_exist in (
                ('vshed_value.tif', False),
                ('vshed.tif', True),
                ('vshed_qual.tif', True)):
            full_filepath = os.path.join(
                args['workspace_dir'], 'output', output_filename)
            self.assertEqual(os.path.exists(full_filepath), should_exist)
        # In a non-valuation run, vshed_qual.tif is based on the number of
        # visible structures rather than the valuation, so we need to make sure
        # that the raster has the expected values.
        expected_visual_quality = numpy.array(
            [[1, 1, 1, 1, 4],
             [0, 1, 1, 4, 3],
             [0, 0, 4, 3, 3],
             [0, 3, 3, 4, 3],
             [3, 3, 3, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual.tif')
        quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_invalid_valuation_function(self):
        """SQ: model raises exception with invalid valuation function."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'results_suffix': 'foo',
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'do_valuation': True,
            # Not one of the recognized valuation function names; execute()
            # is expected to reject it with a ValueError.
            'valuation_function': 'INVALID FUNCTION',
            'a_coef': 1,
            'b_coef': 0,
            'max_valuation_radius': 10.0,
            'n_workers': -1,
        }
        with self.assertRaises(ValueError):
            scenic_quality.execute(args)
def test_error_invalid_viewpoints(self):
"""SQ: error when no valid viewpoints.
This also tests for coverage when using logarithmic valuation on pixels
with size < 1m.
"""
from natcap.invest.scenic_quality import scenic_quality
from pygeoprocessing.testing import create_raster_on_disk
dem_matrix = numpy.array(
[[-1, -1, 2, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]], dtype=numpy.int)
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
create_raster_on_disk(
[dem_matrix],
origin=(0, 0),
projection_wkt=WKT,
nodata=-1,
pixel_size=(0.5, -0.5),
filename=dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
sampledata.create_vector_on_disk(
[Point(1.25, -0.5), # Valid in DEM but outside of AOI.
Point(-1.0, -5.0), # off the edge of DEM.
Point(1.25, -1.5)], # Within AOI, over nodata.
WKT, filename=viewpoints_path)
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
sampledata.create_vector_on_disk(
[Polygon([(1, -1), (1, -2.5), (2.5, -2.5), (2.5, -1), (1, -1)])],
WKT, filename=aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'results_suffix': 'foo',
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'valuation_function': 'logarithmic',
'a_coef': 1,
'b_coef': 0,
'max_valuation_radius': 10.0,
'n_workers': -1, # use serial mode to ensure correct exception.
}
with self.assertRaises(ValueError) as raised_error:
scenic_quality.execute(args)
self.assertTrue('No valid viewpoints found.' in
str(raised_error.exception))
    def test_viewshed_field_defaults(self):
        """SQ: run model with default field values."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        # No RADIUS/HEIGHT/WEIGHT fields, so the model's defaults apply.
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'results_suffix': 'foo',
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'valuation_function': 'linear',
            'do_valuation': True,
            'a_coef': 1,
            'b_coef': 0,
            'max_valuation_radius': 10.0,
            'n_workers': -1,
        }
        # Simulate a run where the clipped structures vector already exists.
        # This is needed for coverage in the vector clipping function.
        clipped_structures_path = os.path.join(args['workspace_dir'],
                                               'intermediate',
                                               'structures_clipped_foo.shp')
        os.makedirs(os.path.dirname(clipped_structures_path))
        with open(clipped_structures_path, 'w') as fake_file:
            fake_file.write('this is a vector :)')
        scenic_quality.execute(args)
        # 3 of the 4 viewpoints overlap the DEM, so there should only be files
        # from 3 viewsheds.
        self.assertEqual(len(glob.glob(os.path.join(
            args['workspace_dir'], 'intermediate', 'visibility*'))), 3)
        self.assertEqual(len(glob.glob(os.path.join(
            args['workspace_dir'], 'intermediate', 'value*'))), 3)
        # Verify that the value summation matrix is what we expect it to be.
        expected_value = numpy.array(
            [[1, 1, 1, 1, 2],
             [0, 1, 1, 2, 1],
             [0, 0, 3, 1, 1],
             [0, 1, 1, 2, 1],
             [1, 1, 1, 1, 2]], dtype=numpy.int8)
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_value_foo.tif'), gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, value_matrix, rtol=0, atol=1e-6)
        # verify that the correct number of viewpoints has been tallied.
        vshed_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_foo.tif'), gdal.OF_RASTER)
        vshed_band = vshed_raster.GetRasterBand(1)
        vshed_matrix = vshed_band.ReadAsArray()
        # Because our B coefficient is 0, the vshed matrix should match the
        # value matrix.
        numpy.testing.assert_allclose(
            expected_value, vshed_matrix, rtol=0, atol=1e-6)
        # Test the visual quality raster.
        expected_visual_quality = numpy.array(
            [[3, 3, 3, 3, 4],
             [0, 3, 3, 4, 3],
             [0, 0, 4, 3, 3],
             [0, 3, 3, 4, 3],
             [3, 3, 3, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual_foo.tif')
        quality_matrix = gdal.OpenEx(visual_quality_raster,
                                     gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_viewshed_with_fields(self):
        """SQ: verify that we can specify viewpoint fields."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        # Two viewpoints get WEIGHT=2.5, which should scale their
        # contribution in the weighted outputs below.
        ScenicQualityTests.create_viewpoints(
            viewpoints_path,
            fields={'RADIUS': 'real',
                    'HEIGHT': 'real',
                    'WEIGHT': 'real'},
            attributes=[
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5}])
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'do_valuation': True,
            'valuation_function': 'linear',
            'a_coef': 0,
            'b_coef': 1,
            'max_valuation_radius': 10.0,
            # n_workers is explicitly excluded here to trigger the model
            # default.
        }
        scenic_quality.execute(args)
        # Verify that the value summation matrix is what we expect it to be.
        # The weight of two of the points makes some sectors more valuable
        expected_value = numpy.array(
            [[4., 2., 0., 2., 14.],
             [0., 2.82842712, 2., 9.89949494, 5.],
             [0., 0., 24., 5., 0.],
             [0., 7.07106781, 5., 14.14213562, 5.],
             [10., 5., 0., 5., 20.]])
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_value.tif'), gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, value_matrix, rtol=0, atol=1e-6)
        # Verify that the sum of the viewsheds (which is weighted) is correct.
        expected_weighted_vshed = numpy.array(
            [[1., 1., 1., 1., 3.5],
             [0., 1., 1., 3.5, 2.5],
             [0., 0., 6., 2.5, 2.5],
             [0., 2.5, 2.5, 5., 2.5],
             [2.5, 2.5, 2.5, 2.5, 5.]], dtype=numpy.float32)
        vshed_raster_path = os.path.join(args['workspace_dir'], 'output',
                                         'vshed.tif')
        weighted_vshed_matrix = gdal.OpenEx(
            vshed_raster_path, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_weighted_vshed,
                                      weighted_vshed_matrix,
                                      rtol=0, atol=1e-6)
        # Test the visual quality raster since this run is weighted.
        expected_visual_quality = numpy.array(
            [[1, 1, 0, 1, 4],
             [0, 1, 1, 3, 3],
             [0, 0, 4, 3, 0],
             [0, 3, 3, 4, 3],
             [3, 3, 0, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual.tif')
        quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_exponential_valuation(self):
        """SQ: verify values on exponential valuation."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'valuation_function': 'exponential',
            'a_coef': 1,
            'b_coef': 1,
            'max_valuation_radius': 10.0,
            'do_valuation': True,
            'n_workers': -1,
        }
        scenic_quality.execute(args)
        # Verify that the value summation matrix is what we expect it to be.
        # The weight of two of the points makes some sectors more valuable
        expected_value = numpy.array(
            [[0.01831564, 0.13533528, 1., 0.13533528, 0.03663128],
             [0., 0.05910575, 0.13533528, 0.11821149, 0.13533528],
             [0., 0., 0.05494692, 0.13533528, 1.],
             [0., 0.05910575, 0.13533528, 0.11821149, 0.13533528],
             [0.01831564, 0.13533528, 1., 0.13533528, 0.03663128]])
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output', 'vshed_value.tif'),
            gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(expected_value, value_matrix, rtol=0, atol=1e-6)
    def test_logarithmic_valuation(self):
        """SQ: verify values on logarithmic valuation."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'valuation_function': 'logarithmic',
            'do_valuation': True,
            'a_coef': 1,
            'b_coef': 1,
            'max_valuation_radius': 10.0,
            'n_workers': -1,
        }
        scenic_quality.execute(args)
        # Verify that the value summation matrix is what we expect it to be.
        # The weight of two of the points makes some sectors more valuable
        expected_value = numpy.array(
            [[2.60943791, 2.09861229, 1., 2.09861229, 5.21887582],
             [0., 2.34245405, 2.09861229, 4.68490809, 2.09861229],
             [0., 0., 7.82831374, 2.09861229, 1.],
             [0., 2.34245405, 2.09861229, 4.68490809, 2.09861229],
             [2.60943791, 2.09861229, 1., 2.09861229, 5.21887582]])
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_value.tif'), gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, value_matrix, rtol=0, atol=1e-6)
    def test_visual_quality(self):
        """SQ: verify visual quality calculations."""
        from natcap.invest.scenic_quality import scenic_quality
        # 7 distinct structure counts, repeated down 5 rows.
        visible_structures = numpy.tile(
            numpy.array([3, 0, 0, 0, 6, 7, 8]), (5, 1))
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        driver = gdal.GetDriverByName('GTiff')
        raster = driver.Create(n_visible, 7, 5, 1, gdal.GDT_Int32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference the GDAL objects to flush and close the raster.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # Zero-count pixels map to quality 0; nonzero counts are binned into
        # categories 1-4 by increasing count.
        expected_visual_quality = numpy.tile(
            numpy.array([1, 0, 0, 0, 2, 3, 4]), (5, 1))
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_visual_quality_large_blocks(self):
        """SQ: verify visual quality on large blocks."""
        # This is a regression test for an issue encountered in the
        # percentiles algorithm. To exercise the fix, we need to
        # calculate percentiles on a raster that does not fit completely into
        # memory in a single percentile buffer.
        from natcap.invest.scenic_quality import scenic_quality
        shape = (512, 512)
        n_blocks = 5
        # Stack 5 square blocks vertically with counts 0, 2, 4, 6, 8.
        visible_structures = numpy.concatenate(
            [numpy.full(shape, n*2) for n in range(n_blocks)])
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        driver = gdal.GetDriverByName('GTiff')
        # Create() takes (xsize, ysize); the blocks are square so shape[0]
        # serves as the width and shape[1]*n_blocks as the height.
        raster = driver.Create(n_visible, shape[0], shape[1]*n_blocks,
                               1, gdal.GDT_Int32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference the GDAL objects to flush and close the raster.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # Each block of identical counts maps to one quality category.
        expected_visual_quality = numpy.concatenate(
            [numpy.full(shape, n) for n in range(n_blocks)])
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_visual_quality_low_count(self):
        """SQ: verify visual quality calculations for low pixel counts."""
        from natcap.invest.scenic_quality import scenic_quality
        # Includes a -1 (nodata) pixel and fewer distinct valid values than
        # quality classes.
        visible_structures = numpy.array([[-1, 3, 0, 0, 0, 3, 6, 7]])
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        driver = gdal.GetDriverByName('GTiff')
        raster = driver.Create(n_visible, 8, 1, 1, gdal.GDT_Int32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference band then raster to flush GDAL's write buffers.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # The nodata input pixel comes through as 255 -- presumably the
        # quality raster's nodata value (confirm in
        # _calculate_visual_quality).
        expected_visual_quality = numpy.array([[255, 2, 0, 0, 0, 2, 3, 4]])
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_visual_quality_floats(self):
        """SQ: verify visual quality calculations for floating-point vshed."""
        from natcap.invest.scenic_quality import scenic_quality
        visible_structures = numpy.array(
            [[-1, 3.33, 0, 0, 0, 3.66, 6.12, 7.8]])
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        driver = gdal.GetDriverByName('GTiff')
        raster = driver.Create(n_visible, 8, 1, 1, gdal.GDT_Float32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference band then raster to flush GDAL's write buffers.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # Nodata (-1) maps to 255.  3.33 and 3.66 land in different quality
        # classes (1 and 2), so values are classified as floats rather than
        # being rounded to a common integer first.
        expected_visual_quality = numpy.array([[255, 1, 0, 0, 0, 2, 3, 4]])
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
class ScenicQualityValidationTests(unittest.TestCase):
    """Tests for Scenic Quality validation."""
    def setUp(self):
        """Create a temporary workspace."""
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the temporary workspace after a test."""
        shutil.rmtree(self.workspace_dir)
    def test_missing_keys(self):
        """SQ Validate: assert missing keys."""
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        validation_errors = scenic_quality.validate({})  # empty args dict.
        invalid_keys = validation.get_invalid_keys(validation_errors)
        # These are the keys required unconditionally (no valuation).
        expected_missing_keys = set([
            'aoi_path',
            'dem_path',
            'refraction',
            'structure_path',
            'workspace_dir',
        ])
        self.assertEqual(invalid_keys, expected_missing_keys)
    def test_polynomial_required_keys(self):
        """SQ Validate: assert polynomial required keys."""
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        # Requesting polynomial valuation adds the coefficient keys to the
        # set of required args.
        args = {
            'valuation_function': 'polynomial',
            'do_valuation': True,
        }
        validation_errors = scenic_quality.validate(args)
        invalid_keys = validation.get_invalid_keys(validation_errors)
        self.assertEqual(
            invalid_keys,
            set(['a_coef',
                 'aoi_path',
                 'b_coef',
                 'dem_path',
                 'refraction',
                 'structure_path',
                 'workspace_dir',
                 'valuation_function', ])
        )
    def test_novaluation_required_keys(self):
        """SQ Validate: assert required keys without valuation."""
        # NOTE(review): this asserts exactly the same thing as
        # test_missing_keys (empty args dict); consider consolidating.
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        args = {}
        validation_errors = scenic_quality.validate(args)
        invalid_keys = validation.get_invalid_keys(validation_errors)
        expected_missing_keys = set([
            'aoi_path',
            'dem_path',
            'refraction',
            'structure_path',
            'workspace_dir',
        ])
        self.assertEqual(invalid_keys, expected_missing_keys)
    def test_bad_values(self):
        """SQ Validate: Assert we can catch various validation errors."""
        from natcap.invest.scenic_quality import scenic_quality
        # AOI path is missing
        args = {
            'workspace_dir': '',  # required key, missing value
            'aoi_path': '/bad/vector/path',
            'a_coef': 'foo',  # not a number
            'b_coef': -1,  # valid
            'dem_path': 'not/a/path',  # not a raster
            'refraction': "0.13",
            'max_valuation_radius': None,  # covers missing value.
            'structure_path': 'vector/missing',
            'valuation_function': 'bad function',
        }
        validation_errors = scenic_quality.validate(args)
        self.assertEqual(len(validation_errors), 6)
        # map single-key errors to their errors.
        single_key_errors = {}
        for keys, error in validation_errors:
            if len(keys) == 1:
                single_key_errors[keys[0]] = error
        # "0.13" is interpretable as a number, so refraction is valid.
        self.assertTrue('refraction' not in single_key_errors)
        self.assertEqual(
            single_key_errors['a_coef'], (
                "Value 'foo' could not be interpreted as a number"))
        self.assertEqual(
            single_key_errors['dem_path'], 'File not found')
        self.assertEqual(single_key_errors['structure_path'],
                         'File not found')
        self.assertEqual(single_key_errors['aoi_path'], 'File not found')
        self.assertTrue(
            single_key_errors['valuation_function'].startswith(
                'Value must be one of'))
    def test_dem_projected_in_m(self):
        """SQ Validate: the DEM must be projected in meters."""
        from natcap.invest.scenic_quality import scenic_quality
        from pygeoprocessing.testing import create_raster_on_disk
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)  # WGS84 is not projected.
        filepath = os.path.join(self.workspace_dir, 'dem.tif')
        create_raster_on_disk(
            [numpy.array([[1]])],
            origin=(0, 0),
            projection_wkt=srs.ExportToWkt(),
            nodata=-1,
            pixel_size=(1, -1),
            filename=filepath)
        args = {'dem_path': filepath}
        validation_errors = scenic_quality.validate(args, limit_to='dem_path')
        self.assertEqual(len(validation_errors), 1)
        self.assertTrue('must be projected in linear units' in
                        validation_errors[0][1])
class ViewshedTests(unittest.TestCase):
    """Tests for pygeoprocessing's viewshed."""
    def setUp(self):
        """Create a temporary workspace that's deleted later."""
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Clean up remaining files."""
        shutil.rmtree(self.workspace_dir)
    @staticmethod
    def create_dem(matrix, filepath, pixel_size=(1, 1), nodata=-1):
        """Create a DEM in WGS84 coordinate system.
        Parameters:
            matrix (numpy.array): A 2D numpy array of pixel values.
            filepath (string): The filepath where the new raster file will be
                written.
            pixel_size=(1, 1): The pixel size to use for the output raster.
            nodata=-1: The nodata value to use for the output raster.
        Returns:
            ``None``.
        """
        # Docstring fixed: it previously documented the default pixel size
        # as (1, -1), but the signature default is (1, 1).
        from pygeoprocessing.testing import create_raster_on_disk
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)  # WGS84
        wkt = srs.ExportToWkt()
        create_raster_on_disk(
            [matrix],
            origin=(0, 0),
            projection_wkt=wkt,
            nodata=nodata,
            pixel_size=pixel_size,
            filename=filepath)
    def test_pixels_not_square(self):
        """SQ Viewshed: exception raised when pixels are not square."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((20, 20))
        viewpoint = (10, 10)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 pixel_size=(1.111111, 1.12))
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        with self.assertRaises(AssertionError):
            viewshed((dem_filepath, 1), viewpoint, visibility_filepath)
    def test_viewpoint_not_overlapping_dem(self):
        """SQ Viewshed: exception raised when viewpoint is not over the DEM."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((20, 20))
        viewpoint = (-10, -10)  # well outside the DEM's extent.
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        with self.assertRaises(ValueError):
            viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                     aux_filepath=os.path.join(self.workspace_dir,
                                               'auxiliary.tif'))
    def test_max_distance(self):
        """SQ Viewshed: setting a max distance limits visibility distance."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((6, 6))
        viewpoint = (5, 5)
        max_dist = 4
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=1.0, max_distance=max_dist)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        # Pixels farther than max_dist from the viewpoint are nodata (255).
        # (A redundant numpy.zeros() initialization of expected_visibility
        # that was immediately overwritten has been removed.)
        expected_visibility = numpy.array(
            [[255, 255, 255, 255, 255, 255],
             [255, 255, 255, 255, 255, 0],
             [255, 255, 255, 1, 1, 1],
             [255, 255, 1, 1, 1, 1],
             [255, 255, 1, 1, 1, 1],
             [255, 0, 1, 1, 1, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_refractivity(self):
        """SQ Viewshed: refractivity partly compensates for earth's curvature."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.array([[2, 1, 1, 2, 1, 1, 1, 1, 1, 50]])
        viewpoint = (0, 0)
        matrix[viewpoint] = 2  # already 2 in the literal; kept for clarity.
        matrix[0, 3] = 2
        pixel_size = (1000, -1000)
        # pixels are 1km. With the viewpoint at an elevation of 1m,
        # the horizon should be about 3.6km out. A 50m structure 10km out
        # should be visible above the horizon.
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 pixel_size=pixel_size)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.1)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        # Because of refractivity calculations (and the size of the pixels),
        # the pixels farther to the right are visible despite being 'hidden'
        # behind the hill at (0,3). This is due to refractivity.
        expected_visibility = numpy.array(
            [[1, 1, 1, 1, 0, 0, 0, 0, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_intervening_nodata(self):
        """SQ Viewshed: intervening nodata does not affect visibility."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        nodata = 255
        matrix = numpy.array([[2, 2, nodata, 3]])
        viewpoint = (0, 0)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 nodata=nodata)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        # The nodata pixel itself is reported not visible, but the pixel
        # beyond it is still visible.
        expected_visibility = numpy.array(
            [[1, 1, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_nodata_undefined(self):
        """SQ Viewshed: assume a reasonable nodata value if none defined."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        nodata = None  # viewshed assumes an unlikely nodata value.
        matrix = numpy.array([[2, 2, 1, 3]])
        viewpoint = (0, 0)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 nodata=nodata)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.array(
            [[1, 1, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_block_size_check(self):
        """SQ Viewshed: exception raised when blocks not equal, power of 2."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        # 20x40 blocks: unequal and not powers of 2 -- both invalid.
        pygeoprocessing.testing.create_raster_on_disk(
            [numpy.ones((10, 10))], (0, 0), projection_wkt=srs.ExportToWkt(),
            nodata=-1, pixel_size=(1, -1),
            raster_driver_creation_tuple=(
                'GTIFF', ('TILED=NO', 'BIGTIFF=YES', 'COMPRESS=LZW',
                          'BLOCKXSIZE=20', 'BLOCKYSIZE=40')),
            filename=dem_filepath)
        with self.assertRaises(ValueError):
            viewshed(
                (dem_filepath, 1), (0, 0), visibility_filepath,
                aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
            )
    def test_view_from_valley(self):
        """SQ Viewshed: test visibility from within a pit."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        # Concentric rings of elevation 2 and 1 around a 0-elevation pit
        # at (5, 5), where the viewpoint sits.
        matrix = numpy.zeros((9, 9))
        matrix[5:8, 5:8] = 2
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 0
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 refraction_coeff=1.0,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'))
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        # Only the pit itself and the raised rings are visible.
        expected_visibility = numpy.zeros(visibility_matrix.shape)
        expected_visibility[matrix != 0] = 1
        expected_visibility[5, 5] = 1
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_tower_view_from_valley(self):
        """SQ Viewshed: test visibility from a 'tower' within a pit."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.zeros((9, 9))
        matrix[5:8, 5:8] = 2
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 0
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        # A 10-unit-tall viewpoint clears the surrounding rings, so the
        # whole DEM should be visible.
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 viewpoint_height=10,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'))
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.ones(visibility_matrix.shape)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_primitive_peak(self):
        """SQ Viewshed: looking down from a peak renders everything visible."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.zeros((8, 8))
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 2
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=1.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, numpy.ones(matrix.shape))
    def test_cliff_bottom_half_visibility(self):
        """SQ Viewshed: visibility for a cliff on bottom half of DEM."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[7:] = 10  # cliff at row 7
        viewpoint = (5, 10)
        matrix[viewpoint] = 5  # viewpoint
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=(viewpoint[1], viewpoint[0]),
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        # The cliff face (row 7) is visible; everything behind it is not.
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[8:] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_cliff_top_half_visibility(self):
        """SQ Viewshed: visibility for a cliff on top half of DEM."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:8] = 10  # cliff at row 8
        viewpoint = (10, 10)
        matrix[viewpoint] = 5  # viewpoint
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:7] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_cliff_left_half_visibility(self):
        """SQ Viewshed: visibility for a cliff on left half of DEM."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:, :8] = 10  # cliff at column 8
        viewpoint = (10, 10)
        matrix[viewpoint] = 5  # viewpoint
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:, :7] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_cliff_right_half_visibility(self):
        """SQ Viewshed: visibility for a cliff on right half of DEM."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:, 12:] = 10  # cliff at column 12 (comment was wrong: said 8)
        viewpoint = (10, 10)
        matrix[viewpoint] = 5  # viewpoint
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:, 13:] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
    def test_pillars(self):
        """SQ Viewshed: put a few pillars in a field, can't see behind them."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        # Put a couple of pillars in there.
        for pillar in (
                (2, 5),
                (18, 5),
                (7, 18)):
            matrix[pillar] = 10
        viewpoint = (10, 10)
        matrix[viewpoint] = 5  # so it stands out in the DEM
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        # Each pillar casts a shadow of 0s away from the viewpoint.
        expected_visibility = numpy.array(
            [[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
import unittest
import tempfile
import shutil
import os
import glob
from osgeo import gdal
from osgeo import osr
import pygeoprocessing.testing
from pygeoprocessing.testing import sampledata
from shapely.geometry import Polygon, Point
import numpy
_SRS = osr.SpatialReference()
_SRS.ImportFromEPSG(32731)
WKT = _SRS.ExportToWkt()
class ScenicQualityTests(unittest.TestCase):
    def setUp(self):
        """Create a temporary workspace for the test to use."""
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the temporary workspace created in setUp."""
        shutil.rmtree(self.workspace_dir)
    @staticmethod
    def create_dem(dem_path):
        """Write the standard 5x5 test DEM to ``dem_path``.

        The DEM is a diagonal-cross pattern of value 10 over a background
        of 2, with origin (2, -2) and 2-unit pixels in the module-level
        projected coordinate system (``WKT``).  Nodata is 255.

        Returns:
            ``None``.
        """
        from pygeoprocessing.testing import create_raster_on_disk
        dem_matrix = numpy.array(
            [[10, 2, 2, 2, 10],
             [2, 10, 2, 10, 2],
             [2, 2, 10, 2, 2],
             [2, 10, 2, 10, 2],
             [10, 2, 2, 2, 10]], dtype=numpy.int8)
        create_raster_on_disk(
            [dem_matrix],
            origin=(2, -2),
            projection_wkt=WKT,
            nodata=255,
            pixel_size=(2, -2),
            raster_driver_creation_tuple=(
                'GTIFF', ['TILED=YES',
                          'BIGTIFF=YES',
                          'COMPRESS=LZW']),
            filename=dem_path)
    @staticmethod
    def create_aoi(aoi_path):
        """Write a 10x10 square AOI covering the test DEM to ``aoi_path``."""
        sampledata.create_vector_on_disk(
            [Polygon([(2, -2), (2, -12), (12, -12), (12, -2), (2, -2)])],
            WKT, filename=aoi_path)
    @staticmethod
    def create_viewpoints(viewpoints_path, fields=None, attributes=None):
        """Write the four standard test viewpoints to ``viewpoints_path``.

        The DEM created by ``create_dem`` spans x in [2, 12], so the point
        at (1.0, -7.0) deliberately falls just outside it.  ``fields`` and
        ``attributes`` are forwarded to
        ``sampledata.create_vector_on_disk``.
        """
        sampledata.create_vector_on_disk(
            [Point(7.0, -3.0),
             Point(1.0, -7.0),
             Point(7.0, -11.0),
             Point(11.0, -7.0)],
            projection=WKT,
            fields=fields,
            attributes=attributes,
            filename=viewpoints_path)
def test_exception_when_no_structures_aoi_overlap(self):
from natcap.invest.scenic_quality import scenic_quality
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
ScenicQualityTests.create_dem(dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
ScenicQualityTests.create_viewpoints(viewpoints_path)
# AOI DEFINITELY doesn't overlap with the viewpoints.
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
sampledata.create_vector_on_disk(
[Polygon([(2, 2), (2, 12), (12, 12), (12, 2), (2, 2)])],
WKT, filename=aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'n_workers': -1,
}
with self.assertRaises(ValueError) as cm:
scenic_quality.execute(args)
self.assertTrue('found no intersection between' in str(cm.exception))
    def test_no_valuation(self):
        """SQ: model runs and skips valuation outputs when not requested."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(
            viewpoints_path,
            fields={'RADIUS': 'real',
                    'HEIGHT': 'real',
                    'WEIGHT': 'real'},
            attributes=[
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5}])
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        # No 'do_valuation' key: valuation defaults to off.
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'n_workers': -1,
        }
        scenic_quality.execute(args)
        # vshed_value.tif is a valuation output and must not be created.
        for output_filename, should_exist in (
                ('vshed_value.tif', False),
                ('vshed.tif', True),
                ('vshed_qual.tif', True)):
            full_filepath = os.path.join(
                args['workspace_dir'], 'output', output_filename)
            self.assertEqual(os.path.exists(full_filepath), should_exist)
        expected_visual_quality = numpy.array(
            [[1, 1, 1, 1, 4],
             [0, 1, 1, 4, 3],
             [0, 0, 4, 3, 3],
             [0, 3, 3, 4, 3],
             [3, 3, 3, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual.tif')
        quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
def test_invalid_valuation_function(self):
from natcap.invest.scenic_quality import scenic_quality
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
ScenicQualityTests.create_dem(dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
ScenicQualityTests.create_viewpoints(viewpoints_path)
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
ScenicQualityTests.create_aoi(aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'results_suffix': 'foo',
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'do_valuation': True,
'valuation_function': 'INVALID FUNCTION',
'a_coef': 1,
'b_coef': 0,
'max_valuation_radius': 10.0,
'n_workers': -1,
}
with self.assertRaises(ValueError):
scenic_quality.execute(args)
def test_error_invalid_viewpoints(self):
from natcap.invest.scenic_quality import scenic_quality
from pygeoprocessing.testing import create_raster_on_disk
dem_matrix = numpy.array(
[[-1, -1, 2, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]], dtype=numpy.int)
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
create_raster_on_disk(
[dem_matrix],
origin=(0, 0),
projection_wkt=WKT,
nodata=-1,
pixel_size=(0.5, -0.5),
filename=dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
sampledata.create_vector_on_disk(
[Point(1.25, -0.5),
Point(-1.0, -5.0),
Point(1.25, -1.5)],
WKT, filename=viewpoints_path)
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
sampledata.create_vector_on_disk(
[Polygon([(1, -1), (1, -2.5), (2.5, -2.5), (2.5, -1), (1, -1)])],
WKT, filename=aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'results_suffix': 'foo',
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'valuation_function': 'logarithmic',
'a_coef': 1,
'b_coef': 0,
'max_valuation_radius': 10.0,
'n_workers': -1,
}
with self.assertRaises(ValueError) as raised_error:
scenic_quality.execute(args)
self.assertTrue('No valid viewpoints found.' in
str(raised_error.exception))
    def test_viewshed_field_defaults(self):
        """SQ: run with no viewpoint fields; defaults and suffix applied."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        # Viewpoints carry no RADIUS/HEIGHT/WEIGHT fields, so the model's
        # defaults are exercised.
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        ScenicQualityTests.create_viewpoints(viewpoints_path)
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'results_suffix': 'foo',
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'valuation_function': 'linear',
            'do_valuation': True,
            'a_coef': 1,
            'b_coef': 0,
            'max_valuation_radius': 10.0,
            'n_workers': -1,
        }
        # Pre-seed the clipped-structures path with junk -- presumably to
        # verify the model replaces stale intermediate files rather than
        # tripping over them (NOTE(review): confirm intent).
        clipped_structures_path = os.path.join(args['workspace_dir'],
                                               'intermediate',
                                               'structures_clipped_foo.shp')
        os.makedirs(os.path.dirname(clipped_structures_path))
        with open(clipped_structures_path, 'w') as fake_file:
            fake_file.write('this is a vector :)')
        scenic_quality.execute(args)
        # Three of the four viewpoints overlap the DEM, so three
        # per-viewpoint visibility and value rasters are expected.
        self.assertEqual(len(glob.glob(os.path.join(
            args['workspace_dir'], 'intermediate', 'visibility*'))), 3)
        self.assertEqual(len(glob.glob(os.path.join(
            args['workspace_dir'], 'intermediate', 'value*'))), 3)
        expected_value = numpy.array(
            [[1, 1, 1, 1, 2],
             [0, 1, 1, 2, 1],
             [0, 0, 3, 1, 1],
             [0, 1, 1, 2, 1],
             [1, 1, 1, 1, 2]], dtype=numpy.int8)
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_value_foo.tif'), gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, value_matrix, rtol=0, atol=1e-6)
        # With a_coef=1, b_coef=0 (linear), value equals the raw visible-
        # structure count, so vshed matches the value raster.
        vshed_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_foo.tif'), gdal.OF_RASTER)
        vshed_band = vshed_raster.GetRasterBand(1)
        vshed_matrix = vshed_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, vshed_matrix, rtol=0, atol=1e-6)
        expected_visual_quality = numpy.array(
            [[3, 3, 3, 3, 4],
             [0, 3, 3, 4, 3],
             [0, 0, 4, 3, 3],
             [0, 3, 3, 4, 3],
             [3, 3, 3, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual_foo.tif')
        quality_matrix = gdal.OpenEx(visual_quality_raster,
                                     gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_viewshed_with_fields(self):
        """SQ: RADIUS/HEIGHT/WEIGHT fields drive distance and weighting."""
        from natcap.invest.scenic_quality import scenic_quality
        dem_path = os.path.join(self.workspace_dir, 'dem.tif')
        ScenicQualityTests.create_dem(dem_path)
        viewpoints_path = os.path.join(self.workspace_dir,
                                       'viewpoints.geojson')
        # Two viewpoints weighted 1.0, two weighted 2.5.
        ScenicQualityTests.create_viewpoints(
            viewpoints_path,
            fields={'RADIUS': 'real',
                    'HEIGHT': 'real',
                    'WEIGHT': 'real'},
            attributes=[
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 1.0},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5},
                {'RADIUS': 6.0, 'HEIGHT': 1.0, 'WEIGHT': 2.5}])
        aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
        ScenicQualityTests.create_aoi(aoi_path)
        # a_coef=0, b_coef=1: value is distance-dependent only.
        # No 'n_workers' key: the model's default is exercised.
        args = {
            'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
            'aoi_path': aoi_path,
            'structure_path': viewpoints_path,
            'dem_path': dem_path,
            'refraction': 0.13,
            'do_valuation': True,
            'valuation_function': 'linear',
            'a_coef': 0,
            'b_coef': 1,
            'max_valuation_radius': 10.0,
        }
        scenic_quality.execute(args)
        expected_value = numpy.array(
            [[4., 2., 0., 2., 14.],
             [0., 2.82842712, 2., 9.89949494, 5.],
             [0., 0., 24., 5., 0.],
             [0., 7.07106781, 5., 14.14213562, 5.],
             [10., 5., 0., 5., 20.]])
        value_raster = gdal.OpenEx(
            os.path.join(args['workspace_dir'], 'output',
                         'vshed_value.tif'), gdal.OF_RASTER)
        value_band = value_raster.GetRasterBand(1)
        value_matrix = value_band.ReadAsArray()
        numpy.testing.assert_allclose(
            expected_value, value_matrix, rtol=0, atol=1e-6)
        # The vshed raster sums per-viewpoint WEIGHTs, hence the 2.5s.
        expected_weighted_vshed = numpy.array(
            [[1., 1., 1., 1., 3.5],
             [0., 1., 1., 3.5, 2.5],
             [0., 0., 6., 2.5, 2.5],
             [0., 2.5, 2.5, 5., 2.5],
             [2.5, 2.5, 2.5, 2.5, 5.]], dtype=numpy.float32)
        vshed_raster_path = os.path.join(args['workspace_dir'], 'output',
                                         'vshed.tif')
        weighted_vshed_matrix = gdal.OpenEx(
            vshed_raster_path, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_weighted_vshed,
                                      weighted_vshed_matrix,
                                      rtol=0, atol=1e-6)
        expected_visual_quality = numpy.array(
            [[1, 1, 0, 1, 4],
             [0, 1, 1, 3, 3],
             [0, 0, 4, 3, 0],
             [0, 3, 3, 4, 3],
             [3, 3, 0, 3, 4]])
        visual_quality_raster = os.path.join(
            args['workspace_dir'], 'output', 'vshed_qual.tif')
        quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      quality_matrix,
                                      rtol=0, atol=1e-6)
def test_exponential_valuation(self):
from natcap.invest.scenic_quality import scenic_quality
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
ScenicQualityTests.create_dem(dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
ScenicQualityTests.create_viewpoints(viewpoints_path)
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
ScenicQualityTests.create_aoi(aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'valuation_function': 'exponential',
'a_coef': 1,
'b_coef': 1,
'max_valuation_radius': 10.0,
'do_valuation': True,
'n_workers': -1,
}
scenic_quality.execute(args)
expected_value = numpy.array(
[[0.01831564, 0.13533528, 1., 0.13533528, 0.03663128],
[0., 0.05910575, 0.13533528, 0.11821149, 0.13533528],
[0., 0., 0.05494692, 0.13533528, 1.],
[0., 0.05910575, 0.13533528, 0.11821149, 0.13533528],
[0.01831564, 0.13533528, 1., 0.13533528, 0.03663128]])
value_raster = gdal.OpenEx(
os.path.join(args['workspace_dir'], 'output', 'vshed_value.tif'),
gdal.OF_RASTER)
value_band = value_raster.GetRasterBand(1)
value_matrix = value_band.ReadAsArray()
numpy.testing.assert_allclose(expected_value, value_matrix, rtol=0, atol=1e-6)
def test_logarithmic_valuation(self):
from natcap.invest.scenic_quality import scenic_quality
dem_path = os.path.join(self.workspace_dir, 'dem.tif')
ScenicQualityTests.create_dem(dem_path)
viewpoints_path = os.path.join(self.workspace_dir,
'viewpoints.geojson')
ScenicQualityTests.create_viewpoints(viewpoints_path)
aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
ScenicQualityTests.create_aoi(aoi_path)
args = {
'workspace_dir': os.path.join(self.workspace_dir, 'workspace'),
'aoi_path': aoi_path,
'structure_path': viewpoints_path,
'dem_path': dem_path,
'refraction': 0.13,
'valuation_function': 'logarithmic',
'do_valuation': True,
'a_coef': 1,
'b_coef': 1,
'max_valuation_radius': 10.0,
'n_workers': -1,
}
scenic_quality.execute(args)
expected_value = numpy.array(
[[2.60943791, 2.09861229, 1., 2.09861229, 5.21887582],
[0., 2.34245405, 2.09861229, 4.68490809, 2.09861229],
[0., 0., 7.82831374, 2.09861229, 1.],
[0., 2.34245405, 2.09861229, 4.68490809, 2.09861229],
[2.60943791, 2.09861229, 1., 2.09861229, 5.21887582]])
value_raster = gdal.OpenEx(
os.path.join(args['workspace_dir'], 'output',
'vshed_value.tif'), gdal.OF_RASTER)
value_band = value_raster.GetRasterBand(1)
value_matrix = value_band.ReadAsArray()
numpy.testing.assert_allclose(
expected_value, value_matrix, rtol=0, atol=1e-6)
    def test_visual_quality(self):
        """Visual quality percentiles are assigned from a counts raster."""
        from natcap.invest.scenic_quality import scenic_quality
        # 5 identical rows of visible-structure counts.
        visible_structures = numpy.tile(
            numpy.array([3, 0, 0, 0, 6, 7, 8]), (5, 1))
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        # Write the counts as a 7x5 int32 GeoTIFF with nodata of -1.
        driver = gdal.GetDriverByName('GTiff')
        raster = driver.Create(n_visible, 7, 5, 1, gdal.GDT_Int32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference to flush and close the GDAL objects before reading.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # Zero counts stay 0; nonzero counts map onto quartiles 1-4.
        expected_visual_quality = numpy.tile(
            numpy.array([1, 0, 0, 0, 2, 3, 4]), (5, 1))
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
    def test_visual_quality_large_blocks(self):
        """Visual quality works across multiple raster blocks."""
        from natcap.invest.scenic_quality import scenic_quality
        # Stack several 512x512 blocks, each with a distinct constant value,
        # so the percentile calculation has to span block boundaries.
        shape = (512, 512)
        n_blocks = 5
        visible_structures = numpy.concatenate(
            [numpy.full(shape, n*2) for n in range(n_blocks)])
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        driver = gdal.GetDriverByName('GTiff')
        # Create(path, xsize, ysize, ...): 512 wide, 512*n_blocks tall.
        raster = driver.Create(n_visible, shape[0], shape[1]*n_blocks,
                               1, gdal.GDT_Int32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference to flush and close the GDAL objects before reading.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # Constant values 0, 2, 4, 6, 8 map onto quality classes 0-4.
        expected_visual_quality = numpy.concatenate(
            [numpy.full(shape, n) for n in range(n_blocks)])
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
def test_visual_quality_low_count(self):
from natcap.invest.scenic_quality import scenic_quality
visible_structures = numpy.array([[-1, 3, 0, 0, 0, 3, 6, 7]])
n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
visual_quality_raster = os.path.join(self.workspace_dir,
'visual_quality.tif')
driver = gdal.GetDriverByName('GTiff')
raster = driver.Create(n_visible, 8, 1, 1, gdal.GDT_Int32)
band = raster.GetRasterBand(1)
band.SetNoDataValue(-1)
band.WriteArray(visible_structures)
band = None
raster = None
scenic_quality._calculate_visual_quality(n_visible,
self.workspace_dir,
visual_quality_raster)
expected_visual_quality = numpy.array([[255, 2, 0, 0, 0, 2, 3, 4]])
visual_quality_matrix = gdal.OpenEx(
visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
numpy.testing.assert_allclose(expected_visual_quality,
visual_quality_matrix,
rtol=0, atol=1e-6)
    def test_visual_quality_floats(self):
        """Visual quality handles floating-point counts (weighted sums)."""
        from natcap.invest.scenic_quality import scenic_quality
        visible_structures = numpy.array(
            [[-1, 3.33, 0, 0, 0, 3.66, 6.12, 7.8]])
        n_visible = os.path.join(self.workspace_dir, 'n_visible.tif')
        visual_quality_raster = os.path.join(self.workspace_dir,
                                             'visual_quality.tif')
        # Write the 8x1 float32 raster with -1 as nodata.
        driver = gdal.GetDriverByName('GTiff')
        raster = driver.Create(n_visible, 8, 1, 1, gdal.GDT_Float32)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(-1)
        band.WriteArray(visible_structures)
        # Dereference to flush and close the GDAL objects before reading.
        band = None
        raster = None
        scenic_quality._calculate_visual_quality(n_visible,
                                                 self.workspace_dir,
                                                 visual_quality_raster)
        # 255 marks nodata; float values fall into quartiles 1-4.
        expected_visual_quality = numpy.array([[255, 1, 0, 0, 0, 2, 3, 4]])
        visual_quality_matrix = gdal.OpenEx(
            visual_quality_raster, gdal.OF_RASTER).ReadAsArray()
        numpy.testing.assert_allclose(expected_visual_quality,
                                      visual_quality_matrix,
                                      rtol=0, atol=1e-6)
class ScenicQualityValidationTests(unittest.TestCase):
    """Tests for Scenic Quality's ``validate`` entrypoint."""

    def setUp(self):
        """Create a scratch workspace for the test."""
        self.workspace_dir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove the scratch workspace."""
        shutil.rmtree(self.workspace_dir)

    def test_missing_keys(self):
        """Empty args report all required keys as invalid."""
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        validation_errors = scenic_quality.validate({})
        invalid_keys = validation.get_invalid_keys(validation_errors)
        expected_missing_keys = set([
            'aoi_path',
            'dem_path',
            'refraction',
            'structure_path',
            'workspace_dir',
        ])
        self.assertEqual(invalid_keys, expected_missing_keys)

    def test_polynomial_required_keys(self):
        """Polynomial valuation adds coefficient keys to requirements."""
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        args = {
            'valuation_function': 'polynomial',
            'do_valuation': True,
        }
        validation_errors = scenic_quality.validate(args)
        invalid_keys = validation.get_invalid_keys(validation_errors)
        self.assertEqual(
            invalid_keys,
            set(['a_coef',
                 'aoi_path',
                 'b_coef',
                 'dem_path',
                 'refraction',
                 'structure_path',
                 'workspace_dir',
                 'valuation_function', ])
        )

    def test_novaluation_required_keys(self):
        """Without valuation, only the base keys are required."""
        from natcap.invest.scenic_quality import scenic_quality
        from natcap.invest import validation
        args = {}
        validation_errors = scenic_quality.validate(args)
        invalid_keys = validation.get_invalid_keys(validation_errors)
        expected_missing_keys = set([
            'aoi_path',
            'dem_path',
            'refraction',
            'structure_path',
            'workspace_dir',
        ])
        self.assertEqual(invalid_keys, expected_missing_keys)

    def test_bad_values(self):
        """Invalid values produce specific per-key error messages."""
        from natcap.invest.scenic_quality import scenic_quality
        args = {
            'workspace_dir': '',
            'aoi_path': '/bad/vector/path',
            'a_coef': 'foo',  # not a number
            'b_coef': -1,
            'dem_path': 'not/a/path',
            'refraction': "0.13",
            'max_valuation_radius': None,
            'structure_path': 'vector/missing',
            'valuation_function': 'bad function',
        }
        validation_errors = scenic_quality.validate(args)
        self.assertEqual(len(validation_errors), 6)
        # Collect errors that apply to exactly one key.
        single_key_errors = {}
        for keys, error in validation_errors:
            if len(keys) == 1:
                single_key_errors[keys[0]] = error
        # A numeric string is acceptable for refraction.
        self.assertTrue('refraction' not in single_key_errors)
        self.assertEqual(
            single_key_errors['a_coef'], (
                "Value 'foo' could not be interpreted as a number"))
        self.assertEqual(
            single_key_errors['dem_path'], 'File not found')
        self.assertEqual(single_key_errors['structure_path'],
                         'File not found')
        self.assertEqual(single_key_errors['aoi_path'], 'File not found')
        self.assertTrue(
            single_key_errors['valuation_function'].startswith(
                'Value must be one of'))

    def test_dem_projected_in_m(self):
        """A geographic (degree-unit) DEM fails validation."""
        from natcap.invest.scenic_quality import scenic_quality
        from pygeoprocessing.testing import create_raster_on_disk
        # EPSG:4326 is unprojected (degrees), so it should be rejected.
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        filepath = os.path.join(self.workspace_dir, 'dem.tif')
        create_raster_on_disk(
            [numpy.array([[1]])],
            origin=(0, 0),
            projection_wkt=srs.ExportToWkt(),
            nodata=-1,
            pixel_size=(1, -1),
            filename=filepath)
        args = {'dem_path': filepath}
        validation_errors = scenic_quality.validate(args, limit_to='dem_path')
        self.assertEqual(len(validation_errors), 1)
        self.assertTrue('must be projected in linear units' in
                        validation_errors[0][1])
class ViewshedTests(unittest.TestCase):
    """Tests for the low-level viewshed algorithm."""

    def setUp(self):
        """Create a scratch workspace for the test."""
        self.workspace_dir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove the scratch workspace."""
        shutil.rmtree(self.workspace_dir)

    @staticmethod
    def create_dem(matrix, filepath, pixel_size=(1, 1), nodata=-1):
        """Write ``matrix`` to ``filepath`` as a single-band EPSG:4326 DEM
        with an origin of (0, 0)."""
        from pygeoprocessing.testing import create_raster_on_disk
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        wkt = srs.ExportToWkt()
        create_raster_on_disk(
            [matrix],
            origin=(0, 0),
            projection_wkt=wkt,
            nodata=nodata,
            pixel_size=pixel_size,
            filename=filepath)

    def test_pixels_not_square(self):
        """Non-square pixels are rejected with an AssertionError."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((20, 20))
        viewpoint = (10, 10)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 pixel_size=(1.111111, 1.12))
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        with self.assertRaises(AssertionError):
            viewshed((dem_filepath, 1), viewpoint, visibility_filepath)

    def test_viewpoint_not_overlapping_dem(self):
        """A viewpoint outside the DEM extent raises ValueError."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((20, 20))
        viewpoint = (-10, -10)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        with self.assertRaises(ValueError):
            viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                     aux_filepath=os.path.join(self.workspace_dir,
                                               'auxiliary.tif'))

    def test_max_distance(self):
        """Pixels beyond max_distance stay nodata (255)."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.ones((6, 6))
        viewpoint = (5, 5)
        max_dist = 4
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=1.0, max_distance=max_dist)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.zeros(matrix.shape)
        # 255 = outside the max distance (nodata); 1 = visible; 0 = hidden.
        expected_visibility = numpy.array(
            [[255, 255, 255, 255, 255, 255],
             [255, 255, 255, 255, 255, 0],
             [255, 255, 255, 1, 1, 1],
             [255, 255, 1, 1, 1, 1],
             [255, 255, 1, 1, 1, 1],
             [255, 0, 1, 1, 1, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_refractivity(self):
        """Atmospheric refraction affects visibility over long distances."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.array([[2, 1, 1, 2, 1, 1, 1, 1, 1, 50]])
        viewpoint = (0, 0)
        matrix[viewpoint] = 2
        matrix[0, 3] = 2
        # Large pixels so earth curvature/refraction matters.
        pixel_size = (1000, -1000)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 pixel_size=pixel_size)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.1)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.array(
            [[1, 1, 1, 1, 0, 0, 0, 0, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_intervening_nodata(self):
        """A nodata pixel blocks itself but not pixels behind it."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        nodata = 255
        matrix = numpy.array([[2, 2, nodata, 3]])
        viewpoint = (0, 0)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 nodata=nodata)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.array(
            [[1, 1, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_nodata_undefined(self):
        """The viewshed works on a DEM without a defined nodata value."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        nodata = None
        matrix = numpy.array([[2, 2, 1, 3]])
        viewpoint = (0, 0)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        ViewshedTests.create_dem(matrix, dem_filepath,
                                 nodata=nodata)
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=0.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.array(
            [[1, 1, 0, 1]], dtype=numpy.uint8)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_block_size_check(self):
        """Non-square internal blocks are rejected with ValueError."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        # Force mismatched GDAL block sizes (20x40).
        pygeoprocessing.testing.create_raster_on_disk(
            [numpy.ones((10, 10))], (0, 0), projection_wkt=srs.ExportToWkt(),
            nodata=-1, pixel_size=(1, -1),
            raster_driver_creation_tuple=(
                'GTIFF', ('TILED=NO', 'BIGTIFF=YES', 'COMPRESS=LZW',
                          'BLOCKXSIZE=20', 'BLOCKYSIZE=40')),
            filename=dem_filepath)
        with self.assertRaises(ValueError):
            viewshed(
                (dem_filepath, 1), (0, 0), visibility_filepath,
                aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
            )

    def test_view_from_valley(self):
        """From a pit, only the raised ring around it is visible."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.zeros((9, 9))
        matrix[5:8, 5:8] = 2
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 0
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 refraction_coeff=1.0,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'))
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        # Everything nonzero is visible, plus the viewpoint itself.
        expected_visibility = numpy.zeros(visibility_matrix.shape)
        expected_visibility[matrix != 0] = 1
        expected_visibility[5, 5] = 1
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_tower_view_from_valley(self):
        """A tall viewpoint in a pit can see over the surrounding ring."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.zeros((9, 9))
        matrix[5:8, 5:8] = 2
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 0
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 viewpoint_height=10,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'))
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        expected_visibility = numpy.ones(visibility_matrix.shape)
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_primitive_peak(self):
        """From the top of a peak, everything is visible."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.zeros((8, 8))
        matrix[4:7, 4:7] = 1
        matrix[5, 5] = 2
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed((dem_filepath, 1), (5, 5), visibility_filepath,
                 aux_filepath=os.path.join(self.workspace_dir,
                                           'auxiliary.tif'),
                 refraction_coeff=1.0)
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, numpy.ones(matrix.shape))

    def test_cliff_bottom_half_visibility(self):
        """A cliff below the viewpoint hides everything behind its edge."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[7:] = 10  # cliff at row 7
        viewpoint = (5, 10)
        matrix[viewpoint] = 5
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        # Note: viewpoint is passed as (x, y), transposed from (row, col).
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=(viewpoint[1], viewpoint[0]),
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[8:] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_cliff_top_half_visibility(self):
        """A cliff above the viewpoint hides everything behind its edge."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:8] = 10  # cliff occupying the top rows
        viewpoint = (10, 10)
        matrix[viewpoint] = 5
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:7] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_cliff_left_half_visibility(self):
        """A cliff left of the viewpoint hides everything behind its edge."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:, :8] = 10  # cliff occupying the left columns
        viewpoint = (10, 10)
        matrix[viewpoint] = 5
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:, :7] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_cliff_right_half_visibility(self):
        """A cliff right of the viewpoint hides everything behind its edge."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        matrix[:, 12:] = 10  # cliff occupying the right columns
        viewpoint = (10, 10)
        matrix[viewpoint] = 5
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        expected_visibility = numpy.ones(matrix.shape)
        expected_visibility[:, 13:] = 0
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)

    def test_pillars(self):
        """Isolated tall pillars cast visibility shadows away from the
        viewpoint."""
        from natcap.invest.scenic_quality.viewshed import viewshed
        matrix = numpy.empty((20, 20))
        matrix.fill(2)
        for pillar in (
                (2, 5),
                (18, 5),
                (7, 18)):
            matrix[pillar] = 10
        viewpoint = (10, 10)
        matrix[viewpoint] = 5
        dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
        visibility_filepath = os.path.join(self.workspace_dir,
                                           'visibility.tif')
        ViewshedTests.create_dem(matrix, dem_filepath)
        viewshed(
            dem_raster_path_band=(dem_filepath, 1),
            viewpoint=viewpoint,
            visibility_filepath=visibility_filepath,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
        # Zeros mark the shadows behind each pillar.
        expected_visibility = numpy.array(
            [[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
        visibility_band = visibility_raster.GetRasterBand(1)
        visibility_matrix = visibility_band.ReadAsArray()
        numpy.testing.assert_equal(visibility_matrix, expected_visibility)
| true | true |
1c3d3148417bdc3d66da11935ad180531d7ce2ba | 19,118 | py | Python | cubes/server/blueprint.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 1,020 | 2015-01-02T03:05:26.000Z | 2022-02-12T18:48:51.000Z | cubes/server/blueprint.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 259 | 2015-01-02T22:35:14.000Z | 2021-09-02T04:20:41.000Z | cubes/server/blueprint.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 288 | 2015-01-08T00:42:26.000Z | 2022-03-31T17:25:10.000Z | # -*- coding: utf-8 -*-
import json
import sys
import traceback
from collections import OrderedDict
from flask import Blueprint, Response, request, g, current_app, safe_join, make_response
from flask import render_template, redirect
from ..workspace import Workspace, SLICER_INFO_KEYS
from ..query import Cell, cut_from_dict
from ..query import SPLIT_DIMENSION_NAME
from ..errors import *
from ..formatters import JSONLinesGenerator, csv_generator
from .. import ext
from ..logging import get_logger
from .logging import configured_request_log_handlers, RequestLogger
from .logging import AsyncRequestLogger
from .errors import *
from .decorators import *
from .local import *
from .auth import NotAuthenticated
from cubes import __version__
# TODO: missing features from the original Werkzeug Slicer:
# * /locales and localization
# * default cube: /aggregate
# * caching
# * root / index
# * response.headers.add("Access-Control-Allow-Origin", "*")
try:
import cubes_search
except ImportError:
cubes_search = None
__all__ = (
"slicer",
"API_VERSION"
)
API_VERSION = 2
# Cross-origin resource sharing – 20 days cache
CORS_MAX_AGE = 1728000
slicer = Blueprint("slicer", __name__, template_folder="templates")
# Before
# ------
def _store_option(config, option, default, type_=None, allowed=None,
                  section="server"):
    """Copies the `option` into the application config dictionary. `default`
    is a default value, if there is no such option in `config`. `type_` can be
    `bool`, `int` or `string` (default). If `allowed` is specified, then the
    option should be only from the list of allowed options, otherwise a
    `ConfigurationError` exception is raised.
    """
    if config.has_option(section, option):
        if type_ == "bool":
            value = config.getboolean(section, option)
        elif type_ == "int":
            value = config.getint(section, option)
        else:
            value = config.get(section, option)
    else:
        value = default

    if allowed and value not in allowed:
        # Fixed typo in the user-facing message ("Invalued" -> "Invalid").
        raise ConfigurationError("Invalid value '%s' for option '%s'"
                                 % (value, option))

    # Stored on the blueprint-local config object, not on current_app itself.
    setattr(current_app.slicer, option, value)
@slicer.record_once
def initialize_slicer(state):
    """Create the workspace and configure the application context from the
    ``slicer.ini`` configuration."""

    with state.app.app_context():
        config = state.options["config"]
        logger = get_logger()

        # Create workspace and other app objects
        # We avoid pollution of the current_app context, as we are a Blueprint
        params = CustomDict()
        current_app.slicer = params
        current_app.slicer.config = config

        # FIXME: this is a workaround, see Workspace note about _options
        if "cubes_root" in state.options:
            _options = {"cubes_root": state.options["cubes_root"]}
        else:
            _options = {}

        # Create the workspace only once per application.
        if not hasattr(current_app, 'cubes_workspace'):
            current_app.cubes_workspace = Workspace(config, **_options)

        # Configure the application
        # FIXME XXX this shouldn't be in the "server" section
        _store_option(config, "prettyprint", False, "bool")
        _store_option(config, "json_record_limit", 1000, "int")
        _store_option(config, "hide_private_cuts", False, "bool")
        _store_option(config, "allow_cors_origin", None, "str")
        _store_option(config, "visualizer", None, "str")

        _store_option(config, "authentication", "none")

        # Instantiate the configured authenticator extension, if any.
        method = current_app.slicer.authentication
        if method is None or method == "none":
            current_app.slicer.authenticator = None
        else:
            if config.has_section("authentication"):
                options = dict(config.items("authentication"))
            else:
                options = {}

            current_app.slicer.authenticator = ext.authenticator(method,
                                                                 **options)
        logger.debug("Server authentication method: %s" % (method or "none"))

        # NOTE(review): `workspace` here is presumably the request-local
        # proxy from the star import of `.local` -- confirm it resolves
        # correctly outside of a request context.
        if not current_app.slicer.authenticator and workspace.authorizer:
            logger.warn("No authenticator specified, but workspace seems to "
                        "be using an authorizer")

        # Collect query loggers
        handlers = configured_request_log_handlers(config)

        if config.has_option('server', 'asynchronous_logging'):
            async_logging = config.getboolean("server", "asynchronous_logging")
        else:
            async_logging = False

        if async_logging:
            current_app.slicer.request_logger = AsyncRequestLogger(handlers)
        else:
            current_app.slicer.request_logger = RequestLogger(handlers)
# Before and After
# ================
@slicer.before_request
def process_common_parameters():
    """Copy per-request display options from the app config into ``g``."""
    # TODO: setup language

    g.json_record_limit = current_app.slicer.json_record_limit

    # The query string overrides the configured prettyprint default.
    prettyprint_arg = request.args.get("prettyprint")
    if prettyprint_arg is None:
        g.prettyprint = current_app.slicer.prettyprint
    else:
        g.prettyprint = str_to_bool(prettyprint_arg)
@slicer.before_request
def prepare_authorization():
    """Authenticate the request (if configured) and store the identity."""
    authenticator = current_app.slicer.authenticator
    identity = None

    if authenticator:
        try:
            identity = authenticator.authenticate(request)
        except NotAuthenticated:
            raise NotAuthenticatedError

    # Authorization
    # -------------
    g.auth_identity = identity
# Error Handler
# =============
@slicer.errorhandler(UserError)
def user_error_handler(e):
    """Render a ``UserError`` subclass as a JSON error response."""
    error_type = e.__class__.error_type

    error = OrderedDict()
    error["error"] = error_type
    error["message"] = str(e)

    # Optional hint and extra payload, if the exception provides them.
    hint = getattr(e, "hint", None)
    if hint:
        error["hint"] = hint

    if hasattr(e, "to_dict"):
        error.update(e.to_dict())

    return jsonify(error), server_error_codes.get(error_type, 400)
@slicer.errorhandler(404)
def page_not_found(e):
    """Return a JSON body for 404 responses."""
    payload = {
        "error": "not_found",
        "message": "The requested URL was not found on the server.",
        "hint": "If you entered the URL manually please check your "
                "spelling and try again."
    }
    return jsonify(payload), 404
@slicer.errorhandler(InternalError)
def server_error(e):
    """Log an ``InternalError`` with its traceback and return a JSON 500."""
    (exc_type, exc_value, exc_traceback) = sys.exc_info()
    exc_name = exc_type.__name__

    # Bind the logger explicitly: no module-level `logger` is defined in
    # this file, so the previous bare reference relied on a star-imported
    # name and could raise NameError while handling the original error.
    logger = get_logger()
    logger.error("Internal Cubes error ({}): {}".format(exc_name, exc_value))

    tb = traceback.format_exception(exc_type, exc_value,
                                    exc_traceback)
    logger.debug("Exception stack trace:\n{}".format("".join(tb)))

    error = {
        "error": "internal_server_error",
        "message": "Internal server error",
        "hint": "Server administrators can learn more about the error from "
                "the error logs (even more if they have 'debug' level)"
    }
    return jsonify(error), 500
# Endpoints
# =========
@slicer.route("/")
def show_index():
info = get_info()
has_about = any(key in info for key in SLICER_INFO_KEYS)
return render_template("index.html",
has_about=has_about,
**info)
@slicer.route("/version")
def show_version():
info = {
"version": __version__,
# Backward compatibility key
"server_version": __version__,
"api_version": API_VERSION
}
return jsonify(info)
def get_info():
    """Assemble the server info payload: workspace metadata, limits,
    calendar settings and authentication details."""
    info = OrderedDict(workspace.info or {})

    info["json_record_limit"] = current_app.slicer.json_record_limit
    info["cubes_version"] = __version__
    info["timezone"] = workspace.calendar.timezone_name
    info["first_weekday"] = workspace.calendar.first_weekday
    info["api_version"] = API_VERSION

    # authentication
    authinfo = {"type": current_app.slicer.authentication or "none"}

    if g.auth_identity:
        authinfo['identity'] = g.auth_identity

    authenticator = current_app.slicer.authenticator
    if authenticator:
        authinfo.update(authenticator.info_dict(request))

    info['authentication'] = authinfo

    return info
@slicer.route("/info")
def show_info():
return jsonify(get_info())
@slicer.route("/cubes")
def list_cubes():
cube_list = workspace.list_cubes(g.auth_identity)
# TODO: cache per-identity
return jsonify(cube_list)
@slicer.route("/cube/<cube_name>/model")
@requires_cube
def cube_model(cube_name):
if workspace.authorizer:
hier_limits = workspace.authorizer.hierarchy_limits(g.auth_identity,
cube_name)
else:
hier_limits = None
response = g.cube.to_dict(expand_dimensions=True,
with_mappings=False,
full_attribute_names=True,
create_label=True,
hierarchy_limits=hier_limits)
response["features"] = workspace.cube_features(g.cube)
return jsonify(response)
@slicer.route("/cube/<cube_name>/aggregate")
@requires_browser
@log_request("aggregate", "aggregates")
def aggregate(cube_name):
cube = g.cube
output_format = validated_parameter(request.args, "format",
values=["json", "csv", 'xlsx'],
default="json")
header_type = validated_parameter(request.args, "header",
values=["names", "labels", "none"],
default="labels")
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.lower().split(',')
else:
fields = None
# Aggregates
# ----------
aggregates = []
for agg in request.args.getlist("aggregates") or []:
aggregates += agg.split("|")
drilldown = []
ddlist = request.args.getlist("drilldown")
if ddlist:
for ddstring in ddlist:
drilldown += ddstring.split("|")
prepare_cell("split", "split")
result = g.browser.aggregate(g.cell,
aggregates=aggregates,
drilldown=drilldown,
split=g.split,
page=g.page,
page_size=g.page_size,
order=g.order)
# Hide cuts that were generated internally (default: don't)
if current_app.slicer.hide_private_cuts:
result.cell = result.cell.public_cell()
if output_format == "json":
return jsonify(result)
elif output_format != "csv":
raise RequestError("unknown response format '%s'" % output_format)
# csv
if header_type == "names":
header = result.labels
elif header_type == "labels":
header = []
for l in result.labels:
# TODO: add a little bit of polish to this
if l == SPLIT_DIMENSION_NAME:
header.append('Matches Filters')
else:
header += [ attr.label or attr.name for attr in cube.get_attributes([l], aggregated=True) ]
else:
header = None
fields = result.labels
generator = csv_generator(result,
fields,
include_header=bool(header),
header=header)
headers = {"Content-Disposition": 'attachment; filename="aggregate.csv"'}
return Response(generator,
mimetype='text/csv',
headers=headers)
@slicer.route("/cube/<cube_name>/facts")
@requires_browser
@log_request("facts", "fields")
def cube_facts(cube_name):
# Request parameters
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.split(',')
else:
fields = None
# fields contain attribute names
if fields:
attributes = g.cube.get_attributes(fields)
else:
attributes = g.cube.all_fact_attributes
# Construct the field list
fields = [attr.ref for attr in attributes]
# Get the result
facts = g.browser.facts(g.cell,
fields=fields,
order=g.order,
page=g.page,
page_size=g.page_size)
# Add cube key to the fields (it is returned in the result)
fields.insert(0, g.cube.key or "__fact_key__")
# Construct the header
labels = [attr.label or attr.name for attr in attributes]
labels.insert(0, g.cube.key or "__fact_key__")
return formatted_response(facts, fields, labels)
@slicer.route("/cube/<cube_name>/fact/<fact_id>")
@requires_browser
def cube_fact(cube_name, fact_id):
fact = g.browser.fact(fact_id)
if fact:
return jsonify(fact)
else:
raise NotFoundError(fact_id, "fact",
message="No fact with id '%s'" % fact_id)
@slicer.route("/cube/<cube_name>/members/<dimension_name>")
@requires_browser
@log_request("members")
def cube_members(cube_name, dimension_name):
# TODO: accept level name
depth = request.args.get("depth")
level = request.args.get("level")
if depth and level:
raise RequestError("Both depth and level provided, use only one "
"(preferably level)")
if depth:
try:
depth = int(depth)
except ValueError:
raise RequestError("depth should be an integer")
try:
dimension = g.cube.dimension(dimension_name)
except KeyError:
raise NotFoundError(dimension_name, "dimension",
message="Dimension '%s' was not found" % dimension_name)
hier_name = request.args.get("hierarchy")
hierarchy = dimension.hierarchy(hier_name)
if not depth and not level:
depth = len(hierarchy)
elif level:
depth = hierarchy.level_index(level) + 1
values = g.browser.members(g.cell,
dimension,
depth=depth,
hierarchy=hierarchy,
page=g.page,
page_size=g.page_size)
result = {
"dimension": dimension.name,
"hierarchy": hierarchy.name,
"depth": len(hierarchy) if depth is None else depth,
"data": values
}
# Collect fields and labels
attributes = []
for level in hierarchy.levels_for_depth(depth):
attributes += level.attributes
fields = [attr.ref for attr in attributes]
labels = [attr.label or attr.name for attr in attributes]
return formatted_response(result, fields, labels, iterable=values)
@slicer.route("/cube/<cube_name>/cell")
@requires_browser
def cube_cell(cube_name):
details = g.browser.cell_details(g.cell)
if not g.cell:
g.cell = Cell(g.cube)
cell_dict = g.cell.to_dict()
for cut, detail in zip(cell_dict["cuts"], details):
cut["details"] = detail
return jsonify(cell_dict)
@slicer.route("/cube/<cube_name>/report", methods=["GET", "POST"])
@requires_browser
def cube_report(cube_name):
report_request = json.loads(request.data)
try:
queries = report_request["queries"]
except KeyError:
raise RequestError("Report request does not contain 'queries' key")
cell_cuts = report_request.get("cell")
if cell_cuts:
# Override URL cut with the one in report
cuts = [cut_from_dict(cut) for cut in cell_cuts]
cell = Cell(g.cube, cuts)
logger.info("using cell from report specification (URL parameters "
"are ignored)")
if workspace.authorizer:
cell = workspace.authorizer.restricted_cell(g.auth_identity,
cube=g.cube,
cell=cell)
else:
if not g.cell:
cell = Cell(g.cube)
else:
cell = g.cell
result = g.browser.report(cell, queries)
return jsonify(result)
@slicer.route("/cube/<cube_name>/search")
def cube_search(cube_name):
# TODO: this is ported from old Werkzeug slicer, requires revision
config = current_app.config
if config.has_section("search"):
options = dict(config.items("search"))
engine_name = options.pop("engine")
else:
raise ConfigurationError("Search engine is not configured.")
logger.debug("using search engine: %s" % engine_name)
search_engine = cubes_search.create_searcher(engine_name,
browser=g.browser,
locales=g.locales,
**options)
dimension = request.args.get("dimension")
if not dimension:
raise RequestError("No search dimension provided")
query = request.args.get("query")
if not query:
raise RequestError("No search query provided")
locale = g.locale or g.locales[0]
logger.debug("searching for '%s' in %s, locale %s"
% (query, dimension, locale))
search_result = search_engine.search(query, dimension, locale=locale)
result = {
"matches": search_result.dimension_matches(dimension),
"dimension": dimension,
"total_found": search_result.total_found,
"locale": locale
}
if search_result.error:
result["error"] = search_result.error
if search_result.warning:
result["warning"] = search_result.warning
return jsonify(result)
@slicer.route("/logout")
def logout():
if current_app.slicer.authenticator:
return current_app.slicer.authenticator.logout(request, g.auth_identity)
else:
return "logged out"
@slicer.route("/visualizer/")
@slicer.route("/visualizer/index.html")
def get_visualizer():
viz = current_app.slicer.visualizer
if viz:
return redirect(viz)
else:
raise PageNotFoundError("Visualizer not configured")
@slicer.after_request
def add_cors_headers(response):
    """Add Cross-origin resource sharing headers."""
    # No-op unless `allow_cors_origin` is configured and non-empty.
    origin = current_app.slicer.allow_cors_origin
    if origin and len(origin):
        if request.method == 'OPTIONS':
            response.headers['Access-Control-Allow-Headers'] = 'X-Requested-With'
        # OPTIONS preflight requests need to receive origin back instead of wildcard
        if origin == '*':
            response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', origin)
        else:
            response.headers['Access-Control-Allow-Origin'] = origin
        response.headers['Access-Control-Allow-Credentials'] = 'true'
        response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
        response.headers['Access-Control-Max-Age'] = CORS_MAX_AGE
    return response
| 30.5888 | 107 | 0.61293 |
import json
import sys
import traceback
from collections import OrderedDict
from flask import Blueprint, Response, request, g, current_app, safe_join, make_response
from flask import render_template, redirect
from ..workspace import Workspace, SLICER_INFO_KEYS
from ..query import Cell, cut_from_dict
from ..query import SPLIT_DIMENSION_NAME
from ..errors import *
from ..formatters import JSONLinesGenerator, csv_generator
from .. import ext
from ..logging import get_logger
from .logging import configured_request_log_handlers, RequestLogger
from .logging import AsyncRequestLogger
from .errors import *
from .decorators import *
from .local import *
from .auth import NotAuthenticated
from cubes import __version__
try:
import cubes_search
except ImportError:
cubes_search = None
__all__ = (
"slicer",
"API_VERSION"
)
API_VERSION = 2
CORS_MAX_AGE = 1728000
slicer = Blueprint("slicer", __name__, template_folder="templates")
def _store_option(config, option, default, type_=None, allowed=None,
                  section="server"):
    """Read `option` from the config `section` (falling back to `default`),
    optionally coerce it ("bool"/"int"), validate it against `allowed`,
    and store it as an attribute on `current_app.slicer`.

    Raises ConfigurationError when the value is not among `allowed`.
    """
    if config.has_option(section, option):
        if type_ == "bool":
            value = config.getboolean(section, option)
        elif type_ == "int":
            value = config.getint(section, option)
        else:
            value = config.get(section, option)
    else:
        value = default
    if allowed and value not in allowed:
        # Fixed typo in the user-facing message ("Invalued" -> "Invalid").
        raise ConfigurationError("Invalid value '%s' for option '%s'"
                                 % (value, option))
    setattr(current_app.slicer, option, value)
@slicer.record_once
def initialize_slicer(state):
    """One-time blueprint setup: create the per-app slicer configuration
    namespace, the workspace, the authenticator and the request loggers.

    Runs once when the blueprint is registered on an application.
    """
    with state.app.app_context():
        config = state.options["config"]
        logger = get_logger()
        # Attribute-style config bag shared by the request handlers.
        params = CustomDict()
        current_app.slicer = params
        current_app.slicer.config = config
        if "cubes_root" in state.options:
            _options = {"cubes_root": state.options["cubes_root"]}
        else:
            _options = {}
        # Create the workspace only once per application.
        if not hasattr(current_app, 'cubes_workspace'):
            current_app.cubes_workspace = Workspace(config, **_options)
        _store_option(config, "prettyprint", False, "bool")
        _store_option(config, "json_record_limit", 1000, "int")
        _store_option(config, "hide_private_cuts", False, "bool")
        _store_option(config, "allow_cors_origin", None, "str")
        _store_option(config, "visualizer", None, "str")
        _store_option(config, "authentication", "none")
        method = current_app.slicer.authentication
        if method is None or method == "none":
            current_app.slicer.authenticator = None
        else:
            if config.has_section("authentication"):
                options = dict(config.items("authentication"))
            else:
                options = {}
            current_app.slicer.authenticator = ext.authenticator(method,
                                                                 **options)
        logger.debug("Server authentication method: %s" % (method or "none"))
        # Authorizer without authenticator is a likely misconfiguration.
        if not current_app.slicer.authenticator and workspace.authorizer:
            logger.warn("No authenticator specified, but workspace seems to "
                        "be using an authorizer")
        # Collect query loggers
        handlers = configured_request_log_handlers(config)
        if config.has_option('server', 'asynchronous_logging'):
            async_logging = config.getboolean("server", "asynchronous_logging")
        else:
            async_logging = False
        if async_logging:
            current_app.slicer.request_logger = AsyncRequestLogger(handlers)
        else:
            current_app.slicer.request_logger = RequestLogger(handlers)
# Before and After
# ================
@slicer.before_request
def process_common_parameters():
    """Populate request-scoped defaults (g.json_record_limit, g.prettyprint)
    from app config, honoring a per-request ``prettyprint`` query override."""
    # TODO: setup language
    # Copy from the application context
    g.json_record_limit = current_app.slicer.json_record_limit
    if "prettyprint" in request.args:
        g.prettyprint = str_to_bool(request.args.get("prettyprint"))
    else:
        g.prettyprint = current_app.slicer.prettyprint
@slicer.before_request
def prepare_authorization():
    """Authenticate the request (when an authenticator is configured) and
    stash the resulting identity in ``g.auth_identity`` (None otherwise).

    Translates the authenticator's NotAuthenticated into the HTTP-level
    NotAuthenticatedError.
    """
    if current_app.slicer.authenticator:
        try:
            identity = current_app.slicer.authenticator.authenticate(request)
        except NotAuthenticated as e:
            raise NotAuthenticatedError
    else:
        identity = None
    # Authorization
    # -------------
    g.auth_identity = identity
# Error Handler
# =============
@slicer.errorhandler(UserError)
def user_error_handler(e):
    """Render UserError subclasses as a JSON error body.

    The HTTP status is looked up from ``server_error_codes`` by the
    exception's ``error_type`` (default 400); optional ``hint`` and
    ``to_dict()`` contributions are merged into the payload.
    """
    error_type = e.__class__.error_type
    error = OrderedDict()
    error["error"] = error_type
    error["message"] = str(e)
    if hasattr(e, "hint") and e.hint:
        error["hint"] = e.hint
    if hasattr(e, "to_dict"):
        error.update(e.to_dict())
    code = server_error_codes.get(error_type, 400)
    return jsonify(error), code
@slicer.errorhandler(404)
def page_not_found(e):
    """Render HTTP 404 as a JSON error body instead of the HTML default."""
    error = {
        "error": "not_found",
        "message": "The requested URL was not found on the server.",
        "hint": "If you entered the URL manually please check your "
                "spelling and try again."
    }
    return jsonify(error), 404
@slicer.errorhandler(InternalError)
def server_error(e):
    """Log internal errors (traceback at debug level) and return a generic
    JSON 500 response that does not leak details to the client."""
    (exc_type, exc_value, exc_traceback) = sys.exc_info()
    exc_name = exc_type.__name__
    logger.error("Internal Cubes error ({}): {}".format(exc_name, exc_value))
    tb = traceback.format_exception(exc_type, exc_value,
                                    exc_traceback)
    logger.debug("Exception stack trace:\n{}".format("".join(tb)))
    error = {
        "error": "internal_server_error",
        "message": "Internal server error",
        "hint": "Server administrators can learn more about the error from "
                "the error logs (even more if they have 'debug' level)"
    }
    return jsonify(error), 500
# Endpoints
# =========
@slicer.route("/")
def show_index():
info = get_info()
has_about = any(key in info for key in SLICER_INFO_KEYS)
return render_template("index.html",
has_about=has_about,
**info)
@slicer.route("/version")
def show_version():
info = {
"version": __version__,
# Backward compatibility key
"server_version": __version__,
"api_version": API_VERSION
}
return jsonify(info)
def get_info():
if workspace.info:
info = OrderedDict(workspace.info)
else:
info = OrderedDict()
info["json_record_limit"] = current_app.slicer.json_record_limit
info["cubes_version"] = __version__
info["timezone"] = workspace.calendar.timezone_name
info["first_weekday"] = workspace.calendar.first_weekday
info["api_version"] = API_VERSION
# authentication
authinfo = {}
authinfo["type"] = (current_app.slicer.authentication or "none")
if g.auth_identity:
authinfo['identity'] = g.auth_identity
if current_app.slicer.authenticator:
ainfo = current_app.slicer.authenticator.info_dict(request)
authinfo.update(ainfo)
info['authentication'] = authinfo
return info
@slicer.route("/info")
def show_info():
return jsonify(get_info())
@slicer.route("/cubes")
def list_cubes():
cube_list = workspace.list_cubes(g.auth_identity)
# TODO: cache per-identity
return jsonify(cube_list)
@slicer.route("/cube/<cube_name>/model")
@requires_cube
def cube_model(cube_name):
if workspace.authorizer:
hier_limits = workspace.authorizer.hierarchy_limits(g.auth_identity,
cube_name)
else:
hier_limits = None
response = g.cube.to_dict(expand_dimensions=True,
with_mappings=False,
full_attribute_names=True,
create_label=True,
hierarchy_limits=hier_limits)
response["features"] = workspace.cube_features(g.cube)
return jsonify(response)
@slicer.route("/cube/<cube_name>/aggregate")
@requires_browser
@log_request("aggregate", "aggregates")
def aggregate(cube_name):
cube = g.cube
output_format = validated_parameter(request.args, "format",
values=["json", "csv", 'xlsx'],
default="json")
header_type = validated_parameter(request.args, "header",
values=["names", "labels", "none"],
default="labels")
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.lower().split(',')
else:
fields = None
# Aggregates
# ----------
aggregates = []
for agg in request.args.getlist("aggregates") or []:
aggregates += agg.split("|")
drilldown = []
ddlist = request.args.getlist("drilldown")
if ddlist:
for ddstring in ddlist:
drilldown += ddstring.split("|")
prepare_cell("split", "split")
result = g.browser.aggregate(g.cell,
aggregates=aggregates,
drilldown=drilldown,
split=g.split,
page=g.page,
page_size=g.page_size,
order=g.order)
# Hide cuts that were generated internally (default: don't)
if current_app.slicer.hide_private_cuts:
result.cell = result.cell.public_cell()
if output_format == "json":
return jsonify(result)
elif output_format != "csv":
raise RequestError("unknown response format '%s'" % output_format)
if header_type == "names":
header = result.labels
elif header_type == "labels":
header = []
for l in result.labels:
if l == SPLIT_DIMENSION_NAME:
header.append('Matches Filters')
else:
header += [ attr.label or attr.name for attr in cube.get_attributes([l], aggregated=True) ]
else:
header = None
fields = result.labels
generator = csv_generator(result,
fields,
include_header=bool(header),
header=header)
headers = {"Content-Disposition": 'attachment; filename="aggregate.csv"'}
return Response(generator,
mimetype='text/csv',
headers=headers)
@slicer.route("/cube/<cube_name>/facts")
@requires_browser
@log_request("facts", "fields")
def cube_facts(cube_name):
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.split(',')
else:
fields = None
if fields:
attributes = g.cube.get_attributes(fields)
else:
attributes = g.cube.all_fact_attributes
fields = [attr.ref for attr in attributes]
facts = g.browser.facts(g.cell,
fields=fields,
order=g.order,
page=g.page,
page_size=g.page_size)
fields.insert(0, g.cube.key or "__fact_key__")
labels = [attr.label or attr.name for attr in attributes]
labels.insert(0, g.cube.key or "__fact_key__")
return formatted_response(facts, fields, labels)
@slicer.route("/cube/<cube_name>/fact/<fact_id>")
@requires_browser
def cube_fact(cube_name, fact_id):
fact = g.browser.fact(fact_id)
if fact:
return jsonify(fact)
else:
raise NotFoundError(fact_id, "fact",
message="No fact with id '%s'" % fact_id)
@slicer.route("/cube/<cube_name>/members/<dimension_name>")
@requires_browser
@log_request("members")
def cube_members(cube_name, dimension_name):
depth = request.args.get("depth")
level = request.args.get("level")
if depth and level:
raise RequestError("Both depth and level provided, use only one "
"(preferably level)")
if depth:
try:
depth = int(depth)
except ValueError:
raise RequestError("depth should be an integer")
try:
dimension = g.cube.dimension(dimension_name)
except KeyError:
raise NotFoundError(dimension_name, "dimension",
message="Dimension '%s' was not found" % dimension_name)
hier_name = request.args.get("hierarchy")
hierarchy = dimension.hierarchy(hier_name)
if not depth and not level:
depth = len(hierarchy)
elif level:
depth = hierarchy.level_index(level) + 1
values = g.browser.members(g.cell,
dimension,
depth=depth,
hierarchy=hierarchy,
page=g.page,
page_size=g.page_size)
result = {
"dimension": dimension.name,
"hierarchy": hierarchy.name,
"depth": len(hierarchy) if depth is None else depth,
"data": values
}
attributes = []
for level in hierarchy.levels_for_depth(depth):
attributes += level.attributes
fields = [attr.ref for attr in attributes]
labels = [attr.label or attr.name for attr in attributes]
return formatted_response(result, fields, labels, iterable=values)
@slicer.route("/cube/<cube_name>/cell")
@requires_browser
def cube_cell(cube_name):
details = g.browser.cell_details(g.cell)
if not g.cell:
g.cell = Cell(g.cube)
cell_dict = g.cell.to_dict()
for cut, detail in zip(cell_dict["cuts"], details):
cut["details"] = detail
return jsonify(cell_dict)
@slicer.route("/cube/<cube_name>/report", methods=["GET", "POST"])
@requires_browser
def cube_report(cube_name):
report_request = json.loads(request.data)
try:
queries = report_request["queries"]
except KeyError:
raise RequestError("Report request does not contain 'queries' key")
cell_cuts = report_request.get("cell")
if cell_cuts:
cuts = [cut_from_dict(cut) for cut in cell_cuts]
cell = Cell(g.cube, cuts)
logger.info("using cell from report specification (URL parameters "
"are ignored)")
if workspace.authorizer:
cell = workspace.authorizer.restricted_cell(g.auth_identity,
cube=g.cube,
cell=cell)
else:
if not g.cell:
cell = Cell(g.cube)
else:
cell = g.cell
result = g.browser.report(cell, queries)
return jsonify(result)
@slicer.route("/cube/<cube_name>/search")
def cube_search(cube_name):
config = current_app.config
if config.has_section("search"):
options = dict(config.items("search"))
engine_name = options.pop("engine")
else:
raise ConfigurationError("Search engine is not configured.")
logger.debug("using search engine: %s" % engine_name)
search_engine = cubes_search.create_searcher(engine_name,
browser=g.browser,
locales=g.locales,
**options)
dimension = request.args.get("dimension")
if not dimension:
raise RequestError("No search dimension provided")
query = request.args.get("query")
if not query:
raise RequestError("No search query provided")
locale = g.locale or g.locales[0]
logger.debug("searching for '%s' in %s, locale %s"
% (query, dimension, locale))
search_result = search_engine.search(query, dimension, locale=locale)
result = {
"matches": search_result.dimension_matches(dimension),
"dimension": dimension,
"total_found": search_result.total_found,
"locale": locale
}
if search_result.error:
result["error"] = search_result.error
if search_result.warning:
result["warning"] = search_result.warning
return jsonify(result)
@slicer.route("/logout")
def logout():
if current_app.slicer.authenticator:
return current_app.slicer.authenticator.logout(request, g.auth_identity)
else:
return "logged out"
@slicer.route("/visualizer/")
@slicer.route("/visualizer/index.html")
def get_visualizer():
viz = current_app.slicer.visualizer
if viz:
return redirect(viz)
else:
raise PageNotFoundError("Visualizer not configured")
@slicer.after_request
def add_cors_headers(response):
origin = current_app.slicer.allow_cors_origin
if origin and len(origin):
if request.method == 'OPTIONS':
response.headers['Access-Control-Allow-Headers'] = 'X-Requested-With'
if origin == '*':
response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', origin)
else:
response.headers['Access-Control-Allow-Origin'] = origin
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
response.headers['Access-Control-Max-Age'] = CORS_MAX_AGE
return response
| true | true |
1c3d314ab5aff7ff9f6fbd398f0e05377b19e6da | 15,105 | py | Python | python/spark_sklearn/tests/test_keyed_models.py | vedantja/spark-sklearn | 349f8485382d76417593b178036de2a9f9dbba63 | [
"Apache-2.0"
] | null | null | null | python/spark_sklearn/tests/test_keyed_models.py | vedantja/spark-sklearn | 349f8485382d76417593b178036de2a9f9dbba63 | [
"Apache-2.0"
] | null | null | null | python/spark_sklearn/tests/test_keyed_models.py | vedantja/spark-sklearn | 349f8485382d76417593b178036de2a9f9dbba63 | [
"Apache-2.0"
] | null | null | null |
from itertools import chain, repeat, cycle
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN, KMeans
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import unittest
from pyspark.sql.types import *
from pyspark.ml.linalg import Vectors
import sklearn.base
from spark_sklearn.keyed_models import KeyedEstimator, KeyedModel, SparkSklearnEstimator
from spark_sklearn.test_utils import fixtureReuseSparkSession, assertPandasAlmostEqual, RandomTest
def _sortByComponentWeight(pca):
    """Return (components, ratios) arrays ordered by ascending variance ratio.

    Makes PCA comparisons invariant to component ordering.
    """
    pairs = sorted(zip(pca.components_, pca.explained_variance_ratio_),
                   key=lambda pair: pair[1])
    return tuple(np.array(column) for column in zip(*pairs))
def _assertPandasAlmostEqual(actual, expected, sortby):
    """Compare two pandas DataFrames approximately, first converting any
    embedded estimators to order-invariant representations of their fitted
    parameters, and sorting both frames by `sortby` columns."""
    def convert_estimators(x): # note convertion makes estimators invariant to training order.
        # Unwrap the Spark-side wrapper to the underlying sklearn estimator.
        if isinstance(x, SparkSklearnEstimator):
            x = x.estimator
        if isinstance(x, LinearRegression) or isinstance(x, LogisticRegression):
            return x.coef_, x.intercept_
        if isinstance(x, PCA):
            return _sortByComponentWeight(x)
        if isinstance(x, KMeans):
            return x.cluster_centers_, x.labels_
        return x
    assertPandasAlmostEqual(actual, expected, convert=convert_estimators, sortby=sortby)
@fixtureReuseSparkSession
class KeyedModelTests(RandomTest):
NDIM = 5
    class _CustomClusterer(sklearn.base.BaseEstimator):
        # Dummy estimator with fit/transform/fit_predict, used only to check
        # that KeyedEstimator classifies it as a "clusterer".
        # NOTE(review): methods omit `self`; fine for attribute introspection
        # but not callable on instances — confirm this is intentional.
        def fit(X, y=None):
            pass
        def transform(X):
            return X
        def fit_predict(X):
            return np.zeros(len(X))
    class _CustomTransformer(sklearn.base.BaseEstimator):
        # Dummy estimator without fit_predict, expected to be classified as
        # a "transformer" despite the decoy predict() method.
        def fit(X): # Only 1 argument expected!
            pass
        def transform(X):
            return X
        def predict(X): # Dummy predict to throw us off - all sklearn clusterers have fit_predict
            return np.zeros(len(X))
    class _CustomMissingFit(sklearn.base.BaseEstimator):
        # Lacks fit() entirely; KeyedEstimator construction must raise
        # AttributeError for it (exercised in test_invalid_argument).
        def transform(X):
            return X
# Makes sure that that the parameter estimator and its generated model are of the given type.
# Simultaneously makes sure that an empty fit() works.
    def checkEstimatorType(self, keyedEstimator, expectedType):
        """Assert both the estimator and a model fit on an empty DataFrame
        report `expectedType` as their sklearn estimator type."""
        self.assertEqual(keyedEstimator.sklearnEstimatorType, expectedType)
        schema = StructType().add("features", DoubleType()).add("key", LongType())
        # Predictors additionally require their label column in the schema.
        yCol = keyedEstimator.getOrDefault("yCol")
        if yCol is not None:
            schema = schema.add(yCol, DoubleType())
        emptyDF = self.spark.createDataFrame([], schema=schema)
        keyedModel = keyedEstimator.fit(emptyDF)
        self.assertEqual(keyedModel.sklearnEstimatorType, expectedType)
    def test_correct_estimator_type(self):
        """Estimator type is inferred correctly for sklearn and custom
        estimators, and can be overridden via `estimatorType`."""
        self.checkEstimatorType(KeyedEstimator(sklearnEstimator=PCA()), "transformer")
        self.checkEstimatorType(KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y"),
                                "predictor")
        self.checkEstimatorType(KeyedEstimator(sklearnEstimator=DBSCAN()), "clusterer")
        self.checkEstimatorType(KeyedEstimator(sklearnEstimator=KMeans()), "clusterer")
        # Explicit estimatorType takes precedence over inference.
        ke = KeyedEstimator(sklearnEstimator=KMeans(), estimatorType="transformer")
        self.checkEstimatorType(ke, "transformer")
        custom = KeyedModelTests._CustomClusterer()
        ke = KeyedEstimator(sklearnEstimator=custom)
        self.checkEstimatorType(ke, "clusterer")
        ke = KeyedEstimator(sklearnEstimator=custom, estimatorType="transformer")
        self.checkEstimatorType(ke, "transformer")
        custom = KeyedModelTests._CustomTransformer()
        self.checkEstimatorType(KeyedEstimator(sklearnEstimator=custom), "transformer")
    def test_invalid_argument(self):
        """Invalid KeyedEstimator constructor arguments raise ValueError
        (or AttributeError when fit() is missing)."""
        # Need to specify sklearnEstimator
        self.assertRaises(ValueError, KeyedEstimator)
        # sklearnEstimator must be a sklearn.base.Estimator
        create = lambda: KeyedEstimator(sklearnEstimator=5)
        self.assertRaises(ValueError, create)
        class SomeUDC(object):
            pass
        create = lambda: KeyedEstimator(sklearnEstimator=SomeUDC())
        self.assertRaises(ValueError, create)
        # Must have fit()
        create = lambda: KeyedEstimator(sklearnEstimator=KeyedModelTests._CustomMissingFit())
        self.assertRaises(AttributeError, create)
        # Must have key columns
        create = lambda: KeyedEstimator(sklearnEstimator=PCA(), keyCols=[])
        self.assertRaises(ValueError, create)
        # Columns can't have "estimator" name in them
        create = lambda: KeyedEstimator(sklearnEstimator=PCA(), keyCols=["key", "estimator"])
        self.assertRaises(ValueError, create)
        create = lambda: KeyedEstimator(sklearnEstimator=PCA(), xCol="estimator")
        self.assertRaises(ValueError, create)
        create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="estimator")
        self.assertRaises(ValueError, create)
        create = lambda: KeyedEstimator(sklearnEstimator=PCA(), yCol="estimator")
        self.assertRaises(ValueError, create)
        # Presence of yCol requires predictor
        create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y",
                                        estimatorType="transformer")
        self.assertRaises(ValueError, create)
        create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y",
                                        estimatorType="clusterer")
        self.assertRaises(ValueError, create)
        # estimatorType must be one of the three options
        create = lambda: KeyedEstimator(sklearnEstimator=PCA(), estimatorType="regressor")
        self.assertRaises(ValueError, create)
# Checks that only the model throws an AttributeError at transform time.
    def checkPredictionAttrError(self, keyedEstimator):
        """Fit on an empty DataFrame, then assert that transform() raises
        AttributeError (the underlying estimator lacks the needed method)."""
        schema = StructType().add("features", DoubleType()).add("key", LongType())
        yCol = keyedEstimator.getOrDefault("yCol")
        if yCol is not None:
            schema = schema.add(yCol, DoubleType())
        emptyDF = self.spark.createDataFrame([], schema=schema)
        keyedModel = keyedEstimator.fit(emptyDF)
        self.assertRaises(AttributeError, keyedModel.transform, emptyDF)
    def test_attr_error(self):
        """Mismatched estimatorType only fails at transform time, with an
        AttributeError from the missing sklearn method."""
        ke = KeyedEstimator(sklearnEstimator=PCA(), estimatorType="clusterer")
        self.checkPredictionAttrError(ke)
        ke = KeyedEstimator(sklearnEstimator=PCA(), yCol="y", estimatorType="predictor")
        self.checkPredictionAttrError(ke)
        ke = KeyedEstimator(sklearnEstimator=DBSCAN(), estimatorType="transformer")
        self.checkPredictionAttrError(ke)
        ke = KeyedEstimator(sklearnEstimator=DBSCAN(), yCol="y", estimatorType="predictor")
        self.checkPredictionAttrError(ke)
# LinearRegression() or any other predictor would actually fail at fit-time if we used a
# non-empty DF with the wrong estimatorType since no y value would be passed, so
# scikit-learn would complain.
    def test_type_error(self):
        """Wrongly-typed feature/label columns raise TypeError at fit."""
        # Features must be numeric vectors, not strings.
        df = self.spark.createDataFrame([("a", 0), ("b", 0)]).toDF("features", "key")
        keyedPCA = KeyedEstimator(sklearnEstimator=PCA())
        self.assertRaises(TypeError, keyedPCA.fit, df)
        # Labels must be scalars, not arrays.
        df = self.spark.createDataFrame([(Vectors.dense([i]), [i], 0) for i in range(10)])
        df = df.toDF("features", "y", "key")
        keyedLR = KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y")
        self.assertRaises(TypeError, keyedLR.fit, df)
    def checkKeyedModelEquivalent(self, minExamples, featureGen, labelGen, **kwargs):
        """End-to-end equivalence check: fit a KeyedEstimator on per-key data
        and verify both the per-key fitted estimators and the transform output
        match locally-trained sklearn estimators.

        `minExamples` is the minimum training rows per key; `featureGen`
        produces one feature row, `labelGen` one label (None for non-
        predictors); remaining kwargs are forwarded to KeyedEstimator.
        """
        NUSERS = 10
        # featureGen() should generate a np rank-1 ndarray of equal length
        # labelGen() should generate a scalar
        assert (labelGen is not None) == ("yCol" in kwargs)
        isPredictor = labelGen is not None
        # sklearn's LinearRegression estimator is stable even if undetermined.
        # User keys are just [0, NUSERS), repeated for each key if there are multiple columns.
        # The i-th user has i examples.
        keyCols = kwargs.get("keyCols", KeyedEstimator._paramSpecs["keyCols"]["default"])
        outputCol = kwargs.get("outputCol", KeyedEstimator._paramSpecs["outputCol"]["default"])
        xCol = kwargs.get("xCol", KeyedEstimator._paramSpecs["xCol"]["default"])
        nExamplesPerUser = lambda i: max(minExamples, i + 1)
        userKeys = [[i for _ in keyCols] for i in range(NUSERS)]
        features = [[featureGen() for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
        useless = [["useless col" for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
        if isPredictor:
            labels = [[labelGen() for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
        else:
            labels = None
        # Reference: train one local sklearn estimator per user.
        Xs = [np.vstack(x) for x in features]
        ys = [np.array(y) for y in labels] if isPredictor else repeat(None)
        localEstimators = [sklearn.base.clone(kwargs["sklearnEstimator"]).fit(X, y)
                           for X, y in zip(Xs, ys)]
        expectedDF = pd.DataFrame(userKeys, columns=keyCols)
        expectedDF["estimator"] = localEstimators
        def flattenAndConvertNumpy(x):
            # Flatten the per-user nested lists; ndarrays become ml Vectors.
            return [Vectors.dense(i) if isinstance(i, np.ndarray) else i
                    for i in chain.from_iterable(x)]
        inputDF = pd.DataFrame.from_dict(
            {k: [i for i in range(NUSERS) for _ in range(nExamplesPerUser(i))] for k in keyCols})
        inputDF[xCol] = flattenAndConvertNumpy(features)
        inputDF["useless"] = flattenAndConvertNumpy(useless)
        if labels:
            inputDF[kwargs["yCol"]] = flattenAndConvertNumpy(labels)
        inputDF = self.spark.createDataFrame(inputDF)
        ke = KeyedEstimator(**kwargs)
        km = ke.fit(inputDF)
        actualDF = km.keyedModels.toPandas()
        _assertPandasAlmostEqual(actualDF, expectedDF, keyCols)
        # Test users with different amounts of points.
        nTestPerUser = lambda i: NUSERS // 4 if i < NUSERS // 2 else NUSERS * 3 // 4
        testFeatures = [[featureGen() for _ in range(nTestPerUser(i))] for i in range(NUSERS)]
        # "useless" column has nothing to do with computation, but is essential for keeping order
        # the same between the spark and non-spark versions
        useless = [range(nTestPerUser(i)) for i in range(NUSERS)]
        inputDF = pd.DataFrame.from_dict(
            {k: [i for i in range(NUSERS) for _ in range(nTestPerUser(i))] for k in keyCols})
        inputDF[xCol] = flattenAndConvertNumpy(testFeatures)
        inputDF["useless"] = flattenAndConvertNumpy(useless)
        estimatorType = km.sklearnEstimatorType # tested to be correct elsewhere
        def makeOutput(estimator, X):
            # Reference prediction/transform per estimator type.
            if estimatorType == "transformer":
                return estimator.transform(X)
            else:
                assert estimatorType == "predictor" or estimatorType == "clusterer"
                return estimator.predict(X).tolist()
        Xs = [np.vstack(x) for x in testFeatures]
        expectedOutput = map(makeOutput, localEstimators, Xs)
        expectedDF = inputDF.copy(deep=True)
        expectedDF[outputCol] = flattenAndConvertNumpy(expectedOutput)
        inputDF = self.spark.createDataFrame(inputDF)
        actualDF = km.transform(inputDF).toPandas()
        _assertPandasAlmostEqual(actualDF, expectedDF, keyCols + ["useless"])
    def test_transformer(self):
        """Per-key PCA matches locally-fit PCA."""
        minExamples = 1
        featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
        labelGen = None
        self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
                                       sklearnEstimator=PCA())
def test_clusterer(self):
minExamples = 3
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = None
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=KMeans(random_state=0,
n_clusters=minExamples))
def test_regression_predictor(self):
minExamples = 1
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
def test_classification_predictor(self):
minExamples = 2
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
# Need to ensure each user has at least one of each label to train on.
cyc = cycle([-1, 1])
labelGen = lambda: next(cyc)
lr = LogisticRegression(random_state=0)
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=lr, yCol="y")
def test_diff_type_input(self):
# Integer array
minExamples = 1
featureGen = lambda: np.random.randint(low=0, high=10, size=KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
# float input
featureGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
# integer input
featureGen = lambda: np.random.randint(100)
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
def test_no_defaults(self):
minExamples = 1
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="myy",
xCol="myfeatures", keyCols=["mykey1", "mykey2"])
    def test_surprise_key(self):
        """Keys never seen at fit time should yield a null output, not an error."""
        # Fit on an empty frame: no per-key models are learned at all.
        ke = KeyedEstimator(sklearnEstimator=PCA())
        schema = StructType().add("features", LongType()).add("key", LongType())
        df = self.spark.createDataFrame([], schema)
        km = ke.fit(df)
        # The model table is empty but keeps the expected schema.
        self.assertEqual(km.keyedModels.collect(), [])
        self.assertEqual(km.keyedModels.dtypes,
                         [("key", LongType().simpleString()),
                          ("estimator", "sklearn-estimator")])
        # Transforming a row whose key has no model appends None in "output".
        df = self.spark.createDataFrame([(1, 2)], schema)
        df = km.transform(df)
        self.assertEqual(df.collect(), [(1, 2, None)])
        self.assertEqual(df.dtypes,
                         [("features", "bigint"),
                          ("key", "bigint"),
                          ("output", "vector")])
| 45.911854 | 98 | 0.652698 |
from itertools import chain, repeat, cycle
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN, KMeans
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
import unittest
from pyspark.sql.types import *
from pyspark.ml.linalg import Vectors
import sklearn.base
from spark_sklearn.keyed_models import KeyedEstimator, KeyedModel, SparkSklearnEstimator
from spark_sklearn.test_utils import fixtureReuseSparkSession, assertPandasAlmostEqual, RandomTest
def _sortByComponentWeight(pca):
zipped = zip(pca.components_, pca.explained_variance_ratio_)
ordered = sorted(zipped, key=lambda x: x[1])
return tuple(np.array(unzipped) for unzipped in zip(*ordered))
def _assertPandasAlmostEqual(actual, expected, sortby):
    """Compare two pandas frames, reducing sklearn estimators to their fit params.

    Estimator cells cannot be compared directly, so each is converted to a
    tuple of its learned attributes before the frame comparison.
    """
    def _to_comparable(value):
        # Unwrap the Spark-side wrapper down to the raw sklearn estimator.
        if isinstance(value, SparkSklearnEstimator):
            value = value.estimator
        if isinstance(value, (LinearRegression, LogisticRegression)):
            return value.coef_, value.intercept_
        if isinstance(value, PCA):
            # component order is not deterministic, so compare sorted
            return _sortByComponentWeight(value)
        if isinstance(value, KMeans):
            return value.cluster_centers_, value.labels_
        return value
    assertPandasAlmostEqual(actual, expected, convert=_to_comparable, sortby=sortby)
@fixtureReuseSparkSession
class KeyedModelTests(RandomTest):
NDIM = 5
class _CustomClusterer(sklearn.base.BaseEstimator):
def fit(X, y=None):
pass
def transform(X):
return X
def fit_predict(X):
return np.zeros(len(X))
class _CustomTransformer(sklearn.base.BaseEstimator):
def fit(X):
pass
def transform(X):
return X
def predict(X):
return np.zeros(len(X))
class _CustomMissingFit(sklearn.base.BaseEstimator):
def transform(X):
return X
def checkEstimatorType(self, keyedEstimator, expectedType):
self.assertEqual(keyedEstimator.sklearnEstimatorType, expectedType)
schema = StructType().add("features", DoubleType()).add("key", LongType())
yCol = keyedEstimator.getOrDefault("yCol")
if yCol is not None:
schema = schema.add(yCol, DoubleType())
emptyDF = self.spark.createDataFrame([], schema=schema)
keyedModel = keyedEstimator.fit(emptyDF)
self.assertEqual(keyedModel.sklearnEstimatorType, expectedType)
def test_correct_estimator_type(self):
self.checkEstimatorType(KeyedEstimator(sklearnEstimator=PCA()), "transformer")
self.checkEstimatorType(KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y"),
"predictor")
self.checkEstimatorType(KeyedEstimator(sklearnEstimator=DBSCAN()), "clusterer")
self.checkEstimatorType(KeyedEstimator(sklearnEstimator=KMeans()), "clusterer")
ke = KeyedEstimator(sklearnEstimator=KMeans(), estimatorType="transformer")
self.checkEstimatorType(ke, "transformer")
custom = KeyedModelTests._CustomClusterer()
ke = KeyedEstimator(sklearnEstimator=custom)
self.checkEstimatorType(ke, "clusterer")
ke = KeyedEstimator(sklearnEstimator=custom, estimatorType="transformer")
self.checkEstimatorType(ke, "transformer")
custom = KeyedModelTests._CustomTransformer()
self.checkEstimatorType(KeyedEstimator(sklearnEstimator=custom), "transformer")
def test_invalid_argument(self):
self.assertRaises(ValueError, KeyedEstimator)
create = lambda: KeyedEstimator(sklearnEstimator=5)
self.assertRaises(ValueError, create)
class SomeUDC(object):
pass
create = lambda: KeyedEstimator(sklearnEstimator=SomeUDC())
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=KeyedModelTests._CustomMissingFit())
self.assertRaises(AttributeError, create)
create = lambda: KeyedEstimator(sklearnEstimator=PCA(), keyCols=[])
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=PCA(), keyCols=["key", "estimator"])
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=PCA(), xCol="estimator")
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="estimator")
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=PCA(), yCol="estimator")
self.assertRaises(ValueError, create)
# Presence of yCol requires predictor
create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y",
estimatorType="transformer")
self.assertRaises(ValueError, create)
create = lambda: KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y",
estimatorType="clusterer")
self.assertRaises(ValueError, create)
# estimatorType must be one of the three options
create = lambda: KeyedEstimator(sklearnEstimator=PCA(), estimatorType="regressor")
self.assertRaises(ValueError, create)
# Checks that only the model throws an AttributeError at transform time.
def checkPredictionAttrError(self, keyedEstimator):
schema = StructType().add("features", DoubleType()).add("key", LongType())
yCol = keyedEstimator.getOrDefault("yCol")
if yCol is not None:
schema = schema.add(yCol, DoubleType())
emptyDF = self.spark.createDataFrame([], schema=schema)
keyedModel = keyedEstimator.fit(emptyDF)
self.assertRaises(AttributeError, keyedModel.transform, emptyDF)
def test_attr_error(self):
ke = KeyedEstimator(sklearnEstimator=PCA(), estimatorType="clusterer")
self.checkPredictionAttrError(ke)
ke = KeyedEstimator(sklearnEstimator=PCA(), yCol="y", estimatorType="predictor")
self.checkPredictionAttrError(ke)
ke = KeyedEstimator(sklearnEstimator=DBSCAN(), estimatorType="transformer")
self.checkPredictionAttrError(ke)
ke = KeyedEstimator(sklearnEstimator=DBSCAN(), yCol="y", estimatorType="predictor")
self.checkPredictionAttrError(ke)
# LinearRegression() or any other predictor would actually fail at fit-time if we used a
# non-empty DF with the wrong estimatorType since no y value would be passed, so
# scikit-learn would complain.
def test_type_error(self):
df = self.spark.createDataFrame([("a", 0), ("b", 0)]).toDF("features", "key")
keyedPCA = KeyedEstimator(sklearnEstimator=PCA())
self.assertRaises(TypeError, keyedPCA.fit, df)
df = self.spark.createDataFrame([(Vectors.dense([i]), [i], 0) for i in range(10)])
df = df.toDF("features", "y", "key")
keyedLR = KeyedEstimator(sklearnEstimator=LinearRegression(), yCol="y")
self.assertRaises(TypeError, keyedLR.fit, df)
def checkKeyedModelEquivalent(self, minExamples, featureGen, labelGen, **kwargs):
NUSERS = 10
# featureGen() should generate a np rank-1 ndarray of equal length
# labelGen() should generate a scalar
assert (labelGen is not None) == ("yCol" in kwargs)
isPredictor = labelGen is not None
# sklearn's LinearRegression estimator is stable even if undetermined.
keyCols = kwargs.get("keyCols", KeyedEstimator._paramSpecs["keyCols"]["default"])
outputCol = kwargs.get("outputCol", KeyedEstimator._paramSpecs["outputCol"]["default"])
xCol = kwargs.get("xCol", KeyedEstimator._paramSpecs["xCol"]["default"])
nExamplesPerUser = lambda i: max(minExamples, i + 1)
userKeys = [[i for _ in keyCols] for i in range(NUSERS)]
features = [[featureGen() for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
useless = [["useless col" for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
if isPredictor:
labels = [[labelGen() for _ in range(nExamplesPerUser(i))] for i in range(NUSERS)]
else:
labels = None
Xs = [np.vstack(x) for x in features]
ys = [np.array(y) for y in labels] if isPredictor else repeat(None)
localEstimators = [sklearn.base.clone(kwargs["sklearnEstimator"]).fit(X, y)
for X, y in zip(Xs, ys)]
expectedDF = pd.DataFrame(userKeys, columns=keyCols)
expectedDF["estimator"] = localEstimators
def flattenAndConvertNumpy(x):
return [Vectors.dense(i) if isinstance(i, np.ndarray) else i
for i in chain.from_iterable(x)]
inputDF = pd.DataFrame.from_dict(
{k: [i for i in range(NUSERS) for _ in range(nExamplesPerUser(i))] for k in keyCols})
inputDF[xCol] = flattenAndConvertNumpy(features)
inputDF["useless"] = flattenAndConvertNumpy(useless)
if labels:
inputDF[kwargs["yCol"]] = flattenAndConvertNumpy(labels)
inputDF = self.spark.createDataFrame(inputDF)
ke = KeyedEstimator(**kwargs)
km = ke.fit(inputDF)
actualDF = km.keyedModels.toPandas()
_assertPandasAlmostEqual(actualDF, expectedDF, keyCols)
nTestPerUser = lambda i: NUSERS // 4 if i < NUSERS // 2 else NUSERS * 3 // 4
testFeatures = [[featureGen() for _ in range(nTestPerUser(i))] for i in range(NUSERS)]
useless = [range(nTestPerUser(i)) for i in range(NUSERS)]
inputDF = pd.DataFrame.from_dict(
{k: [i for i in range(NUSERS) for _ in range(nTestPerUser(i))] for k in keyCols})
inputDF[xCol] = flattenAndConvertNumpy(testFeatures)
inputDF["useless"] = flattenAndConvertNumpy(useless)
estimatorType = km.sklearnEstimatorType
def makeOutput(estimator, X):
if estimatorType == "transformer":
return estimator.transform(X)
else:
assert estimatorType == "predictor" or estimatorType == "clusterer"
return estimator.predict(X).tolist()
Xs = [np.vstack(x) for x in testFeatures]
expectedOutput = map(makeOutput, localEstimators, Xs)
expectedDF = inputDF.copy(deep=True)
expectedDF[outputCol] = flattenAndConvertNumpy(expectedOutput)
inputDF = self.spark.createDataFrame(inputDF)
actualDF = km.transform(inputDF).toPandas()
_assertPandasAlmostEqual(actualDF, expectedDF, keyCols + ["useless"])
def test_transformer(self):
minExamples = 1
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = None
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=PCA())
def test_clusterer(self):
minExamples = 3
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = None
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=KMeans(random_state=0,
n_clusters=minExamples))
def test_regression_predictor(self):
minExamples = 1
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
def test_classification_predictor(self):
minExamples = 2
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
cyc = cycle([-1, 1])
labelGen = lambda: next(cyc)
lr = LogisticRegression(random_state=0)
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=lr, yCol="y")
def test_diff_type_input(self):
minExamples = 1
featureGen = lambda: np.random.randint(low=0, high=10, size=KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
featureGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
featureGen = lambda: np.random.randint(100)
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="y")
def test_no_defaults(self):
minExamples = 1
featureGen = lambda: np.random.random(KeyedModelTests.NDIM)
labelGen = lambda: np.random.random()
self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
sklearnEstimator=LinearRegression(), yCol="myy",
xCol="myfeatures", keyCols=["mykey1", "mykey2"])
def test_surprise_key(self):
ke = KeyedEstimator(sklearnEstimator=PCA())
schema = StructType().add("features", LongType()).add("key", LongType())
df = self.spark.createDataFrame([], schema)
km = ke.fit(df)
self.assertEqual(km.keyedModels.collect(), [])
self.assertEqual(km.keyedModels.dtypes,
[("key", LongType().simpleString()),
("estimator", "sklearn-estimator")])
df = self.spark.createDataFrame([(1, 2)], schema)
df = km.transform(df)
self.assertEqual(df.collect(), [(1, 2, None)])
self.assertEqual(df.dtypes,
[("features", "bigint"),
("key", "bigint"),
("output", "vector")])
| true | true |
1c3d3203b79ea93afcb26f6828b432634958b1ba | 1,049 | py | Python | xair/envs/lvaircraft_random_pitch.py | xikasan/xair | c10f7a5d6a279eb5d2498b2f2df489ccc85ee36c | [
"MIT"
] | null | null | null | xair/envs/lvaircraft_random_pitch.py | xikasan/xair | c10f7a5d6a279eb5d2498b2f2df489ccc85ee36c | [
"MIT"
] | null | null | null | xair/envs/lvaircraft_random_pitch.py | xikasan/xair | c10f7a5d6a279eb5d2498b2f2df489ccc85ee36c | [
"MIT"
] | null | null | null | # coding: utf-8
import gym
import xsim
import numpy as np
import xtools as xt
from .base import BaseEnv
from ..models.lvaircraft import LVAircraft
from .lvaircraft_pitch import LVAircraftPitchV3
class LVAircraftPitchV4(LVAircraftPitchV3):
    """Pitch-control environment with a randomly-timed target command.

    Identical to LVAircraftPitchV3 except that the pitch reference is a
    Poisson-timed rectangular wave (random switch instants) instead of the
    parent's fixed-period command.
    """
    # Observation indices — NOTE(review): presumably T=pitch angle, q=pitch
    # rate, r=reference; confirm against the LVAircraft model definition.
    IX_T = 0
    IX_q = 1
    IX_r = 2
    # Action indices: dt=throttle, de=elevator deflection (per LVAircraft naming).
    IX_dt = 0
    IX_de = 1
    def __init__(
            self,
            dt=1/100,
            target_range=xt.d2r([-10, 10]),
            target_period=10.0,
            fail_mode="nomal",
            fail_range=[0.2, 0.7],
            dtype=np.float32,
            name="LVAircraftRandomPitchV0"
    ):
        """
        :param dt: simulation step size in seconds
        :param target_range: (min, max) pitch target in radians
        :param target_period: mean interval of the random reference switches [s]
        :param fail_mode: failure-simulation mode forwarded to the parent.
            NOTE(review): "nomal" looks like a typo for "normal", but the
            string is part of the runtime contract with V3 — verify before changing.
        :param fail_range: failure magnitude range forwarded to the parent.
            NOTE(review): mutable default list — shared across instances if mutated.
        :param dtype: numpy dtype used for observations
        :param name: environment name
        """
        super().__init__(
            dt,
            target_range=target_range,
            target_period=target_period,
            fail_mode=fail_mode,
            fail_range=fail_range,
            dtype=dtype,
            name=name
        )
        # Replace the parent's periodic reference generator with a random
        # rectangular command; amplitude is half the full target span.
        target_width = (np.max(target_range) - np.min(target_range)) / 2
        self._ref = xsim.PoissonRectangularCommand(
            max_amplitude=target_width,
            interval=target_period
        )
| 23.311111 | 72 | 0.575786 |
import gym
import xsim
import numpy as np
import xtools as xt
from .base import BaseEnv
from ..models.lvaircraft import LVAircraft
from .lvaircraft_pitch import LVAircraftPitchV3
class LVAircraftPitchV4(LVAircraftPitchV3):
IX_T = 0
IX_q = 1
IX_r = 2
IX_dt = 0
IX_de = 1
def __init__(
self,
dt=1/100,
target_range=xt.d2r([-10, 10]),
target_period=10.0,
fail_mode="nomal",
fail_range=[0.2, 0.7],
dtype=np.float32,
name="LVAircraftRandomPitchV0"
):
super().__init__(
dt,
target_range=target_range,
target_period=target_period,
fail_mode=fail_mode,
fail_range=fail_range,
dtype=dtype,
name=name
)
target_width = (np.max(target_range) - np.min(target_range)) / 2
self._ref = xsim.PoissonRectangularCommand(
max_amplitude=target_width,
interval=target_period
)
| true | true |
1c3d322ee39108612c2cd9a3002a5d3cce01f18f | 9,895 | py | Python | micronet/compression/pruning/main.py | hyterazzb/micronet | 4a90918598b9f7601a9051720f8dcb3303c72c96 | [
"MIT"
] | 1 | 2021-05-12T15:38:02.000Z | 2021-05-12T15:38:02.000Z | micronet/compression/pruning/main.py | jay757425789/micronet | 351d184527e9867e0394878cf91b64ffd5c6b109 | [
"MIT"
] | null | null | null | micronet/compression/pruning/main.py | jay757425789/micronet | 351d184527e9867e0394878cf91b64ffd5c6b109 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../..")
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def setup_seed(seed):
    """Make runs reproducible.

    Seeds numpy and torch (CPU RNG and every CUDA device) and forces cuDNN
    into deterministic mode.

    :param seed: integer seed applied to all RNGs
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def save_state(model, best_acc):
    """Snapshot the model weights and best accuracy under models_save/.

    The file name and payload depend on the global CLI ``args``: sparsity
    checkpoints for --sr, pruned checkpoints (which also store the channel
    ``cfg``) for the prune-refine modes, plain checkpoints otherwise, with
    nin vs nin_gc chosen by args.model_type.

    :param model: network to save (possibly wrapped in DataParallel)
    :param best_acc: best test accuracy so far, stored next to the weights
    """
    print('==> Saving model ...')
    state = {
        'best_acc': best_acc,
        'state_dict': model.state_dict(),
    }
    # Strip DataParallel's 'module.' prefix so the checkpoint loads into a
    # bare single-device model. Iterate over a copy because keys are popped
    # from the live dict inside the loop.
    state_copy = state['state_dict'].copy()
    for key in state_copy.keys():
        if 'module' in key:
            state['state_dict'][key.replace('module.', '')] = \
                state['state_dict'].pop(key)
    if args.model_type == 0:
        if args.sr:
            torch.save(state, 'models_save/nin_sparse.pth')
        elif args.prune_refine:
            # NOTE(review): relies on the module-global ``cfg`` assigned in
            # __main__'s prune branch — confirm it is always set on this path.
            torch.save({'cfg': cfg, 'best_acc': best_acc,
                        'state_dict': state['state_dict']}, 'models_save/nin_finetune.pth')
        else:
            torch.save(state, 'models_save/nin.pth')
    else:
        if args.sr:
            torch.save(state, 'models_save/nin_gc_sparse.pth')
        elif args.gc_prune_refine:
            torch.save({'cfg': cfg, 'best_acc': best_acc,
                        'state_dict': state['state_dict']}, 'models_save/nin_gc_retrain.pth')
        else:
            torch.save(state, 'models_save/nin_gc.pth')
# *********************** Sparse training (L1 constraint on BN-layer gamma) **************************
def updateBN():
    """Add the L1 sparsity subgradient to every BatchNorm2d gamma's gradient.

    Called between backward() and step() when --sr is set (see train());
    pushes BN scale factors toward zero so low-gamma channels can later be
    pruned. Uses the module globals ``model`` and ``args`` (args.s is the
    penalty strength).
    """
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            if hasattr(m.weight, 'data'):
                m.weight.grad.data.add_(args.s * torch.sign(m.weight.data))  # L1 regularization subgradient
def train(epoch):
    """Run one training epoch over ``trainloader``.

    Uses the module globals ``model``, ``criterion``, ``optimizer``,
    ``trainloader`` and ``args``; logs progress every 100 batches.

    :param epoch: current epoch number, used only for logging
    """
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        if not args.cpu:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        # Sparse training: add L1 subgradient on BN gammas before the step.
        if args.sr:
            updateBN()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, batch_idx * len(data), len(trainloader.dataset),
                100. * batch_idx / len(trainloader), loss.data.item(),
                optimizer.param_groups[0]['lr']))
    return
def test():
    """Evaluate on the test set and checkpoint whenever accuracy improves.

    Updates the module-global ``best_acc`` (and saves via save_state on
    improvement); prints average loss and accuracy. Uses the module globals
    ``model``, ``criterion``, ``testloader`` and ``args``.
    """
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in testloader:
        if not args.cpu:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += criterion(output, target).data.item()
        pred = output.data.max(1, keepdim=True)[1]  # index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    acc = 100. * float(correct) / len(testloader.dataset)
    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)
    # Loss above is summed per batch, so divide by the number of batches.
    average_test_loss = test_loss / (len(testloader.dataset) / args.eval_batch_size)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        average_test_loss, correct, len(testloader.dataset),
        100. * float(correct) / len(testloader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate 10x at the milestone epochs.

    :param optimizer: optimizer whose ``param_groups`` are updated in place
    :param epoch: current epoch number
    """
    milestones = (80, 130, 180, 230, 280)
    if epoch not in milestones:
        return
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
if __name__ == '__main__':
    # ---------------- CLI ----------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', action='store_true',
                        help='set if only CPU is available')
    parser.add_argument('--gpu_id', action='store', default='',
                        help='gpu_id')
    parser.add_argument('--data', action='store', default='../../data',
                        help='dataset path')
    parser.add_argument('--lr', action='store', default=0.01,
                        help='the intial learning rate')
    parser.add_argument('--wd', action='store', default=1e-7,
                        help='nin_gc:0, nin:1e-5')
    # checkpoint for fine-tuning a channel-pruned nin model (carries its cfg)
    parser.add_argument('--prune_refine', default='', type=str, metavar='PATH',
                        help='the path to the prune_refine model')
    # checkpoint for refining an unpruned float model
    parser.add_argument('--refine', default='', type=str, metavar='PATH',
                        help='the path to the float_refine model')
    # checkpoint for resuming an interrupted run (also restores best_acc)
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='the path to the resume model')
    # channel cfg for retraining a pruned group-conv (nin_gc) model
    parser.add_argument('--gc_prune_refine', nargs='+', type=int,
                        help='gc_prune_refine-cfg')
    parser.add_argument('--train_batch_size', type=int, default=128)
    parser.add_argument('--eval_batch_size', type=int, default=256)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--epochs', type=int, default=300, metavar='N',
                        help='number of epochs to train')
    # sr: sparsity-training flag (L1 penalty on BN gammas, see updateBN)
    parser.add_argument('--sparsity-regularization', '-sr', dest='sr', action='store_true',
                        help='train with channel sparsity regularization')
    # s: sparsity penalty strength
    parser.add_argument('--s', type=float, default=0.0001,
                        help='nin:0.0001, nin_gc:0.001')
    parser.add_argument('--model_type', type=int, default=1,
                        help='model type:0-nin,1-nin_gc')
    args = parser.parse_args()
    print('==> Options:', args)
    if args.gpu_id:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    setup_seed(1)
    # ---------------- CIFAR-10 data ----------------
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size,
                                              shuffle=True, num_workers=args.num_workers)
    testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.eval_batch_size,
                                             shuffle=False, num_workers=args.num_workers)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    # ---------------- Model construction / checkpoint loading ----------------
    if args.prune_refine:
        print('******Prune Refine model******')
        #checkpoint = torch.load('models_save/nin_prune.pth')
        checkpoint = torch.load(args.prune_refine)
        cfg = checkpoint['cfg']
        model = nin.Net(cfg=checkpoint['cfg'])
        model.load_state_dict(checkpoint['state_dict'])
        best_acc = 0
    elif args.refine:
        print('******Float Refine model******')
        #checkpoint = torch.load('models_save/nin.pth')
        state_dict = torch.load(args.refine)
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        model.load_state_dict(state_dict)
        best_acc = 0
    elif args.resume:
        print('******Reume model******')
        #checkpoint = torch.load('models_save/nin.pth')
        #checkpoint = torch.load('models_save/nin_sparse.pth')
        checkpoint = torch.load(args.resume)
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        model.load_state_dict(checkpoint['state_dict'])
        best_acc = checkpoint['best_acc']
    else:
        # nin_gc_retrain
        if args.gc_prune_refine:
            print('******GCPrune Refine model******')
            cfg = args.gc_prune_refine
            model = nin_gc.Net(cfg=cfg)
        else:
            print('******Initializing model******')
            if args.model_type == 0:
                model = nin.Net()
            else:
                model = nin_gc.Net()
        best_acc = 0
    # NOTE(review): this re-initialization sits at top level, so it also runs
    # after the --prune_refine/--refine/--resume branches and overwrites the
    # weights loaded just above. It looks intended only for freshly built
    # models — confirm the indentation against the upstream repository.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.zeros_(m.bias)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, 0, 0.01)
            if m.bias is not None:
                init.zeros_(m.bias)
    if not args.cpu:
        model.cuda()
        model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    print(model)
    # ---------------- Optimizer: one param group per parameter ----------------
    base_lr = float(args.lr)
    param_dict = dict(model.named_parameters())
    params = []
    for key, value in param_dict.items():
        params += [{'params': [value], 'lr': base_lr, 'weight_decay':args.wd}]
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(params, lr=base_lr, weight_decay=args.wd)
    # ---------------- Train / evaluate loop ----------------
    for epoch in range(1, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(epoch)
        test()
| 36.784387 | 93 | 0.57999 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../..")
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def save_state(model, best_acc):
print('==> Saving model ...')
state = {
'best_acc': best_acc,
'state_dict': model.state_dict(),
}
state_copy = state['state_dict'].copy()
for key in state_copy.keys():
if 'module' in key:
state['state_dict'][key.replace('module.', '')] = \
state['state_dict'].pop(key)
if args.model_type == 0:
if args.sr:
torch.save(state, 'models_save/nin_sparse.pth')
elif args.prune_refine:
torch.save({'cfg': cfg, 'best_acc': best_acc,
'state_dict': state['state_dict']}, 'models_save/nin_finetune.pth')
else:
torch.save(state, 'models_save/nin.pth')
else:
if args.sr:
torch.save(state, 'models_save/nin_gc_sparse.pth')
elif args.gc_prune_refine:
torch.save({'cfg': cfg, 'best_acc': best_acc,
'state_dict': state['state_dict']}, 'models_save/nin_gc_retrain.pth')
else:
torch.save(state, 'models_save/nin_gc.pth')
def updateBN():
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
if hasattr(m.weight, 'data'):
m.weight.grad.data.add_(args.s * torch.sign(m.weight.data))
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(trainloader):
if not args.cpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
if args.sr:
updateBN()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
epoch, batch_idx * len(data), len(trainloader.dataset),
100. * batch_idx / len(trainloader), loss.data.item(),
optimizer.param_groups[0]['lr']))
return
def test():
global best_acc
model.eval()
test_loss = 0
correct = 0
for data, target in testloader:
if not args.cpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += criterion(output, target).data.item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
acc = 100. * float(correct) / len(testloader.dataset)
if acc > best_acc:
best_acc = acc
save_state(model, best_acc)
average_test_loss = test_loss / (len(testloader.dataset) / args.eval_batch_size)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
average_test_loss, correct, len(testloader.dataset),
100. * float(correct) / len(testloader.dataset)))
print('Best Accuracy: {:.2f}%\n'.format(best_acc))
return
def adjust_learning_rate(optimizer, epoch):
update_list = [80, 130, 180, 230, 280]
if epoch in update_list:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.1
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true',
help='set if only CPU is available')
parser.add_argument('--gpu_id', action='store', default='',
help='gpu_id')
parser.add_argument('--data', action='store', default='../../data',
help='dataset path')
parser.add_argument('--lr', action='store', default=0.01,
help='the intial learning rate')
parser.add_argument('--wd', action='store', default=1e-7,
help='nin_gc:0, nin:1e-5')
parser.add_argument('--prune_refine', default='', type=str, metavar='PATH',
help='the path to the prune_refine model')
parser.add_argument('--refine', default='', type=str, metavar='PATH',
help='the path to the float_refine model')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='the path to the resume model')
parser.add_argument('--gc_prune_refine', nargs='+', type=int,
help='gc_prune_refine-cfg')
parser.add_argument('--train_batch_size', type=int, default=128)
parser.add_argument('--eval_batch_size', type=int, default=256)
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train')
parser.add_argument('--sparsity-regularization', '-sr', dest='sr', action='store_true',
help='train with channel sparsity regularization')
parser.add_argument('--s', type=float, default=0.0001,
help='nin:0.0001, nin_gc:0.001')
parser.add_argument('--model_type', type=int, default=1,
help='model type:0-nin,1-nin_gc')
args = parser.parse_args()
print('==> Options:', args)
if args.gpu_id:
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
setup_seed(1)
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
trainset = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True,
transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size,
shuffle=True, num_workers=args.num_workers)
testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.eval_batch_size,
shuffle=False, num_workers=args.num_workers)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
if args.prune_refine:
print('******Prune Refine model******')
checkpoint = torch.load(args.prune_refine)
cfg = checkpoint['cfg']
model = nin.Net(cfg=checkpoint['cfg'])
model.load_state_dict(checkpoint['state_dict'])
best_acc = 0
elif args.refine:
print('******Float Refine model******')
state_dict = torch.load(args.refine)
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
model.load_state_dict(state_dict)
best_acc = 0
elif args.resume:
print('******Reume model******')
checkpoint = torch.load(args.resume)
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
model.load_state_dict(checkpoint['state_dict'])
best_acc = checkpoint['best_acc']
else:
if args.gc_prune_refine:
print('******GCPrune Refine model******')
cfg = args.gc_prune_refine
model = nin_gc.Net(cfg=cfg)
else:
print('******Initializing model******')
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
best_acc = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
init.zeros_(m.bias)
if not args.cpu:
model.cuda()
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
print(model)
base_lr = float(args.lr)
param_dict = dict(model.named_parameters())
params = []
for key, value in param_dict.items():
params += [{'params': [value], 'lr': base_lr, 'weight_decay':args.wd}]
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(params, lr=base_lr, weight_decay=args.wd)
for epoch in range(1, args.epochs):
adjust_learning_rate(optimizer, epoch)
train(epoch)
test()
| true | true |
1c3d324f2425e174e7561a331a6d2849d512e0d5 | 2,805 | py | Python | tests/test_score_board.py | atw1020/sente | ebc6150124ad0bd82b0f5c414cbedb242e3c3a37 | [
"MIT"
] | 3 | 2021-07-30T02:17:52.000Z | 2021-12-11T15:57:04.000Z | tests/test_score_board.py | atw1020/sente | ebc6150124ad0bd82b0f5c414cbedb242e3c3a37 | [
"MIT"
] | null | null | null | tests/test_score_board.py | atw1020/sente | ebc6150124ad0bd82b0f5c414cbedb242e3c3a37 | [
"MIT"
] | null | null | null | """
Author: Arthur Wesley
"""
from unittest import TestCase
import sente
class TestNoDeadStones(TestCase):
    """Scoring tests for games that end with no dead stones on the board."""

    def play_simple_game(self, game):
        """Play out a short game in which each player walls off one corner.

        :param game: the game to play the moves in
        :return: None
        """
        corner_moves = [
            (3, 1), (1, 3), (3, 2), (2, 3), (4, 3), (3, 4),
            (5, 3), (3, 5), (6, 2), (2, 6), (6, 1), (1, 6),
        ]
        for x, y in corner_moves:
            game.play(x, y)

    def play_capture_stones_game(self, game):
        """Play out a game in which both players capture some stones.

        :param game: the game to play the moves in
        :return: None
        """
        capture_moves = [(1, 1), (19, 19), (18, 19), (2, 1), (19, 18), (1, 2)]
        for x, y in capture_moves:
            game.play(x, y)

    def end_game(self, game):
        """Finish the game with two consecutive passes.

        :param game: the game to end
        :return: None
        """
        for _ in range(2):
            game.pss()

    def test_simple_chinese(self):
        """Area (Chinese) scoring of the simple corner game."""
        game = sente.Game(19, sente.CHINESE)
        self.play_simple_game(game)
        self.end_game(game)

        result = game.score()

        self.assertEqual(sente.stone.WHITE, game.get_winner())
        self.assertEqual(10, result.get_black_points())
        self.assertEqual(17.5, result.get_white_points())

    def test_simple_japanese(self):
        """Territory (Japanese) scoring of the simple corner game."""
        game = sente.Game(19, sente.JAPANESE)
        self.play_simple_game(game)
        self.end_game(game)

        result = game.score()

        self.assertEqual(sente.stone.WHITE, game.get_winner())
        self.assertEqual(4, result.get_black_points())
        self.assertEqual(10.5, result.get_white_points())

    def test_count_captured_stones_chinese(self):
        """Captured stones must be reflected in the Chinese-rules score."""
        game = sente.Game(19, sente.CHINESE)
        self.play_capture_stones_game(game)
        self.end_game(game)

        result = game.score()

        self.assertEqual(sente.stone.WHITE, game.get_winner())
        self.assertEqual(3, result.get_black_points())
        self.assertEqual(10.5, result.get_white_points())

    def test_count_captured_stones_japanese(self):
        """Captured stones must be reflected in the Japanese-rules score."""
        game = sente.Game(19, sente.JAPANESE)
        self.play_capture_stones_game(game)
        self.end_game(game)

        result = game.score()

        self.assertEqual(sente.stone.WHITE, game.get_winner())
        self.assertEqual(0, result.get_black_points())
        self.assertEqual(6.5, result.get_white_points())
| 20.035714 | 83 | 0.569697 |
from unittest import TestCase
import sente
class TestNoDeadStones(TestCase):
def play_simple_game(self, game):
game.play(3, 1)
game.play(1, 3)
game.play(3, 2)
game.play(2, 3)
game.play(4, 3)
game.play(3, 4)
game.play(5, 3)
game.play(3, 5)
game.play(6, 2)
game.play(2, 6)
game.play(6, 1)
game.play(1, 6)
def play_capture_stones_game(self, game):
game.play(1, 1)
game.play(19, 19)
game.play(18, 19)
game.play(2, 1)
game.play(19, 18)
game.play(1, 2)
def end_game(self, game):
game.pss()
game.pss()
def test_simple_chinese(self):
game = sente.Game(19, sente.CHINESE)
self.play_simple_game(game)
self.end_game(game)
result = game.score()
self.assertEqual(sente.stone.WHITE, game.get_winner())
self.assertEqual(10, result.get_black_points())
self.assertEqual(17.5, result.get_white_points())
def test_simple_japanese(self):
game = sente.Game(19, sente.JAPANESE)
self.play_simple_game(game)
self.end_game(game)
result = game.score()
self.assertEqual(sente.stone.WHITE, game.get_winner())
self.assertEqual(4, result.get_black_points())
self.assertEqual(10.5, result.get_white_points())
def test_count_captured_stones_chinese(self):
game = sente.Game(19, sente.CHINESE)
self.play_capture_stones_game(game)
self.end_game(game)
result = game.score()
self.assertEqual(sente.stone.WHITE, game.get_winner())
self.assertEqual(3, result.get_black_points())
self.assertEqual(10.5, result.get_white_points())
def test_count_captured_stones_japanese(self):
game = sente.Game(19, sente.JAPANESE)
self.play_capture_stones_game(game)
self.end_game(game)
result = game.score()
self.assertEqual(sente.stone.WHITE, game.get_winner())
self.assertEqual(0, result.get_black_points())
self.assertEqual(6.5, result.get_white_points())
| true | true |
1c3d335551dcafaf023a38dadc62bacadb07d10b | 2,637 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20191201/get_active_sessions.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20191201/get_active_sessions.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20191201/get_active_sessions.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetActiveSessionsResult',
'AwaitableGetActiveSessionsResult',
'get_active_sessions',
]
@pulumi.output_type
class GetActiveSessionsResult:
    """
    Response for GetActiveSessions.

    Generated by the Pulumi SDK generator; the ``@pulumi.output_type``
    decorator wires the getters below to the values stored via
    ``pulumi.set`` in ``__init__``.
    """
    def __init__(__self__, next_link=None, value=None):
        # Both fields are optional; the type check is only applied when a
        # non-falsy value was actually supplied.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The URL to get the next set of results.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BastionActiveSessionResponseResult']]:
        """
        List of active sessions on the bastion.
        """
        return pulumi.get(self, "value")
class AwaitableGetActiveSessionsResult(GetActiveSessionsResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The ``if False: yield`` makes this a generator (as __await__
        # requires) that yields nothing: the result is already resolved,
        # so awaiting returns a plain GetActiveSessionsResult immediately.
        if False:
            yield self
        return GetActiveSessionsResult(
            next_link=self.next_link,
            value=self.value)
def get_active_sessions(bastion_host_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActiveSessionsResult:
    """
    Response for GetActiveSessions.

    :param str bastion_host_name: The name of the Bastion Host.
    :param str resource_group_name: The name of the resource group.
    """
    # Provider-side argument names use camelCase.
    invoke_args = {
        'bastionHostName': bastion_host_name,
        'resourceGroupName': resource_group_name,
    }
    effective_opts = opts if opts is not None else pulumi.InvokeOptions()
    if effective_opts.version is None:
        effective_opts.version = _utilities.get_version()
    raw_result = pulumi.runtime.invoke(
        'azure-nextgen:network/v20191201:getActiveSessions',
        invoke_args,
        opts=effective_opts,
        typ=GetActiveSessionsResult,
    ).value
    return AwaitableGetActiveSessionsResult(
        next_link=raw_result.next_link,
        value=raw_result.value,
    )
| 32.9625 | 144 | 0.675768 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetActiveSessionsResult',
'AwaitableGetActiveSessionsResult',
'get_active_sessions',
]
@pulumi.output_type
class GetActiveSessionsResult:
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.BastionActiveSessionResponseResult']]:
return pulumi.get(self, "value")
class AwaitableGetActiveSessionsResult(GetActiveSessionsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetActiveSessionsResult(
next_link=self.next_link,
value=self.value)
def get_active_sessions(bastion_host_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActiveSessionsResult:
__args__ = dict()
__args__['bastionHostName'] = bastion_host_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20191201:getActiveSessions', __args__, opts=opts, typ=GetActiveSessionsResult).value
return AwaitableGetActiveSessionsResult(
next_link=__ret__.next_link,
value=__ret__.value)
| true | true |
1c3d35000a369e06b58598e025d5b08b703b8893 | 2,217 | py | Python | backend/right_one_33576/urls.py | crowdbotics-apps/right-one-33576 | 7303fd852ac6ca7db7652e56c67e2dd13a9d931e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/right_one_33576/urls.py | crowdbotics-apps/right-one-33576 | 7303fd852ac6ca7db7652e56c67e2dd13a9d931e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/right_one_33576/urls.py | crowdbotics-apps/right-one-33576 | 7303fd852ac6ca7db7652e56c67e2dd13a9d931e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """right_one_33576 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "right one"
admin.site.site_title = "right one Admin Portal"
admin.site.index_title = "right one Admin"
# swagger
api_info = openapi.Info(
title="right one API",
default_version="v1",
description="API documentation for right one App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| 35.190476 | 87 | 0.710871 |
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "right one"
admin.site.site_title = "right one Admin Portal"
admin.site.index_title = "right one Admin"
api_info = openapi.Info(
title="right one API",
default_version="v1",
description="API documentation for right one App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| true | true |
1c3d353a1df81ab9cd2b5335ad4a5360c1b1542b | 1,867 | py | Python | 2021/day_04/day_04.py | stbisplinghoff/adventofcode | d97270987f890eabd8bc3ae90e11cbf87bc43715 | [
"CC0-1.0"
] | null | null | null | 2021/day_04/day_04.py | stbisplinghoff/adventofcode | d97270987f890eabd8bc3ae90e11cbf87bc43715 | [
"CC0-1.0"
] | null | null | null | 2021/day_04/day_04.py | stbisplinghoff/adventofcode | d97270987f890eabd8bc3ae90e11cbf87bc43715 | [
"CC0-1.0"
] | null | null | null | # Solution to Advent of Code 2021 day 4
# Read input: the first blank-line-separated block is the comma-separated
# draw order; every following block is one 5x5 bingo board.
with open("input.txt") as inFile:
    data = inFile.read().split("\n\n")

numbers = [int(value) for value in data[0].split(",")]
# Board cells are right-aligned with variable-width space padding, so split
# each row with str.split() (splits on any run of whitespace and discards
# empty tokens). The previous `replace(" ", " ").split(" ")[-5:]` approach
# was a no-op replace plus a fragile slice, and crashed on the blank row
# produced by a trailing newline; skipping empty rows handles that too.
fields = [[[int(element) for element in row.split()]
           for row in block.split("\n") if row.strip()]
          for block in data[1:]]
def checkRowBingo(field):
    """Return True when any row of the board is fully marked.

    Marked cells are overwritten with 0 by checkNumber, so a completed row
    sums to zero.
    NOTE(review): a board that legitimately contains the number 0 cannot be
    distinguished from a marked cell by this scheme — confirm the puzzle
    input never makes a row sum to 0 before every cell is drawn.

    :param field: 5x5 board as a list of rows of ints
    :return: bool
    """
    return any(sum(row) == 0 for row in field)
def checkColumnBingo(field):
    """Return True when any column of the board is fully marked.

    Marked cells are overwritten with 0 by checkNumber, so a completed
    column sums to zero.

    Improvements over the original: zip(*field) transposes the board and
    iterates the columns directly, which removes the hard-coded 5x5 size
    (any rectangular board now works) and the loop variable that shadowed
    its own index.

    :param field: board as a list of equally long rows of ints
    :return: bool
    """
    return any(sum(column) == 0 for column in zip(*field))
def checkNumber(field, number):
    """Mark *number* on the board (in place) and report whether a bingo
    resulted.

    The first occurrence of the number in each row is overwritten with 0.
    Returns False when the number does not appear on the board at all.

    :param field: board as a list of rows of ints (mutated in place)
    :param number: the drawn number to mark
    :return: bool — True if a row or column is now fully marked
    """
    marked_any = False
    for row in field:
        # EAFP: list.index raises ValueError when the number is absent.
        try:
            row[row.index(number)] = 0
            marked_any = True
        except ValueError:
            continue
    if not marked_any:
        return False
    # The board state only changes while marking, so one check at the end
    # is equivalent to the per-row check the original performed last.
    return checkRowBingo(field) | checkColumnBingo(field)
## Part 1: score the FIRST board to reach bingo.
bingoFlag = False
for number in numbers:          # iterate draws directly (no builtin `round` shadowing)
    for field in fields:
        if checkNumber(field, number):
            print("Bingo!", field)
            # Marked cells are 0, so this sums the unmarked numbers.
            print("Sum of remaining fields:", sum(sum(row) for row in field))
            print("Solution for part 1: ", sum(sum(row) for row in field) * number)
            bingoFlag = True
            break
    if bingoFlag:
        break

## Part 2: score the LAST board to reach bingo.
# NOTE(review): the boards keep the marks made during part 1; replaying the
# same draw order is idempotent, so each board's state per round is the same
# as a fresh replay would produce.
for number in numbers:
    winningFields = []
    for field in fields:
        if checkNumber(field, number):
            winningFields.append(field)
    for field in winningFields:
        fields.remove(field)
    if len(fields) == 0:
        print("Last winning field:", winningFields[-1])
        print("Solution for part 2: ", sum(sum(row) for row in winningFields[-1]) * number)
        # Bug fix: stop once every board has won. Without this break the
        # next draw re-enters this branch with an empty winningFields list
        # and crashes on winningFields[-1].
        break
| 28.723077 | 118 | 0.587038 |
with open("input.txt") as inFile:
data = inFile.read().split("\n\n")
numbers = [int(value) for value in data[0].split(",")]
fields = [[[int(element) for element in column.replace(" ", " ").split(" ")[-5:]] for column in rows.split("\n")] for
rows in data[1:]]
def checkRowBingo(field):
result = False
for row in field:
if sum(row) == 0:
result = True
return result
def checkColumnBingo(field):
result = False
for column in range(5):
column = [field[row][column] for row in range(5)]
if sum(column) == 0:
result = True
return result
def checkNumber(field, number):
bingo = False
for row in field:
if number in row:
row[row.index(number)] = 0
bingo = checkRowBingo(field) | checkColumnBingo(field)
return bingo
ag = False
for round in range(len(numbers)):
number = numbers[round]
for field in fields:
if checkNumber(field, number):
print("Bingo!", field)
print("Sum of remaining fields:", sum([sum(row) for row in field]))
print("Solution for part 1: ", sum([sum(row) for row in field]) * number)
bingoFlag = True
break
if bingoFlag:
break
nd in range(len(numbers)):
winningFields = []
number = numbers[round]
for fieldIdx in range(len(fields)):
field = fields[fieldIdx]
if checkNumber(field, number):
winningFields.append(field)
if len(winningFields):
for field in winningFields:
fields.remove(field)
if len(fields) == 0:
print("Last winning field:", winningFields[-1])
print("Solution for part 2: ", sum([sum(row) for row in winningFields[-1]]) * number)
| true | true |
1c3d356e4c9f0e37834a40bc83b202f056f4ab76 | 3,814 | py | Python | examples/ApseLock/makeplot.py | alex-w/vplanet | e901ac08208f7fd5edb30677f32f36619eb8ca8c | [
"MIT"
] | null | null | null | examples/ApseLock/makeplot.py | alex-w/vplanet | e901ac08208f7fd5edb30677f32f36619eb8ca8c | [
"MIT"
] | null | null | null | examples/ApseLock/makeplot.py | alex-w/vplanet | e901ac08208f7fd5edb30677f32f36619eb8ca8c | [
"MIT"
] | null | null | null | """
This script produces a reproduction of Figures 2 and 3 from Rodriguez+2011, the
dynamics of CoRoT-7b and c, using VPLANET's STELLAR, EQTIDE, and DISTORB modules.
David P. Fleming, University of Washington, 2018
"""
from __future__ import division, print_function

import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import vplot as vpl
import sys

# Check correct number of arguments: exactly one, the output format.
# NOTE(review): `exit` is the interactive-shell helper; `sys.exit` is the
# conventional choice in scripts — confirm before changing.
if (len(sys.argv) != 2):
    print('ERROR: Incorrect number of arguments.')
    print('Usage: '+sys.argv[0]+' <pdf | png>')
    exit(1)
if (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):
    print('ERROR: Unknown file format: '+sys.argv[1])
    print('Options are: pdf, png')
    exit(1)

#Typical plot parameters that make for pretty plot
mpl.rcParams['figure.figsize'] = (10,8)
mpl.rcParams['font.size'] = 16.0

# Load data from the VPLANET output files in the current directory.
output = vpl.GetOutput()

# Extract data for both planets (b and c).
time = output.b.Time/1.0e6 # Scale to Myr
ecc1 = output.b.Eccentricity
ecc2 = output.c.Eccentricity
varpi1 = output.b.LongP
varpi2 = output.c.LongP
a1 = output.b.SemiMajorAxis
a2 = output.c.SemiMajorAxis

# Plot: 2x2 grid sharing the time axis.
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True)
color = "k"

## Upper left: a1 ##
axes[0,0].plot(time, a1, color="C3", zorder=-1, label="CoRoT-7b")
# Format
axes[0,0].set_xlim(time.min(),time.max())
axes[0,0].legend(loc="best")
axes[0,0].set_ylim(0.0165,0.019)
axes[0,0].set_ylabel("Semi-major Axis [AU]")

## Upper right: eccentricities ##
axes[0,1].plot(time, ecc1, color="C3", zorder=-1)
axes[0,1].plot(time, ecc2, color="C0", zorder=-1)
# Format
axes[0,1].set_xlim(time.min(),time.max())
axes[0,1].set_ylim(0.0,0.2)
axes[0,1].set_ylabel("Eccentricity")

## Lower left: a2 ##
axes[1,0].plot(time, a2, color="C0", zorder=-1, label="CoRoT-7c")
# Format
axes[1,0].set_xlim(time.min(),time.max())
axes[1,0].legend(loc="best")
axes[1,0].set_ylim(0.0459,0.0462)
axes[1,0].set_xlabel("Time [Myr]")
axes[1,0].set_ylabel("Semi-major Axis [AU]")

## Lower right: diff between longitude of periapses ##
# Wrapped into [0, 360) degrees via fmod of the absolute difference.
varpiDiff = np.fabs(np.fmod(varpi1-varpi2, 360.0))
axes[1,1].scatter(time, varpiDiff, color="C3", s=10, zorder=-1)
# Format
axes[1,1].set_xlim(time.min(),time.max())
axes[1,1].set_ylim(0, 360)
axes[1,1].set_xlabel("Time [Myr]")
axes[1,1].set_ylabel(r"$\Delta \varpi$ [$^{\circ}$]")

# Final formating
fig.tight_layout()
for ax in axes.flatten():
    # Rasterize everything below zorder 0 (the data) to keep file size down.
    ax.set_rasterization_zorder(0)
    # Set tick locations.
    # NOTE(review): labels are set before the tick positions — some
    # matplotlib versions warn or mislabel in that order; confirm.
    ax.set_xticklabels(["0", "2", "4", "6", "8", "10"])
    ax.set_xticks([0, 2, 4, 6, 8, 10])

# Inset 1 (over upper-right panel): late-term eccentricity damping, log scale.
inset1 = fig.add_axes([0.74, 0.735, 0.2, 0.2])
inset1.plot(time, ecc1, color="C3", zorder=20)
inset1.plot(time, ecc2, color="C0", zorder=20)
inset1.set_xlabel("Time [Myr]", fontsize=12)
inset1.set_ylabel("Eccentricity", fontsize=12)
inset1.set_xlim(8,10)
inset1.set_xticks([8, 9, 10])
inset1.set_xticklabels(["8", "9", "10"], fontsize=12)
inset1.set_yticks([1.0e-4, 1.0e-3, 1.0e-2])
inset1.set_yticklabels(["$10^{-4}$", "$10^{-3}$", "$10^{-2}$"], fontsize=12)
inset1.set_yscale("log")

# Inset 2 (over lower-right panel): early apsidal locking, log time axis.
inset2 = fig.add_axes([0.74, 0.235, 0.2, 0.2])
inset2.scatter(time, varpiDiff, color="C3", s=10, zorder=20)
inset2.set_xlim(0.1,3)
inset2.set_ylim(0,360)
inset2.set_xscale("log")
inset2.set_yticks([0, 180, 360])
inset2.set_yticklabels(["0", "180", "360"], fontsize=12)
inset2.set_ylabel(r"$\Delta \varpi$ [$^{\circ}$]", fontsize=12)
inset2.set_xticks([0.1, 0.25, 0.5, 1, 2, 3])
inset2.set_xticklabels(["0.1", "0.25", "0.5", "1", "2", "3"], fontsize=12)
inset2.set_xlabel("Time [Myr]", fontsize=12)

# Save in the requested format (validated above).
if (sys.argv[1] == 'pdf'):
    fig.savefig('ApseLock.pdf', bbox_inches="tight", dpi=600)
if (sys.argv[1] == 'png'):
    fig.savefig('ApseLock.png', bbox_inches="tight", dpi=600)

#fig.savefig("Rodriguez2011_Figs23.pdf", bbox_inches="tight", dpi=600)
| 30.031496 | 81 | 0.674095 |
from __future__ import division, print_function
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import vplot as vpl
import sys
if (len(sys.argv) != 2):
print('ERROR: Incorrect number of arguments.')
print('Usage: '+sys.argv[0]+' <pdf | png>')
exit(1)
if (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):
print('ERROR: Unknown file format: '+sys.argv[1])
print('Options are: pdf, png')
exit(1)
mpl.rcParams['figure.figsize'] = (10,8)
mpl.rcParams['font.size'] = 16.0
output = vpl.GetOutput()
time = output.b.Time/1.0e6
ecc1 = output.b.Eccentricity
ecc2 = output.c.Eccentricity
varpi1 = output.b.LongP
varpi2 = output.c.LongP
a1 = output.b.SemiMajorAxis
a2 = output.c.SemiMajorAxis
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True)
color = "k"
a1, color="C3", zorder=-1, label="CoRoT-7b")
axes[0,0].set_xlim(time.min(),time.max())
axes[0,0].legend(loc="best")
axes[0,0].set_ylim(0.0165,0.019)
axes[0,0].set_ylabel("Semi-major Axis [AU]")
C3", zorder=-1)
axes[0,1].plot(time, ecc2, color="C0", zorder=-1)
axes[0,1].set_xlim(time.min(),time.max())
axes[0,1].set_ylim(0.0,0.2)
axes[0,1].set_ylabel("Eccentricity")
a2, color="C0", zorder=-1, label="CoRoT-7c")
axes[1,0].set_xlim(time.min(),time.max())
axes[1,0].legend(loc="best")
axes[1,0].set_ylim(0.0459,0.0462)
axes[1,0].set_xlabel("Time [Myr]")
axes[1,0].set_ylabel("Semi-major Axis [AU]")
[1,1].scatter(time, varpiDiff, color="C3", s=10, zorder=-1)
axes[1,1].set_xlim(time.min(),time.max())
axes[1,1].set_ylim(0, 360)
axes[1,1].set_xlabel("Time [Myr]")
axes[1,1].set_ylabel(r"$\Delta \varpi$ [$^{\circ}$]")
fig.tight_layout()
for ax in axes.flatten():
ax.set_rasterization_zorder(0)
ax.set_xticklabels(["0", "2", "4", "6", "8", "10"])
ax.set_xticks([0, 2, 4, 6, 8, 10])
inset1 = fig.add_axes([0.74, 0.735, 0.2, 0.2])
inset1.plot(time, ecc1, color="C3", zorder=20)
inset1.plot(time, ecc2, color="C0", zorder=20)
inset1.set_xlabel("Time [Myr]", fontsize=12)
inset1.set_ylabel("Eccentricity", fontsize=12)
inset1.set_xlim(8,10)
inset1.set_xticks([8, 9, 10])
inset1.set_xticklabels(["8", "9", "10"], fontsize=12)
inset1.set_yticks([1.0e-4, 1.0e-3, 1.0e-2])
inset1.set_yticklabels(["$10^{-4}$", "$10^{-3}$", "$10^{-2}$"], fontsize=12)
inset1.set_yscale("log")
inset2 = fig.add_axes([0.74, 0.235, 0.2, 0.2])
inset2.scatter(time, varpiDiff, color="C3", s=10, zorder=20)
inset2.set_xlim(0.1,3)
inset2.set_ylim(0,360)
inset2.set_xscale("log")
inset2.set_yticks([0, 180, 360])
inset2.set_yticklabels(["0", "180", "360"], fontsize=12)
inset2.set_ylabel(r"$\Delta \varpi$ [$^{\circ}$]", fontsize=12)
inset2.set_xticks([0.1, 0.25, 0.5, 1, 2, 3])
inset2.set_xticklabels(["0.1", "0.25", "0.5", "1", "2", "3"], fontsize=12)
inset2.set_xlabel("Time [Myr]", fontsize=12)
if (sys.argv[1] == 'pdf'):
fig.savefig('ApseLock.pdf', bbox_inches="tight", dpi=600)
if (sys.argv[1] == 'png'):
fig.savefig('ApseLock.png', bbox_inches="tight", dpi=600)
| true | true |
1c3d36a478576a963a1f769d426cbef47781f685 | 105 | py | Python | test/main.py | maagauiya/Adv_Python_assign1 | 12b04135b2a5c93d9e1e11c393322cc2e4b4ac7f | [
"Apache-2.0"
] | null | null | null | test/main.py | maagauiya/Adv_Python_assign1 | 12b04135b2a5c93d9e1e11c393322cc2e4b4ac7f | [
"Apache-2.0"
] | null | null | null | test/main.py | maagauiya/Adv_Python_assign1 | 12b04135b2a5c93d9e1e11c393322cc2e4b4ac7f | [
"Apache-2.0"
] | null | null | null | import assign1
print("Enter the number up to which you want to filter")
n=int(input())
assign1.topN(n) | 26.25 | 57 | 0.733333 | import assign1
print("Enter the number up to which you want to filter")
n=int(input())
assign1.topN(n) | true | true |
1c3d3bad83946917e9e5257c062b1be0fd493220 | 9,765 | py | Python | financial_statement/models.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | null | null | null | financial_statement/models.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | null | null | null | financial_statement/models.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | 1 | 2021-10-15T20:10:39.000Z | 2021-10-15T20:10:39.000Z | from dataclasses import dataclass
from datetime import datetime
from django.db import models
from django.db.models.fields import DateTimeField
from django.urls import reverse
@dataclass
class FinancialStatementEntity:
    """Plain in-memory record of one financial statement.

    Field names and types mirror the ``FinancialStatement`` Django model in
    this module; the field order below defines the generated ``__init__``
    signature, so it must not be reordered.

    NOTE(review): ratio / percentage / per-share figures (``dividend_yield``,
    ``gross_margin``, ``current_ratio``, ``earnings_per_basic_share``, ...)
    are declared ``int``, which discards all fractional precision — confirm
    whether these should be floats or decimals.
    """
    # Identifying / reporting-period metadata.
    ticker: str
    period: str
    calendar_date: str
    report_period: str
    updated: str
    # Financial figures (all declared int; see NOTE above).
    accumulated_other_comprehensive_income: int
    assets: int
    assets_average: int
    assets_current: int
    asset_turnover: int
    assets_non_current: int
    book_value_per_share: int
    capital_expenditure: int
    cash_and_equivalents: int
    cash_and_equivalentsUSD: int
    cost_of_revenue: int
    consolidated_income: int
    current_ratio: int
    debt_to_equity_ratio: int
    debt: int
    debt_current: int
    debt_non_current: int
    debtUSD: int
    deferred_revenue: int
    depreciation_amortization_and_accretion: int
    deposits: int
    dividend_yield: int
    dividends_per_basic_common_share: int
    earning_before_interest_taxes: int
    earnings_before_interest_taxes_depreciation_amortization: int
    EBITDA_margin: int
    earnings_before_interest_taxes_depreciation_amortizationUSD: int
    earning_before_interest_taxesUSD: int
    earnings_before_tax: int
    earnings_per_basic_share: int
    earnings_per_diluted_share: int
    earnings_per_basic_shareUSD: int
    shareholders_equity: int
    average_equity: int
    shareholders_equityUSD: int
    enterprise_value: int
    enterprise_value_overEBIT: int
    enterprise_value_overEBITDA: int
    free_cash_flow: int
    free_cash_flow_per_share: int
    foreign_currencyUSD_exchange_Rate: int
    gross_profit: int
    gross_margin: int
    goodwill_and_intangible_assets: int
    interest_expense: int
    invested_Capital: int
    invested_capital_average: int
    inventory: int
    investments: int
    investments_Current: int
    investments_non_current: int
    total_liabilities: int
    current_liabilities: int
    liabilities_non_current: int
    market_capitalization: int
    net_cash_flow: int
    net_cash_flow_business_acquisitions_disposals: int
    issuance_equity_shares: int
    issuance_debt_securities: int
    payment_dividends_other_cash_distributions: int
    net_cash_flow_from_financing: int
    net_cash_flow_from_investing: int
    net_cash_flow_investment_acquisitions_disposals: int
    net_cash_flow_from_operations: int
    effect_of_exchange_rate_changes_on_cash: int
    net_income: int
    net_income_common_stock: int
    net_income_common_stockUSD: int
    net_loss_income_from_discontinued_operations: int
    net_income_to_non_controlling_interests: int
    profit_margin: int
    operating_expenses: int
    operating_income: int
    trade_and_non_trade_payables: int
    payout_ratio: int
    price_to_book_value: int
    price_earnings: int
    price_to_earnings_ratio: int
    property_plant_equipment_net: int
    preferred_dividends_income_statement_impact: int
    share_price_adjusted_close: int
    price_sales: int
    price_to_sales_ratio: int
    trade_and_non_trade_receivables: int
    accumulated_retained_earnings_deficit: int
    revenues: int
    revenuesUSD: int
    research_and_development_expense: int
    return_on_average_assets: int
    return_on_average_equity: int
    return_on_invested_capital: int
    return_on_sales: int
    share_based_compensation: int
    selling_general_and_administrative_expense: int
    share_factor: int
    shares: int
    weighted_average_shares: int
    weighted_average_shares_diluted: int
    sales_per_share: int
    tangible_asset_value: int
    tax_assets: int
    income_tax_expense: int
    tax_liabilities: int
    tangible_assets_book_value_per_share: int
    working_capital: int
class FinancialStatement(models.Model):
ticker = models.CharField(max_length=10)
period = models.CharField(max_length=10)
calendar_date = models.CharField(max_length=10)
report_period = models.CharField(max_length=10)
updated = models.CharField(max_length=10)
accumulated_other_comprehensive_income = models.IntegerField()
assets = models.IntegerField()
assets_average = models.IntegerField()
assets_current = models.IntegerField()
asset_turnover = models.IntegerField()
assets_non_current = models.IntegerField()
book_value_per_share = models.IntegerField()
capital_expenditure = models.IntegerField()
cash_and_equivalents = models.IntegerField()
cash_and_equivalentsUSD = models.IntegerField()
cost_of_revenue = models.IntegerField()
consolidated_income = models.IntegerField()
current_ratio = models.IntegerField()
debt_to_equity_ratio = models.IntegerField()
debt = models.IntegerField()
debt_current = models.IntegerField()
debt_non_current = models.IntegerField()
debtUSD = models.IntegerField()
deferred_revenue = models.IntegerField()
depreciation_amortization_and_accretion = models.IntegerField()
deposits = models.IntegerField()
dividend_yield = models.IntegerField()
dividends_per_basic_common_share = models.IntegerField()
earning_before_interest_taxes = models.IntegerField()
earnings_before_interest_taxes_depreciation_amortization = models.IntegerField()
EBITDA_margin = models.IntegerField()
earnings_before_interest_taxes_depreciation_amortizationUSD = models.IntegerField()
earning_before_interest_taxesUSD = models.IntegerField()
earnings_before_tax = models.IntegerField()
earnings_per_basic_share = models.IntegerField()
earnings_per_diluted_share = models.IntegerField()
earnings_per_basic_shareUSD = models.IntegerField()
shareholders_equity = models.IntegerField()
average_equity = models.IntegerField()
shareholders_equityUSD = models.IntegerField()
enterprise_value = models.IntegerField()
enterprise_value_overEBIT = models.IntegerField()
enterprise_value_overEBITDA = models.IntegerField()
free_cash_flow = models.IntegerField()
free_cash_flow_per_share = models.IntegerField()
foreign_currencyUSD_exchange_Rate = models.IntegerField()
gross_profit = models.IntegerField()
gross_margin = models.IntegerField()
goodwill_and_intangible_assets = models.IntegerField()
interest_expense = models.IntegerField()
invested_Capital = models.IntegerField()
invested_capital_average = models.IntegerField()
inventory = models.IntegerField()
investments = models.IntegerField()
investments_Current = models.IntegerField()
investments_non_current = models.IntegerField()
total_liabilities = models.IntegerField()
current_liabilities = models.IntegerField()
liabilities_non_current = models.IntegerField()
market_capitalization = models.IntegerField()
net_cash_flow = models.IntegerField()
net_cash_flow_business_acquisitions_disposals = models.IntegerField()
issuance_equity_shares = models.IntegerField()
issuance_debt_securities = models.IntegerField()
payment_dividends_other_cash_distributions = models.IntegerField()
net_cash_flow_from_financing = models.IntegerField()
net_cash_flow_from_investing = models.IntegerField()
net_cash_flow_investment_acquisitions_disposals = models.IntegerField()
net_cash_flow_from_operations = models.IntegerField()
effect_of_exchange_rate_changes_on_cash = models.IntegerField()
net_income = models.IntegerField()
net_income_common_stock = models.IntegerField()
net_income_common_stockUSD = models.IntegerField()
net_loss_income_from_discontinued_operations = models.IntegerField()
net_income_to_non_controlling_interests = models.IntegerField()
profit_margin = models.IntegerField()
operating_expenses = models.IntegerField()
operating_income = models.IntegerField()
trade_and_non_trade_payables = models.IntegerField()
payout_ratio = models.IntegerField()
price_to_book_value = models.IntegerField()
price_earnings = models.IntegerField()
price_to_earnings_ratio = models.IntegerField()
property_plant_equipment_net = models.IntegerField()
preferred_dividends_income_statement_impact = models.IntegerField()
share_price_adjusted_close = models.IntegerField()
price_sales = models.IntegerField()
price_to_sales_ratio = models.IntegerField()
trade_and_non_trade_receivables = models.IntegerField()
accumulated_retained_earnings_deficit = models.IntegerField()
revenues = models.IntegerField()
revenuesUSD = models.IntegerField()
research_and_development_expense = models.IntegerField()
return_on_average_assets = models.IntegerField()
return_on_average_equity = models.IntegerField()
return_on_invested_capital = models.IntegerField()
return_on_sales = models.IntegerField()
share_based_compensation = models.IntegerField()
selling_general_and_administrative_expense = models.IntegerField()
share_factor = models.IntegerField()
shares = models.IntegerField()
weighted_average_shares = models.IntegerField()
weighted_average_shares_diluted = models.IntegerField()
sales_per_share = models.IntegerField()
tangible_asset_value = models.IntegerField()
tax_assets = models.IntegerField()
income_tax_expense = models.IntegerField()
tax_liabilities = models.IntegerField()
tangible_assets_book_value_per_share = models.IntegerField()
working_capital = models.IntegerField()
    def get_absolute_url(self):
        """Return the canonical detail-page URL for this statement row."""
        return reverse(
            "financial_statement:financial_statement", kwargs={"id": self.id}
        )
    class Meta:
        # One statement row per (ticker, period, calendar_date, report_period).
        constraints = [
            models.UniqueConstraint(
                fields=["ticker", "period", "calendar_date", "report_period"],
                name="financial statement quarterly/yearly restraint",
            ),
        ]
| 39.534413 | 87 | 0.774296 | from dataclasses import dataclass
from datetime import datetime
from django.db import models
from django.db.models.fields import DateTimeField
from django.urls import reverse
@dataclass
class FinancialStatementEntity:
    """Plain value object mirroring the columns of the FinancialStatement
    model, one attribute per financial metric.

    All fields are positional in this declaration order (that order is the
    dataclass __init__ contract — do not reorder).
    """
    # Reporting-period metadata (string labels/dates as delivered upstream).
    ticker: str
    period: str
    calendar_date: str
    report_period: str
    updated: str
    # Fundamentals and derived ratios, one attribute per data column.
    # NOTE(review): these are typed int; presumably ratios arrive pre-scaled
    # by the data provider — confirm against the ingestion source.
    accumulated_other_comprehensive_income: int
    assets: int
    assets_average: int
    assets_current: int
    asset_turnover: int
    assets_non_current: int
    book_value_per_share: int
    capital_expenditure: int
    cash_and_equivalents: int
    cash_and_equivalentsUSD: int
    cost_of_revenue: int
    consolidated_income: int
    current_ratio: int
    debt_to_equity_ratio: int
    debt: int
    debt_current: int
    debt_non_current: int
    debtUSD: int
    deferred_revenue: int
    depreciation_amortization_and_accretion: int
    deposits: int
    dividend_yield: int
    dividends_per_basic_common_share: int
    earning_before_interest_taxes: int
    earnings_before_interest_taxes_depreciation_amortization: int
    EBITDA_margin: int
    earnings_before_interest_taxes_depreciation_amortizationUSD: int
    earning_before_interest_taxesUSD: int
    earnings_before_tax: int
    earnings_per_basic_share: int
    earnings_per_diluted_share: int
    earnings_per_basic_shareUSD: int
    shareholders_equity: int
    average_equity: int
    shareholders_equityUSD: int
    enterprise_value: int
    enterprise_value_overEBIT: int
    enterprise_value_overEBITDA: int
    free_cash_flow: int
    free_cash_flow_per_share: int
    foreign_currencyUSD_exchange_Rate: int
    gross_profit: int
    gross_margin: int
    goodwill_and_intangible_assets: int
    interest_expense: int
    invested_Capital: int
    invested_capital_average: int
    inventory: int
    investments: int
    investments_Current: int
    investments_non_current: int
    total_liabilities: int
    current_liabilities: int
    liabilities_non_current: int
    market_capitalization: int
    net_cash_flow: int
    net_cash_flow_business_acquisitions_disposals: int
    issuance_equity_shares: int
    issuance_debt_securities: int
    payment_dividends_other_cash_distributions: int
    net_cash_flow_from_financing: int
    net_cash_flow_from_investing: int
    net_cash_flow_investment_acquisitions_disposals: int
    net_cash_flow_from_operations: int
    effect_of_exchange_rate_changes_on_cash: int
    net_income: int
    net_income_common_stock: int
    net_income_common_stockUSD: int
    net_loss_income_from_discontinued_operations: int
    net_income_to_non_controlling_interests: int
    profit_margin: int
    operating_expenses: int
    operating_income: int
    trade_and_non_trade_payables: int
    payout_ratio: int
    price_to_book_value: int
    price_earnings: int
    price_to_earnings_ratio: int
    property_plant_equipment_net: int
    preferred_dividends_income_statement_impact: int
    share_price_adjusted_close: int
    price_sales: int
    price_to_sales_ratio: int
    trade_and_non_trade_receivables: int
    accumulated_retained_earnings_deficit: int
    revenues: int
    revenuesUSD: int
    research_and_development_expense: int
    return_on_average_assets: int
    return_on_average_equity: int
    return_on_invested_capital: int
    return_on_sales: int
    share_based_compensation: int
    selling_general_and_administrative_expense: int
    share_factor: int
    shares: int
    weighted_average_shares: int
    weighted_average_shares_diluted: int
    sales_per_share: int
    tangible_asset_value: int
    tax_assets: int
    income_tax_expense: int
    tax_liabilities: int
    tangible_assets_book_value_per_share: int
    working_capital: int
class FinancialStatement(models.Model):
    """Quarterly/yearly financial statement facts for one ticker.

    NOTE(review): monetary columns use IntegerField (32-bit range on most
    database backends); values such as market_capitalization or assets can
    exceed 2**31-1 — consider BigIntegerField. TODO confirm against the
    ingested data ranges.
    """
    # Reporting-period metadata.
    ticker = models.CharField(max_length=10)
    period = models.CharField(max_length=10)
    calendar_date = models.CharField(max_length=10)
    report_period = models.CharField(max_length=10)
    updated = models.CharField(max_length=10)
    # Fundamentals and derived ratios, one integer column per metric.
    accumulated_other_comprehensive_income = models.IntegerField()
    assets = models.IntegerField()
    assets_average = models.IntegerField()
    assets_current = models.IntegerField()
    asset_turnover = models.IntegerField()
    assets_non_current = models.IntegerField()
    book_value_per_share = models.IntegerField()
    capital_expenditure = models.IntegerField()
    cash_and_equivalents = models.IntegerField()
    cash_and_equivalentsUSD = models.IntegerField()
    cost_of_revenue = models.IntegerField()
    consolidated_income = models.IntegerField()
    current_ratio = models.IntegerField()
    debt_to_equity_ratio = models.IntegerField()
    debt = models.IntegerField()
    debt_current = models.IntegerField()
    debt_non_current = models.IntegerField()
    debtUSD = models.IntegerField()
    deferred_revenue = models.IntegerField()
    depreciation_amortization_and_accretion = models.IntegerField()
    deposits = models.IntegerField()
    dividend_yield = models.IntegerField()
    dividends_per_basic_common_share = models.IntegerField()
    earning_before_interest_taxes = models.IntegerField()
    earnings_before_interest_taxes_depreciation_amortization = models.IntegerField()
    EBITDA_margin = models.IntegerField()
    earnings_before_interest_taxes_depreciation_amortizationUSD = models.IntegerField()
    earning_before_interest_taxesUSD = models.IntegerField()
    earnings_before_tax = models.IntegerField()
    earnings_per_basic_share = models.IntegerField()
    earnings_per_diluted_share = models.IntegerField()
    earnings_per_basic_shareUSD = models.IntegerField()
    shareholders_equity = models.IntegerField()
    average_equity = models.IntegerField()
    shareholders_equityUSD = models.IntegerField()
    enterprise_value = models.IntegerField()
    enterprise_value_overEBIT = models.IntegerField()
    enterprise_value_overEBITDA = models.IntegerField()
    free_cash_flow = models.IntegerField()
    free_cash_flow_per_share = models.IntegerField()
    foreign_currencyUSD_exchange_Rate = models.IntegerField()
    gross_profit = models.IntegerField()
    gross_margin = models.IntegerField()
    goodwill_and_intangible_assets = models.IntegerField()
    interest_expense = models.IntegerField()
    invested_Capital = models.IntegerField()
    invested_capital_average = models.IntegerField()
    inventory = models.IntegerField()
    investments = models.IntegerField()
    investments_Current = models.IntegerField()
    investments_non_current = models.IntegerField()
    total_liabilities = models.IntegerField()
    current_liabilities = models.IntegerField()
    liabilities_non_current = models.IntegerField()
    market_capitalization = models.IntegerField()
    net_cash_flow = models.IntegerField()
    net_cash_flow_business_acquisitions_disposals = models.IntegerField()
    issuance_equity_shares = models.IntegerField()
    issuance_debt_securities = models.IntegerField()
    payment_dividends_other_cash_distributions = models.IntegerField()
    net_cash_flow_from_financing = models.IntegerField()
    net_cash_flow_from_investing = models.IntegerField()
    net_cash_flow_investment_acquisitions_disposals = models.IntegerField()
    net_cash_flow_from_operations = models.IntegerField()
    effect_of_exchange_rate_changes_on_cash = models.IntegerField()
    net_income = models.IntegerField()
    net_income_common_stock = models.IntegerField()
    net_income_common_stockUSD = models.IntegerField()
    net_loss_income_from_discontinued_operations = models.IntegerField()
    net_income_to_non_controlling_interests = models.IntegerField()
    profit_margin = models.IntegerField()
    operating_expenses = models.IntegerField()
    operating_income = models.IntegerField()
    trade_and_non_trade_payables = models.IntegerField()
    payout_ratio = models.IntegerField()
    price_to_book_value = models.IntegerField()
    price_earnings = models.IntegerField()
    price_to_earnings_ratio = models.IntegerField()
    property_plant_equipment_net = models.IntegerField()
    preferred_dividends_income_statement_impact = models.IntegerField()
    share_price_adjusted_close = models.IntegerField()
    price_sales = models.IntegerField()
    price_to_sales_ratio = models.IntegerField()
    trade_and_non_trade_receivables = models.IntegerField()
    accumulated_retained_earnings_deficit = models.IntegerField()
    revenues = models.IntegerField()
    revenuesUSD = models.IntegerField()
    research_and_development_expense = models.IntegerField()
    return_on_average_assets = models.IntegerField()
    return_on_average_equity = models.IntegerField()
    return_on_invested_capital = models.IntegerField()
    return_on_sales = models.IntegerField()
    share_based_compensation = models.IntegerField()
    selling_general_and_administrative_expense = models.IntegerField()
    share_factor = models.IntegerField()
    shares = models.IntegerField()
    weighted_average_shares = models.IntegerField()
    weighted_average_shares_diluted = models.IntegerField()
    sales_per_share = models.IntegerField()
    tangible_asset_value = models.IntegerField()
    tax_assets = models.IntegerField()
    income_tax_expense = models.IntegerField()
    tax_liabilities = models.IntegerField()
    tangible_assets_book_value_per_share = models.IntegerField()
    working_capital = models.IntegerField()

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this statement row."""
        return reverse(
            "financial_statement:financial_statement", kwargs={"id": self.id}
        )

    class Meta:
        # One statement row per (ticker, period, calendar_date, report_period).
        constraints = [
            models.UniqueConstraint(
                fields=["ticker", "period", "calendar_date", "report_period"],
                name="financial statement quarterly/yearly restraint",
            ),
        ]
| true | true |
1c3d3c3ea83ac485f66ecd43061b2ffea21ddbae | 516 | py | Python | src/utils/download_data.py | CBIIT/NCI-DOE-Collab-Pilot1-Center-Loss | ef6a27d8cc971b81ecf8c5a60fad6b24197a7630 | [
"MIT"
] | null | null | null | src/utils/download_data.py | CBIIT/NCI-DOE-Collab-Pilot1-Center-Loss | ef6a27d8cc971b81ecf8c5a60fad6b24197a7630 | [
"MIT"
] | null | null | null | src/utils/download_data.py | CBIIT/NCI-DOE-Collab-Pilot1-Center-Loss | ef6a27d8cc971b81ecf8c5a60fad6b24197a7630 | [
"MIT"
] | 1 | 2021-08-11T16:20:16.000Z | 2021-08-11T16:20:16.000Z | import os
from file_utils import get_file
# MoDaC collection that hosts the drug-response prediction dataset.
response_collection_path="https://modac.cancer.gov/api/v2/dataObject/NCI_DOE_Archive/JDACS4C/JDACS4C_Pilot_1/cancer_drug_response_prediction_dataset"

# Files to fetch: cell-line metadata and the combined RNA-seq matrix.
combined_cl = "combined_cl_metadata"
cl_url = response_collection_path + "/" + combined_cl
rnaseq = "combined_rnaseq_data"
rnaseq_url = response_collection_path + "/" + rnaseq

# Download both files into data/ftp_data.
# NOTE(review): get_file comes from the project-local file_utils module;
# presumably it caches/skips already-downloaded files — confirm there.
data_dest = os.path.join("data", "ftp_data")
get_file(combined_cl , cl_url, datadir=data_dest)
get_file(rnaseq, rnaseq_url, datadir=data_dest)
| 36.857143 | 149 | 0.813953 | import os
from file_utils import get_file
response_collection_path="https://modac.cancer.gov/api/v2/dataObject/NCI_DOE_Archive/JDACS4C/JDACS4C_Pilot_1/cancer_drug_response_prediction_dataset"
combined_cl = "combined_cl_metadata"
cl_url = response_collection_path + "/" + combined_cl
rnaseq = "combined_rnaseq_data"
rnaseq_url = response_collection_path + "/" + rnaseq
data_dest = os.path.join("data", "ftp_data")
get_file(combined_cl , cl_url, datadir=data_dest)
get_file(rnaseq, rnaseq_url, datadir=data_dest)
| true | true |
1c3d3ce20bea319b44b4b69b58013997209dda26 | 8,639 | py | Python | src/niweb/apps/noclook/middleware.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/niweb/apps/noclook/middleware.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-24T12:41:11.000Z | 2020-03-31T10:10:04.000Z | src/niweb/apps/noclook/middleware.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-02-25T14:58:20.000Z | 2019-02-25T14:58:20.000Z | # -*- coding: utf-8 -*-
__author__ = 'ffuentes'
from datetime import datetime
from django.conf import settings
from django.contrib.auth.middleware import get_user
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.base import UpdateError
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect
from django.utils.cache import patch_vary_headers
from django.utils.functional import SimpleLazyObject
from django.utils.http import cookie_date
from graphql_jwt import signals
from graphql_jwt.settings import jwt_settings
from graphql_jwt.shortcuts import get_token, get_user_by_token
from graphql_jwt.refresh_token.shortcuts import refresh_token_lazy
from graphql_jwt.refresh_token.signals import refresh_token_rotated
from graphql_jwt.utils import get_credentials, get_payload
from graphql_jwt.exceptions import JSONWebTokenError, JSONWebTokenExpired
from importlib import import_module
import time
import logging
logger = logging.getLogger(__name__)
def token_is_expired(token):
    """Return True when *token* cannot be decoded as a valid, unexpired JWT."""
    try:
        get_payload(token)
    except (JSONWebTokenError, JSONWebTokenExpired):
        return True
    return False
def get_user_from_session_key(session_key):
    """Look up the Django User bound to the session identified by *session_key*."""
    decoded = Session.objects.get(session_key=session_key).get_decoded()
    return User.objects.get(id=decoded.get('_auth_user_id'))
def delete_jwt_cookie(request, response):
    """Expire the JWT cookie by re-setting it empty with a date in the past."""
    expired_at = cookie_date(time.time() - request.session.get_expiry_age())
    response.set_cookie(
        jwt_settings.JWT_COOKIE_NAME,
        '',
        domain=settings.COOKIE_DOMAIN,
        expires=expired_at,
        secure=settings.JWT_COOKIE_SECURE or None,
        httponly=settings.JWT_COOKIE_HTTPONLY or None,
        samesite='Lax',
    )
class SRIJWTAuthMiddleware(object):
    """Middleware that keeps the graphql_jwt cookie and the Django session
    cookie in sync.

    Request phase: authenticate from the JWT cookie, create a session if
    missing, and re-issue an expired JWT from a still-valid session.
    Response phase: (re)set the JWT and session cookies with matching
    lifetimes, or drop both when the session is empty/broken.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        session_created = False
        has_token = False

        # Default lazy user; replaced below if a valid JWT is present.
        request.user = SimpleLazyObject(lambda: get_user(request))

        token = get_credentials(request)
        if token is not None and token != '' and token != 'None' and \
                not token_is_expired(token):
            user = get_user_by_token(token, request)
            request.user = user
            has_token = True

        # Ensure a session exists for this request.
        if not hasattr(request, 'session'):
            session_engine = import_module(settings.SESSION_ENGINE)
            session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
            # If the session cannot be saved, drop both cookies and redirect
            # so the client retries with a clean slate.
            try:
                request.session = session_engine.SessionStore(session_key)
                request.session.save()
                session_created = True
            except UpdateError:
                response = redirect(request.get_full_path())
                response.delete_cookie(
                    settings.SESSION_COOKIE_NAME,
                    path=settings.SESSION_COOKIE_PATH,
                    domain=settings.SESSION_COOKIE_DOMAIN,
                )
                response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
                patch_vary_headers(response, ('Cookie',))
                return response

        # Cookie lifetime mirrors the session expiry.
        max_age = request.session.get_expiry_age()
        expires_time = time.time() + max_age
        cookie_expires = cookie_date(expires_time)
        if request.session.get_expire_at_browser_close():
            max_age = None
            cookie_expires = None

        # Expired JWT but (possibly) live session: re-issue from the session.
        if token and token_is_expired(token):
            cookie_token = request.COOKIES.get(jwt_settings.JWT_COOKIE_NAME)
            session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
            if cookie_token and cookie_token != '""':
                try:
                    user = get_user_from_session_key(session_key)
                    request.user = user
                    refresh_token_lazy(request.user)
                    token = get_token(request.user)
                    # NOTE(review): refresh_token=self passes the middleware
                    # instance rather than a refresh-token object — confirm
                    # the signal receivers do not rely on that argument.
                    refresh_token_rotated.send(
                        sender=SRIJWTAuthMiddleware,
                        request=request,
                        refresh_token=self,
                    )
                    signals.token_issued.send(
                        sender=SRIJWTAuthMiddleware, request=request, user=request.user)
                except ObjectDoesNotExist:
                    # Fallback: no usable session either — expire the JWT
                    # cookie and redirect.
                    response = redirect(request.get_full_path())
                    delete_jwt_cookie(request, response)
                    patch_vary_headers(response, ('Cookie',))
                    return response

        # Run the inner middleware / view.
        response = self.get_response(request)

        # (Re)issue the JWT cookie for authenticated users that had no token.
        if request.user.is_authenticated and not has_token:
            token = get_token(request.user)
            signals.token_issued.send(
                sender=SRIJWTAuthMiddleware, request=request, user=request.user)
            # If the freshly fetched token is already expired, rotate it.
            if token_is_expired(token):
                refresh_token_lazy(request.user)
                token = get_token(request.user)
                refresh_token_rotated.send(
                    sender=SRIJWTAuthMiddleware,
                    request=request,
                    refresh_token=self,
                )
                signals.token_issued.send(
                    sender=SRIJWTAuthMiddleware, request=request, user=request.user)
            response.set_cookie(
                jwt_settings.JWT_COOKIE_NAME,
                token,
                domain=settings.COOKIE_DOMAIN,
                max_age=max_age,
                expires=cookie_expires,
                secure=settings.JWT_COOKIE_SECURE or None,
                httponly=settings.JWT_COOKIE_HTTPONLY or None,
                samesite='Lax',
            )
            patch_vary_headers(response, ('Cookie',))

        accessed = request.session.accessed
        modified = request.session.modified
        empty = request.session.is_empty()

        # Force session-cookie creation when:
        #  * we have a valid token but the session was just created, or
        #  * we have a token while the user is not authenticated.
        create_session_cookie = token and session_created \
            or token and not request.user.is_authenticated

        if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
            response.delete_cookie(
                settings.SESSION_COOKIE_NAME,
                path=settings.SESSION_COOKIE_PATH,
                domain=settings.SESSION_COOKIE_DOMAIN,
            )
            response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
            patch_vary_headers(response, ('Cookie',))
        else:
            if accessed:
                patch_vary_headers(response, ('Cookie',))
            try:
                SESSION_SAVE_EVERY_REQUEST = settings.SESSION_SAVE_EVERY_REQUEST
            except AttributeError:
                SESSION_SAVE_EVERY_REQUEST = None
            if (modified or SESSION_SAVE_EVERY_REQUEST) and not empty or create_session_cookie:
                # Save the session data and refresh the client cookie.
                # Skip session save for 500 responses, refs Django #3881.
                if response.status_code != 500:
                    try:
                        request.session.save()
                    except UpdateError:
                        # BUG FIX: SuspiciousOperation was raised here without
                        # ever being imported in this module, so the raise
                        # itself crashed with NameError; import it locally so
                        # the intended exception is raised.
                        from django.core.exceptions import SuspiciousOperation
                        raise SuspiciousOperation(
                            "The request's session was deleted before the "
                            "request completed. The user may have logged "
                            "out in a concurrent request, for example."
                        )
                    response.set_cookie(
                        settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=cookie_expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None,
                        httponly=settings.SESSION_COOKIE_HTTPONLY or None,
                        samesite='Strict',
                    )
        return response
| 38.73991 | 95 | 0.615581 |
__author__ = 'ffuentes'
from datetime import datetime
from django.conf import settings
from django.contrib.auth.middleware import get_user
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.base import UpdateError
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect
from django.utils.cache import patch_vary_headers
from django.utils.functional import SimpleLazyObject
from django.utils.http import cookie_date
from graphql_jwt import signals
from graphql_jwt.settings import jwt_settings
from graphql_jwt.shortcuts import get_token, get_user_by_token
from graphql_jwt.refresh_token.shortcuts import refresh_token_lazy
from graphql_jwt.refresh_token.signals import refresh_token_rotated
from graphql_jwt.utils import get_credentials, get_payload
from graphql_jwt.exceptions import JSONWebTokenError, JSONWebTokenExpired
from importlib import import_module
import time
import logging
logger = logging.getLogger(__name__)
def token_is_expired(token):
    """Return True when *token* cannot be decoded as a valid, unexpired JWT."""
    try:
        get_payload(token)
    except (JSONWebTokenError, JSONWebTokenExpired):
        return True
    return False
def get_user_from_session_key(session_key):
    """Look up the Django User bound to the session identified by *session_key*."""
    decoded = Session.objects.get(session_key=session_key).get_decoded()
    return User.objects.get(id=decoded.get('_auth_user_id'))
def delete_jwt_cookie(request, response):
    """Expire the JWT cookie by re-setting it empty with a date in the past."""
    expired_at = cookie_date(time.time() - request.session.get_expiry_age())
    response.set_cookie(
        jwt_settings.JWT_COOKIE_NAME,
        '',
        domain=settings.COOKIE_DOMAIN,
        expires=expired_at,
        secure=settings.JWT_COOKIE_SECURE or None,
        httponly=settings.JWT_COOKIE_HTTPONLY or None,
        samesite='Lax',
    )
class SRIJWTAuthMiddleware(object):
    """Middleware that keeps the graphql_jwt cookie and the Django session
    cookie in sync (authenticate from JWT on the way in; refresh/issue the
    JWT and session cookies on the way out).
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        session_created = False
        has_token = False

        # Default lazy user; replaced below if a valid JWT is present.
        request.user = SimpleLazyObject(lambda: get_user(request))
        token = get_credentials(request)
        if token is not None and token != '' and token != 'None' and \
                not token_is_expired(token):
            user = get_user_by_token(token, request)
            request.user = user
            has_token = True

        # Ensure a session exists; on save failure drop both cookies.
        if not hasattr(request, 'session'):
            session_engine = import_module(settings.SESSION_ENGINE)
            session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
            try:
                request.session = session_engine.SessionStore(session_key)
                request.session.save()
                session_created = True
            except UpdateError:
                response = redirect(request.get_full_path())
                response.delete_cookie(
                    settings.SESSION_COOKIE_NAME,
                    path=settings.SESSION_COOKIE_PATH,
                    domain=settings.SESSION_COOKIE_DOMAIN,
                )
                response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
                patch_vary_headers(response, ('Cookie',))
                return response

        # Cookie lifetime mirrors the session expiry.
        max_age = request.session.get_expiry_age()
        expires_time = time.time() + max_age
        cookie_expires = cookie_date(expires_time)
        if request.session.get_expire_at_browser_close():
            max_age = None
            cookie_expires = None

        # Expired JWT: try to re-issue it from the live session.
        if token and token_is_expired(token):
            cookie_token = request.COOKIES.get(jwt_settings.JWT_COOKIE_NAME)
            session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
            if cookie_token and cookie_token != '""':
                try:
                    user = get_user_from_session_key(session_key)
                    request.user = user
                    refresh_token_lazy(request.user)
                    token = get_token(request.user)
                    refresh_token_rotated.send(
                        sender=SRIJWTAuthMiddleware,
                        request=request,
                        refresh_token=self,
                    )
                    signals.token_issued.send(
                        sender=SRIJWTAuthMiddleware, request=request, user=request.user)
                except ObjectDoesNotExist:
                    # Fallback: expire the JWT cookie and redirect.
                    response = redirect(request.get_full_path())
                    delete_jwt_cookie(request, response)
                    patch_vary_headers(response, ('Cookie',))
                    return response

        response = self.get_response(request)

        # (Re)issue the JWT cookie for authenticated users without one.
        if request.user.is_authenticated and not has_token:
            token = get_token(request.user)
            signals.token_issued.send(
                sender=SRIJWTAuthMiddleware, request=request, user=request.user)
            if token_is_expired(token):
                refresh_token_lazy(request.user)
                token = get_token(request.user)
                refresh_token_rotated.send(
                    sender=SRIJWTAuthMiddleware,
                    request=request,
                    refresh_token=self,
                )
                signals.token_issued.send(
                    sender=SRIJWTAuthMiddleware, request=request, user=request.user)
            response.set_cookie(
                jwt_settings.JWT_COOKIE_NAME,
                token,
                domain=settings.COOKIE_DOMAIN,
                max_age=max_age,
                expires=cookie_expires,
                secure=settings.JWT_COOKIE_SECURE or None,
                httponly=settings.JWT_COOKIE_HTTPONLY or None,
                samesite='Lax',
            )
            patch_vary_headers(response, ('Cookie',))

        accessed = request.session.accessed
        modified = request.session.modified
        empty = request.session.is_empty()

        # Force session-cookie creation when a token exists but either the
        # session was just created or the user is not authenticated.
        create_session_cookie = token and session_created \
            or token and not request.user.is_authenticated

        if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
            response.delete_cookie(
                settings.SESSION_COOKIE_NAME,
                path=settings.SESSION_COOKIE_PATH,
                domain=settings.SESSION_COOKIE_DOMAIN,
            )
            response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
            patch_vary_headers(response, ('Cookie',))
        else:
            if accessed:
                patch_vary_headers(response, ('Cookie',))
            try:
                SESSION_SAVE_EVERY_REQUEST = settings.SESSION_SAVE_EVERY_REQUEST
            except AttributeError:
                SESSION_SAVE_EVERY_REQUEST = None
            if (modified or SESSION_SAVE_EVERY_REQUEST) and not empty or create_session_cookie:
                # Skip session save for 500 responses, refs Django #3881.
                if response.status_code != 500:
                    try:
                        request.session.save()
                    except UpdateError:
                        # BUG FIX: SuspiciousOperation was never imported in
                        # this module (raise crashed with NameError).
                        from django.core.exceptions import SuspiciousOperation
                        raise SuspiciousOperation(
                            "The request's session was deleted before the "
                            "request completed. The user may have logged "
                            "out in a concurrent request, for example."
                        )
                    response.set_cookie(
                        settings.SESSION_COOKIE_NAME,
                        request.session.session_key, max_age=max_age,
                        expires=cookie_expires, domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=settings.SESSION_COOKIE_SECURE or None,
                        httponly=settings.SESSION_COOKIE_HTTPONLY or None,
                        samesite='Strict',
                    )
        return response
| true | true |
1c3d3ce73cff55a4d2af4b5dacc1b4ab363aa996 | 9,948 | py | Python | tests/utests/voltha/extensions/omci/test_onu_device_entry.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 72 | 2017-01-18T02:36:34.000Z | 2022-02-12T15:28:30.000Z | tests/utests/voltha/extensions/omci/test_onu_device_entry.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 11 | 2017-03-03T17:56:09.000Z | 2022-02-11T03:38:03.000Z | tests/utests/voltha/extensions/omci/test_onu_device_entry.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 120 | 2017-02-02T23:26:11.000Z | 2022-03-13T05:30:23.000Z | #
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, main
from nose.tools import assert_raises
from nose.twistedtools import deferred
from copy import deepcopy
from mock.mock_adapter_agent import MockAdapterAgent, MockCore
from mock.mock_onu_handler import MockOnuHandler
from mock.mock_olt_handler import MockOltHandler
from mock.mock_onu import MockOnu
from voltha.extensions.omci.openomci_agent import OpenOMCIAgent, OpenOmciAgentDefaults
from voltha.extensions.omci.omci_defs import *
from common.utils.asleep import asleep
from voltha.extensions.omci.database.mib_db_api import DEVICE_ID_KEY, CLASS_ID_KEY, CREATED_KEY, \
MODIFIED_KEY, MDS_KEY, LAST_SYNC_KEY, VERSION_KEY, DatabaseStateError
from voltha.extensions.omci.database.mib_db_dict import MibDbVolatileDict
# Identifiers and defaults shared by every test case below.
DEFAULT_OLT_DEVICE_ID = 'default_olt_mock'
DEFAULT_ONU_DEVICE_ID = 'default_onu_mock'
DEFAULT_PON_ID = 0
DEFAULT_ONU_ID = 0
DEFAULT_ONU_SN = 'TEST00000001'

# Short aliases for the OMCI enumerations used in assertions.
OP = EntityOperations
RC = ReasonCodes
def chunk(indexable, chunk_size):
    """Yield successive slices of *indexable*, each chunk_size items long.

    The final slice may be shorter when len(indexable) is not an exact
    multiple of chunk_size.
    """
    start = 0
    while start < len(indexable):
        yield indexable[start:start + chunk_size]
        start += chunk_size
def hex2raw(hex_string):
    """Convert a hex string (e.g. '414243') into its raw character string."""
    pairs = (hex_string[i:i + 2] for i in range(0, len(hex_string), 2))
    return ''.join(chr(int(pair, 16)) for pair in pairs)
class TestOnuDeviceEntry(TestCase):
    """
    Test the ONU Device Entry methods of the OpenOMCI agent: add/get/
    remove/delete of devices and MIB-query behaviour across the device
    lifecycle (not started / started / deleted).
    """
    def setUp(self):
        # One mock adapter-agent, plus an OpenOMCI agent configured to use
        # the volatile (in-memory) MIB database so tests leave no state.
        self.adapter_agent = MockAdapterAgent()

        custom = deepcopy(OpenOmciAgentDefaults)
        custom['mib-synchronizer']['database'] = MibDbVolatileDict

        self.agent = OpenOMCIAgent(MockCore, support_classes=custom)
        self.agent.start()

    def tearDown(self):
        if self.agent is not None:
            self.agent.stop()

        if self.adapter_agent is not None:
            self.adapter_agent.tearDown()

    def setup_mock_olt(self, device_id=DEFAULT_OLT_DEVICE_ID):
        """Create a mock OLT handler and register its device with the adapter."""
        handler = MockOltHandler(self.adapter_agent, device_id)
        self.adapter_agent.add_device(handler.device)
        return handler

    def setup_mock_onu(self, parent_id=DEFAULT_OLT_DEVICE_ID,
                       device_id=DEFAULT_ONU_DEVICE_ID,
                       pon_id=DEFAULT_PON_ID,
                       onu_id=DEFAULT_ONU_ID,
                       serial_no=DEFAULT_ONU_SN):
        """Create a mock ONU handler; a MockOnu is attached only when a
        serial number is supplied."""
        handler = MockOnuHandler(self.adapter_agent, parent_id, device_id, pon_id, onu_id)
        handler.serial_number = serial_no
        onu = MockOnu(serial_no, self.adapter_agent, handler.device_id) \
            if serial_no is not None else None
        handler.onu_mock = onu
        return handler

    def setup_one_of_each(self):
        # Most tests will use at least one OLT with one child ONU
        self.olt_handler = self.setup_mock_olt()
        self.onu_handler = self.setup_mock_onu(parent_id=self.olt_handler.device_id)
        self.onu_device = self.onu_handler.onu_mock

        self.adapter_agent.add_child_device(self.olt_handler.device,
                                            self.onu_handler.device)

    def test_add_remove_device(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.agent.device_ids()), 0)

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        # No MIB if not started
        assert_raises(KeyError, onu_device.query_mib)

        # NOTE(review): after remove_device the entry count is still asserted
        # to be 1 — presumably remove without cleanup keeps the device in the
        # agent's table; confirm against OpenOMCIAgent.remove_device.
        self.agent.remove_device(DEFAULT_ONU_DEVICE_ID)
        self.assertEqual(len(self.agent.device_ids()), 1)

    def test_delete_device(self):
        self.setup_one_of_each()
        self.assertEqual(len(self.agent.device_ids()), 0)

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        # Can delete if it was not started
        onu_device.delete()
        self.assertEqual(len(self.agent.device_ids()), 0)

        ##########################################
        # Delete of ONU device okay if it is started
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        # Start it and then delete it
        onu_device.start()
        onu_device.delete()
        self.assertEqual(len(self.agent.device_ids()), 0)

    @deferred(timeout=5)
    def test_mib_query_fails_if_dev_not_started(self):
        self.setup_one_of_each()
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        def not_called(_reason):
            assert False, 'Should never be called'

        def check_status(_results):
            # Device not yet started. Query should fail with KeyError since
            # ONU is not in database yet
            assert_raises(KeyError, onu_device.query_mib)

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started.
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d

    @deferred(timeout=5)
    def test_mib_query_ok_if_dev_started(self):
        self.setup_one_of_each()

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        def not_called(_reason):
            onu_device.stop()
            assert False, 'Should never be called'

        def check_status(_results):
            # Device started. Query will succeed but nothing should be populated
            # but the most basic items
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)

            self.assertIsNotNone(results.get(VERSION_KEY))
            self.assertIsNotNone(results.get(CREATED_KEY))
            self.assertIsNone(results.get(MODIFIED_KEY))  # Created! but not yet modified

            self.assertEqual(results.get(MDS_KEY), 0)
            self.assertIsNone(results.get(LAST_SYNC_KEY))

            self.assertIsNone(results.get(CLASS_ID_KEY))

            # Stopping still allows a query. Note you just delete a device
            # to clean up any associated databases
            onu_device.stop()
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that its queued async state machines can run as well
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d

    @deferred(timeout=5)
    def test_delete_scrubs_mib(self):
        self.setup_one_of_each()

        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)

        def not_called(_reason):
            onu_device.stop()
            assert False, 'Should never be called'

        def check_status(_results):
            # Device started. Query will succeed but nothing should be populated
            # but the most basic items
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)

            # Delete should wipe out any MIB data. Note that a delete of a started
            # or stopped ONU device is allowed. In this case we are deleting a
            # started ONU Device
            onu_device.delete()
            assert_raises(Exception, onu_device.query_mib)

            # TODO: When capabilities are supported, make sure capabilities get cleared as well

        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that its queued async state machines can run as well
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d

    # TODO: Test pub/sub interface if possible
    # TODO: Test custom/vendor-specific ME support
    # TODO: Test override of various state machines or OMCI tasks if possible
# Allow this test module to be run directly (delegates to unittest's main)
if __name__ == '__main__':
    main()
| 38.708171 | 98 | 0.668778 |
from unittest import TestCase, main
from nose.tools import assert_raises
from nose.twistedtools import deferred
from copy import deepcopy
from mock.mock_adapter_agent import MockAdapterAgent, MockCore
from mock.mock_onu_handler import MockOnuHandler
from mock.mock_olt_handler import MockOltHandler
from mock.mock_onu import MockOnu
from voltha.extensions.omci.openomci_agent import OpenOMCIAgent, OpenOmciAgentDefaults
from voltha.extensions.omci.omci_defs import *
from common.utils.asleep import asleep
from voltha.extensions.omci.database.mib_db_api import DEVICE_ID_KEY, CLASS_ID_KEY, CREATED_KEY, \
MODIFIED_KEY, MDS_KEY, LAST_SYNC_KEY, VERSION_KEY, DatabaseStateError
from voltha.extensions.omci.database.mib_db_dict import MibDbVolatileDict
DEFAULT_OLT_DEVICE_ID = 'default_olt_mock'
DEFAULT_ONU_DEVICE_ID = 'default_onu_mock'
DEFAULT_PON_ID = 0
DEFAULT_ONU_ID = 0
DEFAULT_ONU_SN = 'TEST00000001'
OP = EntityOperations
RC = ReasonCodes
def chunk(indexable, chunk_size):
    """Yield consecutive slices of *indexable*, each *chunk_size* long.

    The final slice is shorter when ``len(indexable)`` is not an exact
    multiple of *chunk_size*.
    """
    for start in range(0, len(indexable), chunk_size):
        end = start + chunk_size
        yield indexable[start:end]
def hex2raw(hex_string):
    """Decode a hex digit string into the raw string of characters it encodes.

    Each 2-character pair is parsed as one base-16 byte value.
    """
    pairs = (hex_string[i:i + 2] for i in range(0, len(hex_string), 2))
    return ''.join(chr(int(pair, 16)) for pair in pairs)
class TestOnuDeviceEntry(TestCase):
    """Tests for OpenOMCIAgent ONU device lifecycle and MIB queries.

    Each test builds a mock OLT with one child ONU, then exercises
    add/remove/delete of the ONU device entry and the query_mib() API.
    NOTE(review): this copy of the module had its comments stripped by a
    dataset-export step; one line of test_delete_device was corrupted in the
    process (see the note there).
    """
    def setUp(self):
        # Fresh agent per test, configured with a volatile (in-memory)
        # MIB database so no state leaks between tests
        self.adapter_agent = MockAdapterAgent()
        custom = deepcopy(OpenOmciAgentDefaults)
        custom['mib-synchronizer']['database'] = MibDbVolatileDict
        self.agent = OpenOMCIAgent(MockCore, support_classes=custom)
        self.agent.start()
    def tearDown(self):
        # Stop the agent and tear down the adapter agent if they were created
        if self.agent is not None:
            self.agent.stop()
        if self.adapter_agent is not None:
            self.adapter_agent.tearDown()
    def setup_mock_olt(self, device_id=DEFAULT_OLT_DEVICE_ID):
        """Create a mock OLT handler and register its device."""
        handler = MockOltHandler(self.adapter_agent, device_id)
        self.adapter_agent.add_device(handler.device)
        return handler
    def setup_mock_onu(self, parent_id=DEFAULT_OLT_DEVICE_ID,
                       device_id=DEFAULT_ONU_DEVICE_ID,
                       pon_id=DEFAULT_PON_ID,
                       onu_id=DEFAULT_ONU_ID,
                       serial_no=DEFAULT_ONU_SN):
        """Create a mock ONU handler; attach a MockOnu only when a serial
        number is supplied."""
        handler = MockOnuHandler(self.adapter_agent, parent_id, device_id, pon_id, onu_id)
        handler.serial_number = serial_no
        onu = MockOnu(serial_no, self.adapter_agent, handler.device_id) \
            if serial_no is not None else None
        handler.onu_mock = onu
        return handler
    def setup_one_of_each(self):
        """Convenience: one mock OLT with one child ONU, wired together."""
        self.olt_handler = self.setup_mock_olt()
        self.onu_handler = self.setup_mock_onu(parent_id=self.olt_handler.device_id)
        self.onu_device = self.onu_handler.onu_mock
        self.adapter_agent.add_child_device(self.olt_handler.device,
                                            self.onu_handler.device)
    def test_add_remove_device(self):
        """add_device registers the entry; query before start raises KeyError."""
        self.setup_one_of_each()
        self.assertEqual(len(self.agent.device_ids()), 0)
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)
        # Device not started yet, so no MIB database entry exists
        assert_raises(KeyError, onu_device.query_mib)
        self.agent.remove_device(DEFAULT_ONU_DEVICE_ID)
        # NOTE(review): count is still asserted as 1 after remove_device --
        # confirm whether removal is deferred or this expectation is a bug
        self.assertEqual(len(self.agent.device_ids()), 1)
    def test_delete_device(self):
        """delete() removes the entry so a later query raises KeyError."""
        self.setup_one_of_each()
        self.assertEqual(len(self.agent.device_ids()), 0)
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)
        onu_device.delete()
        self.assertEqual(len(self.agent.device_ids()), 0)
        # NOTE(review): the next line is a corrupted fragment (likely the
        # remains of a multi-line statement plus a lost @deferred decorator
        # from the comment-stripping step) -- this method is not runnable
        # as-is; restore it from upstream history
        _device)
        def not_called(_reason):
            # Errback path must never fire
            assert False, 'Should never be called'
        def check_status(_results):
            # After delete, the MIB entry is gone so the query raises
            assert_raises(KeyError, onu_device.query_mib)
        # Yield briefly so the MIB database callLater task can run
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d
    @deferred(timeout=5)
    def test_mib_query_ok_if_dev_started(self):
        """A started device can be queried; only skeleton MIB data exists."""
        self.setup_one_of_each()
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)
        def not_called(_reason):
            # Errback path must never fire
            onu_device.stop()
            assert False, 'Should never be called'
        def check_status(_results):
            # Device started. Query will succeed but nothing should be
            # populated but the most basic items
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)
            self.assertIsNotNone(results.get(VERSION_KEY))
            self.assertIsNotNone(results.get(CREATED_KEY))
            self.assertIsNone(results.get(MODIFIED_KEY))  # created, not yet modified
            self.assertEqual(results.get(MDS_KEY), 0)
            self.assertIsNone(results.get(LAST_SYNC_KEY))
            self.assertIsNone(results.get(CLASS_ID_KEY))
            # Stopping still allows a query; only deleting a device cleans
            # up its associated databases
            onu_device.stop()
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
        # Start the device so its queued async state machines can run, then
        # yield so the MIB database callLater task gets a chance to execute
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d
    @deferred(timeout=5)
    def test_delete_scrubs_mib(self):
        """delete() on a started device wipes MIB data; queries then raise."""
        self.setup_one_of_each()
        onu_device = self.agent.add_device(DEFAULT_ONU_DEVICE_ID,
                                           self.adapter_agent)
        self.assertIsNotNone(onu_device)
        self.assertEqual(len(self.agent.device_ids()), 1)
        self.assertEqual(self.agent.get_device(DEFAULT_ONU_DEVICE_ID), onu_device)
        def not_called(_reason):
            # Errback path must never fire
            onu_device.stop()
            assert False, 'Should never be called'
        def check_status(_results):
            # Device started. Query will succeed but nothing should be populated
            # but the most basic items
            results = onu_device.query_mib()
            self.assertTrue(isinstance(results, dict))
            self.assertEqual(results.get(DEVICE_ID_KEY), DEFAULT_ONU_DEVICE_ID)
            # Delete should wipe out any MIB data. Note that a delete of a started
            # or stopped ONU device is allowed. In this case we are deleting a
            # started ONU Device
            onu_device.delete()
            assert_raises(Exception, onu_device.query_mib)
            # TODO: When capabilities are supported, make sure capabilities get cleared as well
        # Yield context so that MIB Database callLater runs. This is a waiting
        # Async task from when the OpenOMCIAgent was started. But also start the
        # device so that it's queued async state machines can run as well
        onu_device.start()
        d = asleep(0.2)
        d.addCallbacks(check_status, not_called)
        return d
# Allow this test module to be run directly (delegates to unittest's main)
if __name__ == '__main__':
    main()
# NOTE(review): dataset-export residue below (boolean metadata columns and
# Hugging Face dataset-viewer boilerplate) — not part of the test module;
# safe to remove.