hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f771b3debc80fb259fdf618bfb257bb5537f0e | 6,112 | py | Python | tests/app/organisation/test_invite_rest.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | 1 | 2021-02-26T18:31:50.000Z | 2021-02-26T18:31:50.000Z | tests/app/organisation/test_invite_rest.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | null | null | null | tests/app/organisation/test_invite_rest.py | tlwr/notifications-api | 88a6b7729edb9be41ce3e7c027f1452b7b6d00d2 | [
"MIT"
] | null | null | null | import pytest
from app.models import Notification, INVITE_PENDING
from tests.app.db import create_invited_org_user
@pytest.mark.parametrize('platform_admin, expected_invited_by', (
    (True, 'The GOV.UK Notify team'),
    (False, 'Test User')
))
@pytest.mark.parametrize('extra_args, expected_start_of_invite_url', [
    (
        {},
        'http://localhost:6012/organisation-invitation/'
    ),
    (
        {'invite_link_host': 'https://www.example.com'},
        'https://www.example.com/organisation-invitation/'
    ),
])
def test_create_invited_org_user(
    admin_request,
    sample_organisation,
    sample_user,
    mocker,
    org_invite_email_template,
    extra_args,
    expected_start_of_invite_url,
    platform_admin,
    expected_invited_by,
):
    """Inviting a user creates a pending invite and queues the invite email."""
    deliver_email_mock = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    invited_email = 'invited_user@example.com'
    sample_user.platform_admin = platform_admin

    post_data = {
        'organisation': str(sample_organisation.id),
        'email_address': invited_email,
        'invited_by': str(sample_user.id),
    }
    post_data.update(extra_args)

    response = admin_request.post(
        'organisation_invite.invite_user_to_org',
        organisation_id=sample_organisation.id,
        _data=post_data,
        _expected_status=201
    )

    invite = response['data']
    assert invite['organisation'] == str(sample_organisation.id)
    assert invite['email_address'] == invited_email
    assert invite['invited_by'] == str(sample_user.id)
    assert invite['status'] == INVITE_PENDING
    assert invite['id']

    # the invite email should carry exactly the three expected personalisation fields
    notification = Notification.query.first()
    assert notification.reply_to_text == sample_user.email_address
    assert len(notification.personalisation.keys()) == 3
    assert notification.personalisation['organisation_name'] == 'sample organisation'
    assert notification.personalisation['user_name'] == expected_invited_by
    assert notification.personalisation['url'].startswith(expected_start_of_invite_url)
    assert len(notification.personalisation['url']) > len(expected_start_of_invite_url)

    deliver_email_mock.assert_called_once_with([str(notification.id)], queue="notify-internal-tasks")
def test_create_invited_user_invalid_email(admin_request, sample_organisation, sample_user, mocker):
    """A malformed email address is rejected with 400 and no invite email is queued."""
    mocked = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    email_address = 'notanemail'
    data = {
        # consistency fix: the create-invite payload key is 'organisation'
        # (this test previously sent 'service', which the schema does not use)
        'organisation': str(sample_organisation.id),
        'email_address': email_address,
        'invited_by': str(sample_user.id),
    }
    json_resp = admin_request.post(
        'organisation_invite.invite_user_to_org',
        organisation_id=sample_organisation.id,
        _data=data,
        _expected_status=400
    )
    assert json_resp['errors'][0]['message'] == 'email_address Not a valid email address'
    # the email delivery task must never be enqueued for invalid input
    assert mocked.call_count == 0
def test_get_all_invited_users_by_service(admin_request, sample_organisation, sample_user):
    """Every invite created for an organisation is returned by the list endpoint."""
    invite_count = 5
    for idx in range(invite_count):
        create_invited_org_user(
            sample_organisation,
            sample_user,
            email_address='invited_user_{}@service.gov.uk'.format(idx)
        )

    response = admin_request.get(
        'organisation_invite.get_invited_org_users_by_organisation',
        organisation_id=sample_organisation.id
    )

    assert len(response['data']) == invite_count
    for invite in response['data']:
        assert invite['organisation'] == str(sample_organisation.id)
        assert invite['invited_by'] == str(sample_user.id)
        assert invite['id']
def test_get_invited_users_by_service_with_no_invites(admin_request, sample_organisation):
    """An organisation with no invites yields an empty result list."""
    response = admin_request.get(
        'organisation_invite.get_invited_org_users_by_organisation',
        organisation_id=sample_organisation.id
    )
    assert len(response['data']) == 0
def test_get_invited_user_by_organisation(admin_request, sample_invited_org_user):
    """Fetching a single invite by id returns that invite's details."""
    organisation = sample_invited_org_user.organisation
    response = admin_request.get(
        'organisation_invite.get_invited_org_user_by_organisation',
        organisation_id=organisation.id,
        invited_org_user_id=sample_invited_org_user.id
    )
    assert response['data']['email_address'] == sample_invited_org_user.email_address
def test_get_invited_user_by_organisation_when_user_does_not_belong_to_the_org(
    admin_request,
    sample_invited_org_user,
    fake_uuid,
):
    """Looking an invite up under a different organisation id is a 404 error."""
    response = admin_request.get(
        'organisation_invite.get_invited_org_user_by_organisation',
        organisation_id=fake_uuid,
        invited_org_user_id=sample_invited_org_user.id,
        _expected_status=404
    )
    assert response['result'] == 'error'
def test_update_org_invited_user_set_status_to_cancelled(admin_request, sample_invited_org_user):
    """An invite's status can be updated to 'cancelled'."""
    response = admin_request.post(
        'organisation_invite.update_org_invite_status',
        organisation_id=sample_invited_org_user.organisation_id,
        invited_org_user_id=sample_invited_org_user.id,
        _data={'status': 'cancelled'}
    )
    assert response['data']['status'] == 'cancelled'
def test_update_org_invited_user_for_wrong_service_returns_404(admin_request, sample_invited_org_user, fake_uuid):
    """Updating an invite under the wrong organisation id is a 404."""
    response = admin_request.post(
        'organisation_invite.update_org_invite_status',
        organisation_id=fake_uuid,
        invited_org_user_id=sample_invited_org_user.id,
        _data={'status': 'cancelled'},
        _expected_status=404
    )
    assert response['message'] == 'No result found'
def test_update_org_invited_user_for_invalid_data_returns_400(admin_request, sample_invited_org_user):
    """An unknown status value is rejected with 400 and a single schema error."""
    response = admin_request.post(
        'organisation_invite.update_org_invite_status',
        organisation_id=sample_invited_org_user.organisation_id,
        invited_org_user_id=sample_invited_org_user.id,
        _data={'status': 'garbage'},
        _expected_status=400
    )
    assert len(response['errors']) == 1
    assert response['errors'][0]['message'] == 'status garbage is not one of [pending, accepted, cancelled]'
| 34.145251 | 114 | 0.724967 | import pytest
from app.models import Notification, INVITE_PENDING
from tests.app.db import create_invited_org_user
@pytest.mark.parametrize('platform_admin, expected_invited_by', (
(True, 'The GOV.UK Notify team'),
(False, 'Test User')
))
@pytest.mark.parametrize('extra_args, expected_start_of_invite_url', [
(
{},
'http://localhost:6012/organisation-invitation/'
),
(
{'invite_link_host': 'https://www.example.com'},
'https://www.example.com/organisation-invitation/'
),
])
def test_create_invited_org_user(
admin_request,
sample_organisation,
sample_user,
mocker,
org_invite_email_template,
extra_args,
expected_start_of_invite_url,
platform_admin,
expected_invited_by,
):
mocked = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
email_address = 'invited_user@example.com'
sample_user.platform_admin = platform_admin
data = dict(
organisation=str(sample_organisation.id),
email_address=email_address,
invited_by=str(sample_user.id),
**extra_args
)
json_resp = admin_request.post(
'organisation_invite.invite_user_to_org',
organisation_id=sample_organisation.id,
_data=data,
_expected_status=201
)
assert json_resp['data']['organisation'] == str(sample_organisation.id)
assert json_resp['data']['email_address'] == email_address
assert json_resp['data']['invited_by'] == str(sample_user.id)
assert json_resp['data']['status'] == INVITE_PENDING
assert json_resp['data']['id']
notification = Notification.query.first()
assert notification.reply_to_text == sample_user.email_address
assert len(notification.personalisation.keys()) == 3
assert notification.personalisation['organisation_name'] == 'sample organisation'
assert notification.personalisation['user_name'] == expected_invited_by
assert notification.personalisation['url'].startswith(expected_start_of_invite_url)
assert len(notification.personalisation['url']) > len(expected_start_of_invite_url)
mocked.assert_called_once_with([(str(notification.id))], queue="notify-internal-tasks")
def test_create_invited_user_invalid_email(admin_request, sample_organisation, sample_user, mocker):
mocked = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
email_address = 'notanemail'
data = {
'service': str(sample_organisation.id),
'email_address': email_address,
'invited_by': str(sample_user.id),
}
json_resp = admin_request.post(
'organisation_invite.invite_user_to_org',
organisation_id=sample_organisation.id,
_data=data,
_expected_status=400
)
assert json_resp['errors'][0]['message'] == 'email_address Not a valid email address'
assert mocked.call_count == 0
def test_get_all_invited_users_by_service(admin_request, sample_organisation, sample_user):
for i in range(5):
create_invited_org_user(
sample_organisation,
sample_user,
email_address='invited_user_{}@service.gov.uk'.format(i)
)
json_resp = admin_request.get(
'organisation_invite.get_invited_org_users_by_organisation',
organisation_id=sample_organisation.id
)
assert len(json_resp['data']) == 5
for invite in json_resp['data']:
assert invite['organisation'] == str(sample_organisation.id)
assert invite['invited_by'] == str(sample_user.id)
assert invite['id']
def test_get_invited_users_by_service_with_no_invites(admin_request, sample_organisation):
json_resp = admin_request.get(
'organisation_invite.get_invited_org_users_by_organisation',
organisation_id=sample_organisation.id
)
assert len(json_resp['data']) == 0
def test_get_invited_user_by_organisation(admin_request, sample_invited_org_user):
json_resp = admin_request.get(
'organisation_invite.get_invited_org_user_by_organisation',
organisation_id=sample_invited_org_user.organisation.id,
invited_org_user_id=sample_invited_org_user.id
)
assert json_resp['data']['email_address'] == sample_invited_org_user.email_address
def test_get_invited_user_by_organisation_when_user_does_not_belong_to_the_org(
admin_request,
sample_invited_org_user,
fake_uuid,
):
json_resp = admin_request.get(
'organisation_invite.get_invited_org_user_by_organisation',
organisation_id=fake_uuid,
invited_org_user_id=sample_invited_org_user.id,
_expected_status=404
)
assert json_resp['result'] == 'error'
def test_update_org_invited_user_set_status_to_cancelled(admin_request, sample_invited_org_user):
data = {'status': 'cancelled'}
json_resp = admin_request.post(
'organisation_invite.update_org_invite_status',
organisation_id=sample_invited_org_user.organisation_id,
invited_org_user_id=sample_invited_org_user.id,
_data=data
)
assert json_resp['data']['status'] == 'cancelled'
def test_update_org_invited_user_for_wrong_service_returns_404(admin_request, sample_invited_org_user, fake_uuid):
data = {'status': 'cancelled'}
json_resp = admin_request.post(
'organisation_invite.update_org_invite_status',
organisation_id=fake_uuid,
invited_org_user_id=sample_invited_org_user.id,
_data=data,
_expected_status=404
)
assert json_resp['message'] == 'No result found'
def test_update_org_invited_user_for_invalid_data_returns_400(admin_request, sample_invited_org_user):
data = {'status': 'garbage'}
json_resp = admin_request.post(
'organisation_invite.update_org_invite_status',
organisation_id=sample_invited_org_user.organisation_id,
invited_org_user_id=sample_invited_org_user.id,
_data=data,
_expected_status=400
)
assert len(json_resp['errors']) == 1
assert json_resp['errors'][0]['message'] == 'status garbage is not one of [pending, accepted, cancelled]'
| true | true |
f7f7730ed654766fc8591a43262a3e314e7b1197 | 6,013 | py | Python | language-modeling-master/src/nnlm-mc-drop.py | evazhang612/honygenerator | cafcf1736faba978ecaed624b949ebc1498477ee | [
"MIT"
] | 1 | 2019-03-14T16:34:35.000Z | 2019-03-14T16:34:35.000Z | language-modeling-master/src/nnlm-mc-drop.py | evazhang612/honygenerator | cafcf1736faba978ecaed624b949ebc1498477ee | [
"MIT"
] | 2 | 2019-02-12T01:17:16.000Z | 2019-02-12T01:17:20.000Z | language-modeling-master/src/nnlm-mc-drop.py | evazhang612/honygenerator | cafcf1736faba978ecaed624b949ebc1498477ee | [
"MIT"
] | null | null | null | import torchtext, random, torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
global use_cuda
use_cuda = torch.cuda.is_available()
device = 0 if use_cuda else -1
TEXT = torchtext.data.Field()
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(path=".", train="train.txt", validation="valid.txt", test="valid.txt", text_field=TEXT)
TEXT.build_vocab(train, max_size=1000) if False else TEXT.build_vocab(train)
TEXT.vocab.load_vectors('glove.840B.300d')
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits((train, val, test), batch_size=10, device=device, bptt_len=32, repeat=False)
class LanguageModel(nn.Module):
    """Feed-forward trigram language model with multichannel embeddings and dropout.

    Each context token is embedded through two channels initialised from the
    pretrained GloVe vectors: a trainable ("non-static") copy and a frozen
    ("static") copy. The concatenated channels pass through a tanh hidden
    layer to a log-softmax over the vocabulary.
    """
    def __init__(self, hidden_dim = 100, TEXT = TEXT):
        super(LanguageModel, self).__init__()
        vocab_size, embedding_dim = TEXT.vocab.vectors.shape
        # trainable channel, initialised from the pretrained vectors
        self.nonstatic_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.nonstatic_embeddings.weight.data.copy_(TEXT.vocab.vectors)
        # frozen channel: same initialisation, but gradients are disabled
        self.static_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.static_embeddings.weight.data.copy_(TEXT.vocab.vectors)
        self.static_embeddings.weight.requires_grad = False
        # 4 * embedding_dim = 2 context tokens x 2 embedding channels
        self.input2linear = nn.Linear(4*embedding_dim, hidden_dim)
        self.linear2output = nn.Linear(hidden_dim, vocab_size)
        self.dropout = nn.Dropout(p = 0.50)
    def forward(self, x):
        """Return log-probabilities over the vocabulary for a batch of 2-token contexts.

        x: LongTensor of shape (batch, 2) -- the two context word ids.
        """
        nonstatic_embedded, static_embedded = self.nonstatic_embeddings(x), self.static_embeddings(x)
        dropped_nonstatic, dropped_static = self.dropout(nonstatic_embedded), self.dropout(static_embedded)
        # concatenate the two channels along the embedding dimension
        x_1 = torch.cat([dropped_nonstatic, dropped_static], dim = 2)
        # flatten context positions and channels into one feature vector per example
        x_2 = x_1.view(len(x_1), -1)
        x_3 = F.tanh(self.input2linear(x_2))
        x_4 = self.linear2output(x_3)
        # NOTE(review): dropout is applied to the output logits here, which is
        # unusual (dropout normally precedes the final linear layer) -- confirm
        # this is intentional before changing it
        x_5 = self.dropout(x_4)
        logits = F.log_softmax(x_5, dim = 1)
        return logits
    def predict(self, x, TEXT = TEXT):
        """Return the 20 most likely next words (space-joined, best first).

        x: LongTensor of the two context word ids for a single example.
        NOTE(review): dropout stays active unless the caller has put the
        model in eval mode first.
        """
        embedded = torch.cat([self.nonstatic_embeddings(x), self.static_embeddings(x)], dim = 1)
        # reshape the single example into a (1, 4*embedding_dim) batch
        embedded = embedded.view(-1, 1).transpose(0,1)
        activated = F.tanh(self.input2linear(embedded))
        output = self.linear2output(activated)
        logits = F.log_softmax(output, dim = 1)
        # indices of the 20 highest-scoring words, highest first
        out_ids = np.argsort(logits.data[0].tolist())[-20:][::-1]
        out_words = ' '.join([TEXT.vocab.itos[out_id] for out_id in out_ids])
        return out_words
class Trainer:
    """Trains and evaluates a LanguageModel on BPTT batches, then writes
    Kaggle-format predictions for the sentences in input.txt.

    NOTE(review): written against legacy PyTorch (<0.4) -- Variable wrappers
    and loss.data[0] indexing.
    """
    def __init__(self, train_iter, val_iter):
        self.train_iter = train_iter
        self.val_iter = val_iter
    def string_to_batch(self, string):
        """Convert a raw sentence into a LongTensor of its last two word ids."""
        relevant_split = string.split()[-2:] # last two words, ignore ___
        ids = [self.word_to_id(word) for word in relevant_split]
        if use_cuda:
            return Variable(torch.LongTensor(ids)).cuda()
        else:
            return Variable(torch.LongTensor(ids))
    def word_to_id(self, word, TEXT = TEXT):
        """Map a word to its vocabulary index (unknown words map to <unk>)."""
        return TEXT.vocab.stoi[word]
    def batch_to_input(self, batch):
        """Turn a BPTT batch into (context, target) tensors of trigrams."""
        ngrams = self.collect_batch_ngrams(batch)
        # x: first n-1 words of each ngram; y: the word to predict
        x = Variable(torch.LongTensor([ngram[:-1] for ngram in ngrams]))
        y = Variable(torch.LongTensor([ngram[-1] for ngram in ngrams]))
        if use_cuda:
            return x.cuda(), y.cuda()
        else:
            return x, y
    def collect_batch_ngrams(self, batch, n = 3):
        """Slide an n-word window over the flattened batch text.

        NOTE(review): flattening crosses sequence boundaries within the batch,
        so some ngrams span adjacent sequences -- presumably accepted noise.
        """
        data = batch.text.view(-1).data.tolist()
        return [tuple(data[idx:idx + n]) for idx in range(0, len(data) - n + 1)]
    def train_model(self, model, num_epochs):
        """Run the Adam training loop, log per-epoch perplexity, and write output."""
        # only optimise trainable parameters (the static embeddings are frozen)
        parameters = filter(lambda p: p.requires_grad, model.parameters())
        optimizer = torch.optim.Adam(params = parameters, lr=1e-3)
        criterion = nn.NLLLoss()
        for epoch in tqdm(range(num_epochs)):
            model.train()
            epoch_loss = []
            for batch in self.train_iter:
                x, y = self.batch_to_input(batch)
                optimizer.zero_grad()
                y_pred = model(x)
                loss = criterion(y_pred, y)
                loss.backward()
                optimizer.step()
                epoch_loss.append(loss.data[0])
            model.eval()
            # perplexity = exp(mean NLL)
            train_ppl = np.exp(np.mean(epoch_loss))
            val_ppl = self.validate(model)
            print('Epoch {0} | Loss: {1} | Train PPL: {2} | Val PPL: {3}'.format(epoch+1, np.mean(epoch_loss), train_ppl, val_ppl))
        print('Model trained.')
        self.write_kaggle(model)
        print('Output saved.')
    def validate(self, model):
        """Return validation perplexity over the whole validation iterator."""
        criterion = nn.NLLLoss()
        aggregate_loss = []
        for batch in self.val_iter:
            x, y_t = self.batch_to_input(batch)
            y_p = model(x)
            loss = criterion(y_p, y_t)
            aggregate_loss.append(loss.data[0])
        val_ppl = np.exp(np.mean(aggregate_loss))
        return val_ppl
    def predict_sentence(self, string, model):
        """Predict candidate words for one input sentence."""
        # drop the trailing blank marker -- presumably ' ___' (4 chars); verify
        # against the input file format
        string = string[:-4]
        x = self.string_to_batch(string)
        out_words = model.predict(x)
        return out_words
    def write_kaggle(self, model, input_file = 'input.txt'):
        """Write 'id,word' predictions for each line of input_file."""
        inputs = open(input_file, 'r').read().splitlines()
        outputs = [self.predict_sentence(sentence, model) for sentence in inputs]
        with open('nnlm_multichannel_dropout_output.txt', 'w') as f:
            f.write('id,word')
            for idx, line in enumerate(outputs):
                f.write('\n')
                f.write(str(idx) + ',')
                f.write(line)
# Build the model (1024 hidden units), move it to GPU when available,
# then train for 10 epochs and write the Kaggle output file.
model = LanguageModel(hidden_dim = 1024)
if use_cuda:
    model.cuda()
trainer = Trainer(train_iter = train_iter, val_iter = val_iter)
trainer.train_model(model = model, num_epochs = 10)
| 37.58125 | 156 | 0.612839 | import torchtext, random, torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
global use_cuda
use_cuda = torch.cuda.is_available()
device = 0 if use_cuda else -1
TEXT = torchtext.data.Field()
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(path=".", train="train.txt", validation="valid.txt", test="valid.txt", text_field=TEXT)
TEXT.build_vocab(train, max_size=1000) if False else TEXT.build_vocab(train)
TEXT.vocab.load_vectors('glove.840B.300d')
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits((train, val, test), batch_size=10, device=device, bptt_len=32, repeat=False)
class LanguageModel(nn.Module):
def __init__(self, hidden_dim = 100, TEXT = TEXT):
super(LanguageModel, self).__init__()
vocab_size, embedding_dim = TEXT.vocab.vectors.shape
self.nonstatic_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.nonstatic_embeddings.weight.data.copy_(TEXT.vocab.vectors)
self.static_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.static_embeddings.weight.data.copy_(TEXT.vocab.vectors)
self.static_embeddings.weight.requires_grad = False
self.input2linear = nn.Linear(4*embedding_dim, hidden_dim)
self.linear2output = nn.Linear(hidden_dim, vocab_size)
self.dropout = nn.Dropout(p = 0.50)
def forward(self, x):
nonstatic_embedded, static_embedded = self.nonstatic_embeddings(x), self.static_embeddings(x)
dropped_nonstatic, dropped_static = self.dropout(nonstatic_embedded), self.dropout(static_embedded)
x_1 = torch.cat([dropped_nonstatic, dropped_static], dim = 2)
x_2 = x_1.view(len(x_1), -1)
x_3 = F.tanh(self.input2linear(x_2))
x_4 = self.linear2output(x_3)
x_5 = self.dropout(x_4)
logits = F.log_softmax(x_5, dim = 1)
return logits
def predict(self, x, TEXT = TEXT):
embedded = torch.cat([self.nonstatic_embeddings(x), self.static_embeddings(x)], dim = 1)
embedded = embedded.view(-1, 1).transpose(0,1)
activated = F.tanh(self.input2linear(embedded))
output = self.linear2output(activated)
logits = F.log_softmax(output, dim = 1)
out_ids = np.argsort(logits.data[0].tolist())[-20:][::-1]
out_words = ' '.join([TEXT.vocab.itos[out_id] for out_id in out_ids])
return out_words
class Trainer:
def __init__(self, train_iter, val_iter):
self.train_iter = train_iter
self.val_iter = val_iter
def string_to_batch(self, string):
relevant_split = string.split()[-2:]
ids = [self.word_to_id(word) for word in relevant_split]
if use_cuda:
return Variable(torch.LongTensor(ids)).cuda()
else:
return Variable(torch.LongTensor(ids))
def word_to_id(self, word, TEXT = TEXT):
return TEXT.vocab.stoi[word]
def batch_to_input(self, batch):
ngrams = self.collect_batch_ngrams(batch)
x = Variable(torch.LongTensor([ngram[:-1] for ngram in ngrams]))
y = Variable(torch.LongTensor([ngram[-1] for ngram in ngrams]))
if use_cuda:
return x.cuda(), y.cuda()
else:
return x, y
def collect_batch_ngrams(self, batch, n = 3):
data = batch.text.view(-1).data.tolist()
return [tuple(data[idx:idx + n]) for idx in range(0, len(data) - n + 1)]
def train_model(self, model, num_epochs):
parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(params = parameters, lr=1e-3)
criterion = nn.NLLLoss()
for epoch in tqdm(range(num_epochs)):
model.train()
epoch_loss = []
for batch in self.train_iter:
x, y = self.batch_to_input(batch)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
epoch_loss.append(loss.data[0])
model.eval()
train_ppl = np.exp(np.mean(epoch_loss))
val_ppl = self.validate(model)
print('Epoch {0} | Loss: {1} | Train PPL: {2} | Val PPL: {3}'.format(epoch+1, np.mean(epoch_loss), train_ppl, val_ppl))
print('Model trained.')
self.write_kaggle(model)
print('Output saved.')
def validate(self, model):
criterion = nn.NLLLoss()
aggregate_loss = []
for batch in self.val_iter:
x, y_t = self.batch_to_input(batch)
y_p = model(x)
loss = criterion(y_p, y_t)
aggregate_loss.append(loss.data[0])
val_ppl = np.exp(np.mean(aggregate_loss))
return val_ppl
def predict_sentence(self, string, model):
string = string[:-4]
x = self.string_to_batch(string)
out_words = model.predict(x)
return out_words
def write_kaggle(self, model, input_file = 'input.txt'):
inputs = open(input_file, 'r').read().splitlines()
outputs = [self.predict_sentence(sentence, model) for sentence in inputs]
with open('nnlm_multichannel_dropout_output.txt', 'w') as f:
f.write('id,word')
for idx, line in enumerate(outputs):
f.write('\n')
f.write(str(idx) + ',')
f.write(line)
model = LanguageModel(hidden_dim = 1024)
if use_cuda:
model.cuda()
trainer = Trainer(train_iter = train_iter, val_iter = val_iter)
trainer.train_model(model = model, num_epochs = 10)
| true | true |
f7f774d6bed93d46f1b693b9d34fe372069428f2 | 6,077 | py | Python | exp2/exp2c.py | Haunter17/MIR_SU17 | 0eaefb8cab78ca896c1ed0074892c296110eb161 | [
"MIT"
] | null | null | null | exp2/exp2c.py | Haunter17/MIR_SU17 | 0eaefb8cab78ca896c1ed0074892c296110eb161 | [
"MIT"
] | 5 | 2017-05-22T20:32:07.000Z | 2017-06-02T21:06:23.000Z | exp2/exp2c.py | Haunter17/MIR_SU17 | 0eaefb8cab78ca896c1ed0074892c296110eb161 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
    """Create a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32))
def init_bias_variable(shape):
    """Create a trainable bias tensor filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape, dtype=tf.float32))
def conv2d(x, W):
    """2-D convolution with unit strides and no padding ('VALID')."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def loadData(filepath):
    """Load training and validation data from a MATLAB v7.3 (HDF5) file.

    Args:
        filepath: path to an HDF5 file containing the datasets
            'trainingFeatures', 'trainingLabels', 'validationFeatures'
            and 'validationLabels'.

    Returns:
        [X_train, y_train, X_val, y_val] as numpy arrays.
    """
    # fixed label: this script is experiment 2c (previously printed '2b')
    print('==> Experiment 2c')
    print('==> Loading data from {}'.format(filepath))
    # benchmark
    t_start = time.time()
    # context manager guarantees the HDF5 handle is closed even on error
    # (previously the file object was only del'd, never closed)
    with h5py.File(filepath) as f:
        X_train = np.array(f.get('trainingFeatures'))
        y_train = np.array(f.get('trainingLabels'))
        X_val = np.array(f.get('validationFeatures'))
        y_val = np.array(f.get('validationLabels'))
    t_end = time.time()
    print('--Time elapsed for loading data: {t:.2f} \
        seconds'.format(t = t_end - t_start))
    print('-- Number of training samples: {}'.format(X_train.shape[0]))
    print('-- Number of validation samples: {}'.format(X_val.shape[0]))
    return [X_train, y_train, X_val, y_val]
def runNeuralNet(num_freq, X_train, y_train, X_val, y_val, batch_size, num_epochs, pooling_strategy):
    """Build, train and evaluate a two-conv-layer + softmax classifier (TF1 graph API).

    Returns [train_acc_list, val_acc_list, train_err_list, val_err_list,
    epoch_numbers], one entry per recorded epoch.

    NOTE(review): the pooling_strategy argument is never used, and num_freq is
    immediately overwritten below -- both parameters are effectively ignored.
    """
    # Neural-network model set-up
    num_training_vec, total_features = X_train.shape
    num_freq = 121  # NOTE(review): shadows the num_freq argument; bin count is hard-coded
    num_frames = int(total_features / num_freq)
    print('-- Num frames: {}'.format(num_frames))
    num_classes = int(max(y_train.max(), y_val.max()) + 1)
    k1 = 32          # filters in the first conv layer
    k2 = 64          # filters in the second conv layer
    l = num_frames   # second-layer filter width spans all remaining frames
    print_freq = 1   # record/print metrics every epoch
    # Transform labels into one-hot encoding form
    y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
    y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)
    # Set-up input and output label
    x = tf.placeholder(tf.float32, [None, total_features])
    y_ = tf.placeholder(tf.float32, [None, num_classes])
    # first convolutional layer: filters span the full frequency axis
    W_conv1 = init_weight_variable([num_freq, 1, 1, k1])
    b_conv1 = init_bias_variable([k1])
    x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # second layer: filters span l frames across all k1 channels
    W_conv2 = init_weight_variable([1, l, k1, k2])
    b_conv2 = init_bias_variable([k2])
    h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
    h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])
    # softmax layer (logits; the softmax itself is folded into the loss below)
    W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])
    b_sm = init_bias_variable([num_classes])
    y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm
    # evaluations
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # session
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    # materialise the one-hot labels (tf.one_hot adds a middle axis of size 1)
    y_train = sess.run(y_train_OHEnc)[:, 0, :]
    y_val = sess.run(y_val_OHEnc)[:, 0, :]
    train_acc_list = []
    val_acc_list = []
    train_err_list = []
    val_err_list = []
    epoch_numbers = []
    # benchmark
    t_start = time.time()
    for epoch in range(num_epochs):
        epochStart = time.time()
        # mini-batch SGD over the training set, in order (no shuffling)
        for i in range(0, num_training_vec, batch_size):
            batch_end_point = min(i + batch_size, num_training_vec)
            train_batch_data = X_train[i : batch_end_point]
            train_batch_label = y_train[i : batch_end_point]
            train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
        epochEnd = time.time()
        # printing and recording data
        if (epoch + 1) % print_freq == 0:
            train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
            train_acc_list.append(train_acc)
            val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
            val_acc_list.append(val_acc)
            train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
            train_err_list.append(train_err)
            val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
            val_err_list.append(val_err)
            epoch_numbers += [epoch]
            print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))
    t_end = time.time()
    print('--Time elapsed for training: {t:.2f} \
        seconds'.format(t = t_end - t_start))
    return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
'''
Our Main
Command Line Arguments: (1) Length of horizontal window
'''
# load the data
# NOTE(review): hard-coded cluster path -- adjust when running elsewhere
[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp1/taylorswift_smallDataset_71_7.mat')
# training hyper-parameters
batchSize = 500
numEpochs = 250
poolingStrategy = 'MAX'  # NOTE(review): currently unused inside runNeuralNet
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, batchSize, numEpochs, poolingStrategy)
# Reports: final-epoch metrics
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
# Plot training vs validation cross-entropy against epoch number
print('==> Generating error plot...')
x_list = epoch_numbers
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs')
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig('exp2c_k1=32_k2=64.png', format='png')
plt.close()
print('==> Done.')
| 34.333333 | 166 | 0.700675 | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def init_weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def init_bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def loadData(filepath):
print('==> Experiment 2b')
print('==> Loading data from {}'.format(filepath))
t_start = time.time()
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
return [X_train, y_train, X_val, y_val]
def runNeuralNet(num_freq, X_train, y_train, X_val, y_val, batch_size, num_epochs, pooling_strategy):
    """Build and train a two-layer convolutional network (TF1 graph mode).

    Note: the num_freq argument is immediately overwritten with 121 below,
    and pooling_strategy is accepted but never used by this implementation.
    Returns [train_acc_list, val_acc_list, train_err_list, val_err_list,
    epoch_numbers], one entry per reported epoch.
    """
    num_training_vec, total_features = X_train.shape
    num_freq = 121
    # Each sample is a flattened (num_freq x num_frames) spectrogram.
    num_frames = int(total_features / num_freq)
    print('-- Num frames: {}'.format(num_frames))
    num_classes = int(max(y_train.max(), y_val.max()) + 1)
    k1 = 32  # filters in conv layer 1
    k2 = 64  # filters in conv layer 2
    l = num_frames  # conv-2 filter width spans all remaining frames
    print_freq = 1  # report metrics every epoch
    # One-hot encode labels (evaluated later through the session).
    y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
    y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)
    x = tf.placeholder(tf.float32, [None, total_features])
    y_ = tf.placeholder(tf.float32, [None, num_classes])
    # Layer 1: full-height (num_freq x 1) filters -> collapses frequency axis.
    W_conv1 = init_weight_variable([num_freq, 1, 1, k1])
    b_conv1 = init_bias_variable([k1])
    x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # Layer 2: (1 x l) filters over the time axis.
    W_conv2 = init_weight_variable([1, l, k1, k2])
    b_conv2 = init_bias_variable([k2])
    h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
    h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])
    # Softmax (readout) layer.
    W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])
    b_sm = init_bias_variable([num_classes])
    y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    # Materialize the one-hot labels; [:, 0, :] drops the extra axis that
    # tf.one_hot adds for the 2-D label input.
    y_train = sess.run(y_train_OHEnc)[:, 0, :]
    y_val = sess.run(y_val_OHEnc)[:, 0, :]
    train_acc_list = []
    val_acc_list = []
    train_err_list = []
    val_err_list = []
    epoch_numbers = []
    t_start = time.time()
    for epoch in range(num_epochs):
        epochStart = time.time()
        # Mini-batch SGD pass over the whole training set.
        for i in range(0, num_training_vec, batch_size):
            batch_end_point = min(i + batch_size, num_training_vec)
            train_batch_data = X_train[i : batch_end_point]
            train_batch_label = y_train[i : batch_end_point]
            train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
        epochEnd = time.time()
        # Evaluate and record accuracy/cost on the full splits.
        if (epoch + 1) % print_freq == 0:
            train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
            train_acc_list.append(train_acc)
            val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
            val_acc_list.append(val_acc)
            train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
            train_err_list.append(train_err)
            val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
            val_err_list.append(val_err)
            epoch_numbers += [epoch]
            print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))
    t_end = time.time()
    print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
    return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]
# --- Experiment driver (runs on import/execution) ---
[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp1/taylorswift_smallDataset_71_7.mat')
batchSize = 500
numEpochs = 250
poolingStrategy = 'MAX'
[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, batchSize, numEpochs, poolingStrategy)
# Final-epoch metrics.
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
# Training vs validation cross-entropy per epoch, saved as a PNG.
x_list = epoch_numbers
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs')
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig('exp2c_k1=32_k2=64.png', format='png')
plt.close()
print('==> Done.')
| true | true |
f7f775b27adaf1cc72af5063a7e020c1711073c4 | 3,568 | py | Python | web-server/check-server.py | valgarn/fraud-detection-framework | 52ce63a41af42de541354f32a3fb4bae773f2f86 | [
"Apache-2.0"
] | null | null | null | web-server/check-server.py | valgarn/fraud-detection-framework | 52ce63a41af42de541354f32a3fb4bae773f2f86 | [
"Apache-2.0"
] | null | null | null | web-server/check-server.py | valgarn/fraud-detection-framework | 52ce63a41af42de541354f32a3fb4bae773f2f86 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Fraud Detection Framework Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND< either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
import threading
import time
import json
import logging
# Root logger prints INFO and above.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import pg
import constants
# Make the native fraud-detection-framework bindings (fdf) loadable: the
# directory must be on sys.path for the import and on PATH for its DLLs
# (';' separator — Windows-style PATH).
sys.path.append(constants.FDF_PYD_PATH)
os.environ["PATH"] += ";" + constants.FDF_PYD_PATH
import fdf
from MultiProcessingLog import MultiProcessingLog
# Mirror all log records into check-server.txt via a multiprocess-safe handler.
logging.getLogger().addHandler(MultiProcessingLog("check-server.txt", "a", 0, 0))
class CheckServer(object):
    """Background worker that polls the task queue (via ``pg``) and runs the
    configured fdf photo-fraud detectors on each task's image."""
    def __init__(self):
        # One detector instance per supported method name (see check()).
        self.info = fdf.Photos.All.INFO()
        self.cnn = fdf.Photos.All.CNN(json.dumps({
            "modelPath": "./fdf/m88-1.pb",
            "modelInput": "input_input",
            "modelOutput": "softmax_tensor/Softmax" }))
        self.pca = fdf.Photos.All.PCA()
        self.benford = fdf.Photos.Jpeg.BENFORD()
        self.exif = fdf.Photos.Jpeg.EXIF()
        self.quality = fdf.Photos.Jpeg.QUALITY()
    def check(self):
        """Poll loop: fetch one task, run the requested detectors, store the
        result.  Expected task tuple layout (from pg.getTask): task[0] image
        data/path, task[1] source id, task[2] task id, task[3] user id,
        task[4] optional JSON list of method names.  Runs forever until a
        file named ``server.stop`` appears in the working directory."""
        while True:
            task = (None, None, None, None)
            try:
                # Cooperative shutdown flag checked once per poll cycle.
                if os.path.exists("server.stop"):
                    exit()
                task = pg.getTask()
                if not task[0] is None:
                    # task[4] may be NULL/"null"; then run every detector.
                    methods = json.loads(task[4]) if not task[4] is None and task[4].lower() != "null" else ["info", "cnn", "pca", "benford", "exif", "quality"]
                    result = dict()
                    for m in methods:
                        if m == "info":
                            result[m] = self.info.check(task[0]).as_dict
                        elif m == "cnn":
                            result[m] = self.cnn.check(task[0]).as_dict
                        elif m == "pca":
                            result[m] = self.pca.check(task[0]).as_dict
                        elif m == "benford":
                            result[m] = self.benford.check(task[0]).as_dict
                        elif m == "exif":
                            result[m] = self.exif.check(task[0]).as_dict
                        elif m == "quality":
                            result[m] = self.quality.check(task[0]).as_dict
                    pg.setTaskResult(task[2], json.dumps(result), constants.STATUS_COMPLETED)
            except Exception as e:
                # Broad catch keeps the worker alive; the failure is logged
                # and reported back to the user via the message table.
                message = "CHECK EXCEPTION: {}".format(json.dumps({
                    "exception": MultiProcessingLog.exception2string(e),
                    "user_id": task[3],
                    "source_id": task[1],
                    "task_id": task[2]
                }))
                logging.info(message)
                pg.addMessage(message, constants.MESSAGE_TYPE_ERROR, task[3])
            finally:
                # Polling interval between queue checks.
                time.sleep(3)
    def run(self):
        # Start the poll loop on a separate (non-daemon) thread.
        checkThread = threading.Thread(target=self.check, args=())
        checkThread.start()
if __name__ == "__main__":
    # Entry point: spawn the polling worker thread.
    CheckServer().run()
| 39.644444 | 161 | 0.54204 |
import os
import sys
import threading
import time
import json
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import pg
import constants
sys.path.append(constants.FDF_PYD_PATH)
os.environ["PATH"] += ";" + constants.FDF_PYD_PATH
import fdf
from MultiProcessingLog import MultiProcessingLog
logging.getLogger().addHandler(MultiProcessingLog("check-server.txt", "a", 0, 0))
class CheckServer(object):
def __init__(self):
self.info = fdf.Photos.All.INFO()
self.cnn = fdf.Photos.All.CNN(json.dumps({
"modelPath": "./fdf/m88-1.pb",
"modelInput": "input_input",
"modelOutput": "softmax_tensor/Softmax" }))
self.pca = fdf.Photos.All.PCA()
self.benford = fdf.Photos.Jpeg.BENFORD()
self.exif = fdf.Photos.Jpeg.EXIF()
self.quality = fdf.Photos.Jpeg.QUALITY()
def check(self):
while True:
task = (None, None, None, None)
try:
if os.path.exists("server.stop"):
exit()
task = pg.getTask()
if not task[0] is None:
methods = json.loads(task[4]) if not task[4] is None and task[4].lower() != "null" else ["info", "cnn", "pca", "benford", "exif", "quality"]
result = dict()
for m in methods:
if m == "info":
result[m] = self.info.check(task[0]).as_dict
elif m == "cnn":
result[m] = self.cnn.check(task[0]).as_dict
elif m == "pca":
result[m] = self.pca.check(task[0]).as_dict
elif m == "benford":
result[m] = self.benford.check(task[0]).as_dict
elif m == "exif":
result[m] = self.exif.check(task[0]).as_dict
elif m == "quality":
result[m] = self.quality.check(task[0]).as_dict
pg.setTaskResult(task[2], json.dumps(result), constants.STATUS_COMPLETED)
except Exception as e:
message = "CHECK EXCEPTION: {}".format(json.dumps({
"exception": MultiProcessingLog.exception2string(e),
"user_id": task[3],
"source_id": task[1],
"task_id": task[2]
}))
logging.info(message)
pg.addMessage(message, constants.MESSAGE_TYPE_ERROR, task[3])
finally:
time.sleep(3)
def run(self):
checkThread = threading.Thread(target=self.check, args=())
checkThread.start()
if __name__ == "__main__":
CheckServer().run()
| true | true |
f7f776ca8bdbe9b7bcd89a66ae42b9de6fc38491 | 10,706 | py | Python | testrunner.py | wradlib/wradlib-old | 55b1687adafd4d0d8c301d6c6e52914df5844748 | [
"MIT"
] | null | null | null | testrunner.py | wradlib/wradlib-old | 55b1687adafd4d0d8c301d6c6e52914df5844748 | [
"MIT"
] | null | null | null | testrunner.py | wradlib/wradlib-old | 55b1687adafd4d0d8c301d6c6e52914df5844748 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2017, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import sys
import os
import io
import getopt
import unittest
import doctest
import inspect
from multiprocessing import Process, Queue
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import coverage
VERBOSE = 2
def create_examples_testsuite():
    """Wrap the example scripts' ``ex_``/``recipe_`` functions as tests.

    Walks ``examples/`` for ``*.py`` files (skipping ``__init__.py`` and the
    data directory), imports each module and adds every function whose name
    starts with ``ex_`` or ``recipe_`` to a ``unittest.TestSuite``.
    """
    excluded = ['__init__.py']
    dotted_names = []
    for dirpath, _, filenames in os.walk('examples/'):
        if 'examples/data' in dirpath:
            continue
        for fname in filenames:
            if fname in excluded or fname[-3:] != '.py':
                continue
            dotted = os.path.join(dirpath, fname).replace('/', '.')
            dotted_names.append(dotted[:-3])
    suite = unittest.TestSuite()
    for dotted in dotted_names:
        # 'examples.foo' -> package 'examples', submodule attribute 'foo'.
        _pkg_name, modname = dotted.split('.')
        package = __import__(dotted)
        module = getattr(package, modname)
        for attr, fn in inspect.getmembers(module, inspect.isfunction):
            if attr.startswith(("ex_", "recipe_")):
                suite.addTest(unittest.FunctionTestCase(fn))
    return suite
class NotebookTest(unittest.TestCase):
    """A single test case that executes one Jupyter notebook end-to-end.

    When ``cov`` is truthy, a hidden coverage-bootstrap cell is injected at
    the top before execution and stripped out again before the (possibly
    modified) notebook is written back to disk.
    """
    def __init__(self, nbfile, cov):
        super(NotebookTest, self).__init__()
        self.nbfile = nbfile  # path of the notebook to execute
        self.cov = cov        # truthy -> inject coverage bootstrap cell
    def id(self):
        # Report the notebook path as the test id (used by keep_tests filtering).
        return self.nbfile
    def runTest(self):
        print(self.id())
        # Pick the kernel matching the running interpreter's major version.
        kernel = 'python%d' % sys.version_info[0]
        cur_dir = os.path.dirname(self.nbfile)
        with open(self.nbfile) as f:
            nb = nbformat.read(f, as_version=4)
        if self.cov:
            # Hidden ('nbsphinx': 'hidden') first cell that starts coverage
            # inside the kernel and makes the notebook's dir importable.
            covdict = {'cell_type': 'code', 'execution_count': 1,
                       'metadata': {'collapsed': True}, 'outputs': [],
                       'nbsphinx': 'hidden',
                       'source': 'import coverage\n'
                                 'coverage.process_startup()\n'
                                 'import sys\n'
                                 'sys.path.append("{0}")\n'.format(cur_dir)
                       }
            nb['cells'].insert(0, nbformat.from_dict(covdict))
        exproc = ExecutePreprocessor(kernel_name=kernel, timeout=500)
        try:
            # Execute in WRADLIB_BUILD_DIR if set, else next to the notebook.
            run_dir = os.getenv('WRADLIB_BUILD_DIR', cur_dir)
            exproc.preprocess(nb, {'metadata': {'path': run_dir}})
        except CellExecutionError as e:
            # Any failing cell fails this test.
            raise e
        if self.cov:
            # Drop the injected bootstrap cell before persisting the
            # executed notebook back to disk.
            nb['cells'].pop(0)
            with io.open(self.nbfile, 'wt') as f:
                nbformat.write(nb, f)
        self.assertTrue(True)
def create_notebooks_testsuite(**kwargs):
    """Build one single-test ``TestSuite`` per notebook.

    Notebooks are discovered under ``$WRADLIB_NOTEBOOKS`` (default
    ``notebooks``); hidden/checkpoint directories are skipped.  One suite
    per notebook keeps the test runner's memory consumption bounded
    (notably on travis-ci).  Requires a ``cov`` keyword argument.
    """
    cov = kwargs.pop('cov')
    notebook_root = os.getenv('WRADLIB_NOTEBOOKS', 'notebooks')
    notebook_paths = []
    for dirpath, _, names in os.walk(notebook_root):
        # '.ipynb_checkpoints' and other dot-directories are not real notebooks.
        if '/.' in dirpath:
            continue
        for name in names:
            if name[-6:] != '.ipynb':
                continue
            notebook_paths.append(os.path.join(dirpath, name))
    suites = []
    for nb_path in notebook_paths:
        per_notebook = unittest.TestSuite()
        per_notebook.addTest(NotebookTest(nb_path, cov))
        suites.append(per_notebook)
    return suites
def create_doctest_testsuite():
    """Collect doctests from every module under ``wradlib/`` except the
    tests package and a few excluded files, into one ``TestSuite``."""
    excluded = ['__init__.py', 'version.py', 'bufr.py', 'test_']
    dotted_modules = []
    for dirpath, _, names in os.walk('wradlib/'):
        if 'wradlib/tests' in dirpath:
            continue
        for name in names:
            if name in excluded or name[-3:] != '.py':
                continue
            dotted = os.path.join(dirpath, name).replace('/', '.')
            dotted_modules.append(dotted[:-3])
    suite = unittest.TestSuite()
    for dotted in dotted_modules:
        suite.addTest(doctest.DocTestSuite(dotted))
    return suite
def create_unittest_testsuite():
    """Discover the regular unittest-style tests under wradlib/tests/."""
    # gather information on tests (unittest etc)
    root_dir = 'wradlib/tests/'
    return unittest.defaultTestLoader.discover(root_dir)
def single_suite_process(queue, test, verbosity, **kwargs):
    """Run a list of test suites and report overall success through *queue*.

    Intended as a ``multiprocessing.Process`` target.  Puts 1 on the queue
    when every non-empty suite passed, 0 otherwise.  Keyword args:
    ``coverage`` enables in-process coverage measurement, except when
    ``notebooks`` is set (notebook tests measure coverage in their kernels).
    """
    with_coverage = kwargs.pop('coverage', 0)
    notebook_mode = kwargs.pop('notebooks', 0)
    measure = with_coverage and not notebook_mode
    if measure:
        cov = coverage.coverage()
        cov.start()
    succeeded = 1
    for suite in test:
        if suite.countTestCases() == 0:
            continue
        outcome = unittest.TextTestRunner(verbosity=verbosity).run(suite)
        succeeded = succeeded & outcome.wasSuccessful()
    if measure:
        cov.stop()
        cov.save()
    queue.put(succeeded)
def keep_tests(suite, arg):
newsuite = unittest.TestSuite()
try:
for tc in suite:
try:
if tc.id().find(arg) != -1:
newsuite.addTest(tc)
except AttributeError:
new = keep_tests(tc, arg)
if new.countTestCases() != 0:
newsuite.addTest(new)
except TypeError:
pass
return newsuite
def main(args):
    """Command-line entry point: parse options, build the selected test
    suites, run them (optionally each in a subprocess and/or under
    coverage) and exit 0 on success, non-zero otherwise."""
    usage_message = """Usage: python testrunner.py options arg
If run without options, testrunner displays the usage message.
If all tests suites should be run,, use the -a option.
If arg is given, only tests containing arg are run.
options:
  -a
  --all
      Run all tests (examples, test, doctest, notebooks)
  -m
      Run all tests within a single testsuite [default]
  -M
      Run each suite as separate instance
  -e
  --example
      Run only examples tests
  -d
  --doc
      Run only doctests
  -u
  --unit
      Run only unit test
  -n
  --notebook
      Run only notebook test
  -s
  --use-subprocess
      Run every testsuite in a subprocess.
  -c
  --coverage
      Run notebook tests with code coverage
  -v level
      Set the level of verbosity.
          0 - Silent
          1 - Quiet (produces a dot for each succesful test)
          2 - Verbose (default - produces a line of output for each test)
  -h
      Display usage information.
    """
    # Option flags (0/1) selected on the command line.
    test_all = 0
    test_examples = 0
    test_docs = 0
    test_notebooks = 0
    test_units = 0
    test_subprocess = 0
    test_cov = 0
    verbosity = VERBOSE
    try:
        options, arg = getopt.getopt(args, 'aednuschv:',
                                     ['all', 'example', 'doc',
                                      'notebook', 'unit', 'use-subprocess',
                                      'coverage', 'help'])
    except getopt.GetoptError as e:
        err_exit(e.msg)
    if not options:
        err_exit(usage_message)
    # Translate parsed options into the flag variables above.
    for name, value in options:
        if name in ('-a', '--all'):
            test_all = 1
        elif name in ('-e', '--example'):
            test_examples = 1
        elif name in ('-d', '--doc'):
            test_docs = 1
        elif name in ('-n', '--notebook'):
            test_notebooks = 1
        elif name in ('-u', '--unit'):
            test_units = 1
        elif name in ('-s', '--use-subprocess'):
            test_subprocess = 1
        elif name in ('-c', '--coverage'):
            test_cov = 1
        elif name in ('-h', '--help'):
            err_exit(usage_message, 0)
        elif name == '-v':
            verbosity = int(value)
        else:
            err_exit(usage_message)
    # At least one suite selector is required.
    if not (test_all or test_examples or test_docs or
            test_notebooks or test_units):
        err_exit('must specify one of: -a -e -d -n -u')
    # change to main package path, where testrunner.py lives
    path = os.path.dirname(__file__)
    if path:
        os.chdir(path)
    # Build the list of suites according to the selected flags.
    testSuite = []
    if test_all:
        testSuite.append(create_examples_testsuite())
        testSuite.append(create_notebooks_testsuite(cov=test_cov))
        testSuite.append(create_doctest_testsuite())
        testSuite.append(create_unittest_testsuite())
    elif test_examples:
        testSuite.append(create_examples_testsuite())
    elif test_notebooks:
        testSuite.append(create_notebooks_testsuite(cov=test_cov))
    elif test_docs:
        testSuite.append(unittest.TestSuite(create_doctest_testsuite()))
    elif test_units:
        testSuite.append(create_unittest_testsuite())
    all_success = 1
    if test_subprocess:
        # One subprocess per suite; success flag comes back via the queue.
        for test in testSuite:
            if arg:
                # Positional arg filters tests by id substring.
                test = keep_tests(test, arg[0])
            queue = Queue()
            keywords = {'coverage': test_cov, 'notebooks': test_notebooks}
            proc = Process(target=single_suite_process,
                           args=(queue, test, verbosity),
                           kwargs=keywords)
            proc.start()
            result = queue.get()
            proc.join()
            # all_success should be 0 in the end
            all_success = all_success & result
    else:
        # In-process run; coverage only when not running notebooks (those
        # measure coverage inside their own kernels).
        if test_cov and not test_notebooks:
            cov = coverage.coverage()
            cov.start()
        for ts in testSuite:
            if arg:
                ts = keep_tests(ts, arg[0])
            for test in ts:
                if test.countTestCases() != 0:
                    result = unittest.TextTestRunner(verbosity=verbosity).\
                        run(test)
                    # all_success should be 0 in the end
                    all_success = all_success & result.wasSuccessful()
        if test_cov and not test_notebooks:
            cov.stop()
            cov.save()
    if all_success:
        sys.exit(0)
    else:
        # This will return exit code 1
        sys.exit("At least one test has failed. "
                 "Please see test report for details.")
def err_exit(message, rc=2):
    """Write *message* to stderr (framed by blank lines) and terminate the
    process with exit status *rc* (default 2)."""
    sys.stderr.write("\n%s\n" % (message,))
    sys.exit(rc)
if __name__ == '__main__':
    # Entry point: forward CLI arguments (without the program name).
    main(sys.argv[1:])
| 29.905028 | 79 | 0.558752 |
import sys
import os
import io
import getopt
import unittest
import doctest
import inspect
from multiprocessing import Process, Queue
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import coverage
VERBOSE = 2
def create_examples_testsuite():
root_dir = 'examples/'
files = []
skip = ['__init__.py']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'examples/data' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
suite = unittest.TestSuite()
for idx, module in enumerate(files):
module1, func = module.split('.')
module = __import__(module)
func = getattr(module, func)
funcs = inspect.getmembers(func, inspect.isfunction)
[suite.addTest(unittest.FunctionTestCase(v))
for k, v in funcs if k.startswith(("ex_", "recipe_"))]
return suite
class NotebookTest(unittest.TestCase):
def __init__(self, nbfile, cov):
super(NotebookTest, self).__init__()
self.nbfile = nbfile
self.cov = cov
def id(self):
return self.nbfile
def runTest(self):
print(self.id())
kernel = 'python%d' % sys.version_info[0]
cur_dir = os.path.dirname(self.nbfile)
with open(self.nbfile) as f:
nb = nbformat.read(f, as_version=4)
if self.cov:
covdict = {'cell_type': 'code', 'execution_count': 1,
'metadata': {'collapsed': True}, 'outputs': [],
'nbsphinx': 'hidden',
'source': 'import coverage\n'
'coverage.process_startup()\n'
'import sys\n'
'sys.path.append("{0}")\n'.format(cur_dir)
}
nb['cells'].insert(0, nbformat.from_dict(covdict))
exproc = ExecutePreprocessor(kernel_name=kernel, timeout=500)
try:
run_dir = os.getenv('WRADLIB_BUILD_DIR', cur_dir)
exproc.preprocess(nb, {'metadata': {'path': run_dir}})
except CellExecutionError as e:
raise e
if self.cov:
nb['cells'].pop(0)
with io.open(self.nbfile, 'wt') as f:
nbformat.write(nb, f)
self.assertTrue(True)
def create_notebooks_testsuite(**kwargs):
cov = kwargs.pop('cov')
root_dir = os.getenv('WRADLIB_NOTEBOOKS', 'notebooks')
files = []
skip = []
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-6:] != '.ipynb':
continue
if '/.' in root:
continue
f = os.path.join(root, filename)
files.append(f)
suites = []
for file in files:
suite = unittest.TestSuite()
suite.addTest(NotebookTest(file, cov))
suites.append(suite)
return suites
def create_doctest_testsuite():
root_dir = 'wradlib/'
files = []
skip = ['__init__.py', 'version.py', 'bufr.py', 'test_']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'wradlib/tests' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
suite = unittest.TestSuite()
for module in files:
suite.addTest(doctest.DocTestSuite(module))
return suite
def create_unittest_testsuite():
root_dir = 'wradlib/tests/'
return unittest.defaultTestLoader.discover(root_dir)
def single_suite_process(queue, test, verbosity, **kwargs):
test_cov = kwargs.pop('coverage', 0)
test_nb = kwargs.pop('notebooks', 0)
if test_cov and not test_nb:
cov = coverage.coverage()
cov.start()
all_success = 1
for ts in test:
if ts.countTestCases() != 0:
res = unittest.TextTestRunner(verbosity=verbosity).run(ts)
all_success = all_success & res.wasSuccessful()
if test_cov and not test_nb:
cov.stop()
cov.save()
queue.put(all_success)
def keep_tests(suite, arg):
newsuite = unittest.TestSuite()
try:
for tc in suite:
try:
if tc.id().find(arg) != -1:
newsuite.addTest(tc)
except AttributeError:
new = keep_tests(tc, arg)
if new.countTestCases() != 0:
newsuite.addTest(new)
except TypeError:
pass
return newsuite
def main(args):
usage_message = """Usage: python testrunner.py options arg
If run without options, testrunner displays the usage message.
If all tests suites should be run,, use the -a option.
If arg is given, only tests containing arg are run.
options:
-a
--all
Run all tests (examples, test, doctest, notebooks)
-m
Run all tests within a single testsuite [default]
-M
Run each suite as separate instance
-e
--example
Run only examples tests
-d
--doc
Run only doctests
-u
--unit
Run only unit test
-n
--notebook
Run only notebook test
-s
--use-subprocess
Run every testsuite in a subprocess.
-c
--coverage
Run notebook tests with code coverage
-v level
Set the level of verbosity.
0 - Silent
1 - Quiet (produces a dot for each succesful test)
2 - Verbose (default - produces a line of output for each test)
-h
Display usage information.
"""
test_all = 0
test_examples = 0
test_docs = 0
test_notebooks = 0
test_units = 0
test_subprocess = 0
test_cov = 0
verbosity = VERBOSE
try:
options, arg = getopt.getopt(args, 'aednuschv:',
['all', 'example', 'doc',
'notebook', 'unit', 'use-subprocess',
'coverage', 'help'])
except getopt.GetoptError as e:
err_exit(e.msg)
if not options:
err_exit(usage_message)
for name, value in options:
if name in ('-a', '--all'):
test_all = 1
elif name in ('-e', '--example'):
test_examples = 1
elif name in ('-d', '--doc'):
test_docs = 1
elif name in ('-n', '--notebook'):
test_notebooks = 1
elif name in ('-u', '--unit'):
test_units = 1
elif name in ('-s', '--use-subprocess'):
test_subprocess = 1
elif name in ('-c', '--coverage'):
test_cov = 1
elif name in ('-h', '--help'):
err_exit(usage_message, 0)
elif name == '-v':
verbosity = int(value)
else:
err_exit(usage_message)
if not (test_all or test_examples or test_docs or
test_notebooks or test_units):
err_exit('must specify one of: -a -e -d -n -u')
path = os.path.dirname(__file__)
if path:
os.chdir(path)
testSuite = []
if test_all:
testSuite.append(create_examples_testsuite())
testSuite.append(create_notebooks_testsuite(cov=test_cov))
testSuite.append(create_doctest_testsuite())
testSuite.append(create_unittest_testsuite())
elif test_examples:
testSuite.append(create_examples_testsuite())
elif test_notebooks:
testSuite.append(create_notebooks_testsuite(cov=test_cov))
elif test_docs:
testSuite.append(unittest.TestSuite(create_doctest_testsuite()))
elif test_units:
testSuite.append(create_unittest_testsuite())
all_success = 1
if test_subprocess:
for test in testSuite:
if arg:
test = keep_tests(test, arg[0])
queue = Queue()
keywords = {'coverage': test_cov, 'notebooks': test_notebooks}
proc = Process(target=single_suite_process,
args=(queue, test, verbosity),
kwargs=keywords)
proc.start()
result = queue.get()
proc.join()
all_success = all_success & result
else:
if test_cov and not test_notebooks:
cov = coverage.coverage()
cov.start()
for ts in testSuite:
if arg:
ts = keep_tests(ts, arg[0])
for test in ts:
if test.countTestCases() != 0:
result = unittest.TextTestRunner(verbosity=verbosity).\
run(test)
all_success = all_success & result.wasSuccessful()
if test_cov and not test_notebooks:
cov.stop()
cov.save()
if all_success:
sys.exit(0)
else:
sys.exit("At least one test has failed. "
"Please see test report for details.")
def err_exit(message, rc=2):
sys.stderr.write("\n%s\n" % message)
sys.exit(rc)
if __name__ == '__main__':
main(sys.argv[1:])
| true | true |
f7f77732429e2d576a7ce999b4f86a8fe4d1d29c | 2,457 | py | Python | utils.py | michellesima/transformers | 327b32277d882e13b104dd8502a83a8668ea84e9 | [
"Apache-2.0"
] | 1 | 2020-04-16T18:31:04.000Z | 2020-04-16T18:31:04.000Z | utils.py | michellesima/transformers | 327b32277d882e13b104dd8502a83a8668ea84e9 | [
"Apache-2.0"
] | null | null | null | utils.py | michellesima/transformers | 327b32277d882e13b104dd8502a83a8668ea84e9 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from nltk.stem import WordNetLemmatizer
from transformers import *
def repeatN(list, n):
    """Return *list* (a pandas DataFrame/Series) followed by *n* extra
    copies of itself, i.e. the input repeated ``n + 1`` times with a fresh
    RangeIndex.

    For ``n <= 0`` the input object itself is returned unchanged, matching
    the original loop-based behavior.

    The previous implementation called ``DataFrame.append`` in a loop,
    which is quadratic and was removed in pandas 2.0; a single
    ``pd.concat`` produces the same result.  (The parameter name ``list``
    shadows the builtin but is kept for backward compatibility.)
    """
    if n <= 0:
        return list
    return pd.concat([list] * (n + 1), ignore_index=True)
def agen_verbs():
    """Load the connotation-frames agency lexicon and bucket verbs by label.

    For every verb phrase in a category only the first token is used and
    mapped to its infinitive via ``word_infinitive``; short phrases like
    'apply to' therefore contribute just their head verb.

    Returns a dict with keys 'pos', 'neg' and 'equal', each mapping to a
    set of infinitive verb forms.
    """
    frame = pd.read_csv('~/resources/lexica/CONNOTATION/agency_verb.csv')
    buckets = {}
    total = 0
    label_names = {'+': 'pos', '-': 'neg', '=': 'equal'}
    for label, name in label_names.items():
        labelled = frame[frame['Agency{agent}_Label'] == label]
        token_lists = labelled['verb'].str.split()
        buckets[name] = set(word_infinitive(tokens[0])
                            for tokens in token_lists if len(tokens) > 0)
        total += len(buckets[name])
    return buckets
def word_infinitive(word):
    """Map an inflected verb to its infinitive via the module-level
    ``verb_form`` table.

    *word* is looked up in every column; column 0 of the first matching
    row holds the infinitive.  Words not present in the table are
    returned unchanged.
    """
    matches = verb_form[verb_form.isin([word]).any(axis=1)]
    if not matches.empty:
        return matches[0].iloc[0]
    return word
def get_gpu_memory_map():
    """Get the current gpu usage.

    Queries ``nvidia-smi`` for the per-device used memory.

    Returns
    -------
    usage: dict
        Keys are device ids as integers.
        Values are memory usage as integers in MB.
    """
    # Bug fix: this module never imports subprocess at file level, so the
    # original function raised NameError when called.  Import locally to
    # keep the module's top-level imports untouched.
    import subprocess
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # Convert lines into a dictionary: one output line per GPU.
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
    return gpu_memory_map
def add_pad(list, tokenizer):
    """Pad/truncate every token-id sequence in *list* to ``max_sen_len``
    using ``__sen_pad`` and return the resulting sequences."""
    return [__sen_pad(sequence, tokenizer) for sequence in list]
def __sen_pad(sen, tokenizer):
    """Pad or truncate one token-id list in place to exactly ``max_sen_len``.

    - Shorter sequences are right-padded with ``tokenizer.pad_token_id``.
    - Longer sequences drop tokens from just before the end, so the final
      token (the original code popped index ``len(sen) - 2`` repeatedly,
      preserving the last element) survives truncation.
    - Sequences already at ``max_sen_len`` are returned unchanged.

    Bug fix: the original returned None when ``len(sen) == max_sen_len``
    (no ``return`` on that path), which poisoned ``add_pad``'s output with
    None entries; the truncation loop was also O(n*k) pops, replaced here
    by a single equivalent slice deletion.
    """
    deficit = max_sen_len - len(sen)
    if deficit > 0:
        sen.extend([tokenizer.pad_token_id] * deficit)
    elif deficit < 0:
        # Equivalent to popping the second-to-last element |deficit| times:
        # keep the first max_sen_len - 1 tokens plus the final token.
        del sen[max_sen_len - 1:-1]
    return sen
max_sen_len = 64  # fixed sequence length enforced by add_pad/__sen_pad
#lemmatizer = WordNetLemmatizer()
# Verb conjugation table: each row lists up to 24 forms of one verb,
# column 0 holding the infinitive (consumed by word_infinitive).
verb_form = pd.read_csv('verb.txt', usecols=[_ for _ in range(24)], header=None)
# NOTE(review): purpose of these two constants is not visible here —
# presumably sampling probabilities and a training epoch count; confirm
# against the training scripts that import this module.
ps = [0.4, 0.6]
num_epoch = 10
# Built eagerly at import time; reads the agency lexicon from disk.
agen_v = agen_verbs()
# Story-corpus split locations (ROCStories).
ROC_TRAIN = './data/roc/train.csv'
ROC_TEST = './data/roc/test.csv'
ROC_DEV = './data/roc/dev.csv'
| 29.25 | 81 | 0.600326 | import pandas as pd
from nltk.stem import WordNetLemmatizer
from transformers import *
def repeatN(list, n):
ori = list
for _ in range(n):
list = list.append(ori, ignore_index=True)
return list
def agen_verbs():
df = pd.read_csv('~/resources/lexica/CONNOTATION/agency_verb.csv')
agen_v = {}
total = 0
cats = {'+': 'pos', '-':'neg', '=':'equal'}
for k, v in cats.items():
subdf = df[df['Agency{agent}_Label'] == k]
ver_li = subdf['verb'].str.split()
agen_v[v] = set(word_infinitive(li[0]) for li in ver_li if len(li) > 0)
total += len(agen_v[v])
return agen_v
def word_infinitive(word):
row = verb_form[verb_form.isin([word]).any(axis=1)]
if row.empty:
return word
infi = row[0].iloc[0]
return infi
def get_gpu_memory_map():
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
gpu_memory = [int(x) for x in result.strip().split('\n')]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def add_pad(list, tokenizer):
res = [__sen_pad(sen, tokenizer) for sen in list]
return res
def __sen_pad(sen, tokenizer):
if len(sen) < max_sen_len:
pad = [tokenizer.pad_token_id for i in range(max_sen_len - len(sen))]
sen.extend(pad)
return sen
elif len(sen) > max_sen_len:
orilen = len(sen)
for i in range(orilen - max_sen_len):
sen.pop(len(sen) - 2)
return sen
max_sen_len = 64
verb_form = pd.read_csv('verb.txt', usecols=[_ for _ in range(24)], header=None)
ps = [0.4, 0.6]
num_epoch = 10
agen_v = agen_verbs()
ROC_TRAIN = './data/roc/train.csv'
ROC_TEST = './data/roc/test.csv'
ROC_DEV = './data/roc/dev.csv'
| true | true |
f7f7778c7ed11670ebf154abc1c2b4ab8604eef1 | 140 | py | Python | tests/test_curlies_in_attrs_2.py | gvanrossum/pyxl3 | e6588c12caee49c43faf6aa260f04d7e971f6aa8 | [
"Apache-2.0"
] | 150 | 2016-01-26T13:25:58.000Z | 2022-03-11T14:31:45.000Z | tests/test_curlies_in_attrs_2.py | gvanrossum/pyxl3 | e6588c12caee49c43faf6aa260f04d7e971f6aa8 | [
"Apache-2.0"
] | 7 | 2016-02-07T20:08:55.000Z | 2019-07-09T03:35:49.000Z | tests/test_curlies_in_attrs_2.py | gvanrossum/pyxl3 | e6588c12caee49c43faf6aa260f04d7e971f6aa8 | [
"Apache-2.0"
] | 19 | 2016-01-27T15:48:48.000Z | 2020-11-06T07:31:12.000Z | # coding: pyxl
from pyxl import html
def test():
    # pyxl inline-HTML syntax (enabled by the `# coding: pyxl` header):
    # curly braces inside an attribute interpolate a Python expression,
    # and a <frag> wrapper renders only its children.
    assert str(<frag><img src="barbaz{'foo'}" /></frag>) == """<img src="barbazfoo" />"""
| 23.333333 | 89 | 0.585714 |
from pyxl import html
def test():
assert str(<frag><img src="barbaz{'foo'}" /></frag>) == """<img src="barbazfoo" />"""
| false | true |
f7f777cd6a9aa7ea00c135dad164c778f1d722f3 | 5,695 | py | Python | debian/elevator/usr/lib/python2.7/dist-packages/elevator/utils/daemon.py | oleiade/Elevator | 85fd72d8eb8b524140b8aefef216e00972dcecaa | [
"MIT"
] | 10 | 2015-04-08T09:46:24.000Z | 2021-08-01T08:42:39.000Z | debian/elevator/usr/lib/python2.7/dist-packages/elevator/utils/daemon.py | oleiade/Elevator | 85fd72d8eb8b524140b8aefef216e00972dcecaa | [
"MIT"
] | null | null | null | debian/elevator/usr/lib/python2.7/dist-packages/elevator/utils/daemon.py | oleiade/Elevator | 85fd72d8eb8b524140b8aefef216e00972dcecaa | [
"MIT"
] | 4 | 2015-04-29T07:22:31.000Z | 2018-04-19T02:29:10.000Z | """
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (David Mytton <david@boxedice.com>)
- Replaced hard coded '/dev/null in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (David Mytton <david@boxedice.com>)
- Fixed problem with daemon exiting on Python 2.4 (before SystemExit was part of the Exception base)
13th Aug 2010 (David Mytton <david@boxedice.com>
- Fixed unhandled exception if PID file is empty
"""
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
    def __init__(self, pidfile, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull, home_dir='.', umask=022, verbose=1):
        # Standard-stream targets used after daemonization (default /dev/null).
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # Path of the PID file that marks (and locks) a running instance.
        self.pidfile = pidfile
        # Working directory to chdir() into after forking.
        self.home_dir = home_dir
        self.verbose = verbose
        # File-mode creation mask for the daemon (022 is a Python 2 octal literal).
        self.umask = umask
        # Cleared by the SIGTERM/SIGINT handler installed in daemonize().
        self.daemon_alive = True
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
if self.stderr:
se = file(self.stderr, 'a+', 0)
os.dup2(se.fileno(), sys.stderr.fileno())
else:
se = sys.stderr
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
print "Started"
# Write pidfile
atexit.register(self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
print "Starting..."
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
print "Stopping..."
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except ValueError:
pid = None
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
if self.verbose >= 1:
print "Stopped"
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| 29.973684 | 124 | 0.53029 | """
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (David Mytton <david@boxedice.com>)
- Replaced hard coded '/dev/null in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (David Mytton <david@boxedice.com>)
- Fixed problem with daemon exiting on Python 2.4 (before SystemExit was part of the Exception base)
13th Aug 2010 (David Mytton <david@boxedice.com>
- Fixed unhandled exception if PID file is empty
"""
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull, home_dir='.', umask=022, verbose=1):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
if self.stderr:
se = file(self.stderr, 'a+', 0)
os.dup2(se.fileno(), sys.stderr.fileno())
else:
se = sys.stderr
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
print "Started"
# Write pidfile
atexit.register(self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
print "Starting..."
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
print "Stopping..."
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except ValueError:
pid = None
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
if self.verbose >= 1:
print "Stopped"
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| false | true |
f7f778a98d1c15226615010916f46f9bd5a23a69 | 378 | py | Python | data/migrations/0002_auto_20190819_0731.py | bernardobgam/edtech_experiment | 88a64b925b6692261649418260a0bdf7b4a5a9d1 | [
"MIT"
] | null | null | null | data/migrations/0002_auto_20190819_0731.py | bernardobgam/edtech_experiment | 88a64b925b6692261649418260a0bdf7b4a5a9d1 | [
"MIT"
] | 8 | 2020-06-05T23:56:56.000Z | 2022-03-12T00:02:52.000Z | data/migrations/0002_auto_20190819_0731.py | bernardobgam/edtech_experiment | 88a64b925b6692261649418260a0bdf7b4a5a9d1 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2019-08-18 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='quizdata',
name='seconds_taken',
field=models.FloatField(null=True),
),
]
| 19.894737 | 47 | 0.589947 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``QuizData.seconds_taken`` nullable (``FloatField(null=True)``)."""

    dependencies = [
        ('data', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='quizdata',
            name='seconds_taken',
            field=models.FloatField(null=True),
        ),
    ]
| true | true |
f7f778c5f1399d369a962531326c4d2119ff39a2 | 900 | py | Python | corehq/apps/accounting/migrations/0025_auto_20180508_1952.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/accounting/migrations/0025_auto_20180508_1952.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/accounting/migrations/0025_auto_20180508_1952.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | # Generated by Django 1.11.13 on 2018-05-08 19:52
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting', '0024_unique__transaction_id'),
]
operations = [
migrations.AddField(
model_name='billingrecord',
name='emailed_to_list',
field=django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254),
default=list, size=None),
),
migrations.AddField(
model_name='wirebillingrecord',
name='emailed_to_list',
field=django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254),
default=list, size=None),
),
]
| 33.333333 | 105 | 0.576667 |
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``emailed_to_list`` Postgres ``ArrayField`` of email addresses
    (``default=list``) to both ``BillingRecord`` and ``WireBillingRecord``."""

    dependencies = [
        ('accounting', '0024_unique__transaction_id'),
    ]

    operations = [
        migrations.AddField(
            model_name='billingrecord',
            name='emailed_to_list',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254),
                                                            default=list, size=None),
        ),
        migrations.AddField(
            model_name='wirebillingrecord',
            name='emailed_to_list',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254),
                                                            default=list, size=None),
        ),
    ]
| true | true |
f7f778e2dc5aa3f8e7c5c488f4ecf23263dc9652 | 13,625 | py | Python | run_2.py | gormlabenz/People-Counting-in-Real-Time | d9bdfc1d428e249aa1d86b355a2aa67b5199ae2a | [
"MIT"
] | null | null | null | run_2.py | gormlabenz/People-Counting-in-Real-Time | d9bdfc1d428e249aa1d86b355a2aa67b5199ae2a | [
"MIT"
] | null | null | null | run_2.py | gormlabenz/People-Counting-in-Real-Time | d9bdfc1d428e249aa1d86b355a2aa67b5199ae2a | [
"MIT"
] | null | null | null | import argparse
import csv
import datetime
import logging
import time
from itertools import zip_longest
import cv2
import dlib
import imutils
import numpy as np
import schedule
from imutils.video import FPS, VideoStream
from PIL import Image
from mylib import config, epd4in2, thread
from mylib.centroidtracker import CentroidTracker
from mylib.mailer import Mailer
from mylib.trackableobject import TrackableObject
logging.basicConfig(level=logging.DEBUG)
t0 = time.time()
COLOR_ID = (0, 0, 255)
COLOR_DOT = (0, 0, 255)
COLOR_LINE = (0, 0, 255)
def run():
    """Run the people-counter demo loop.

    Reads frames from a video file (``--input``) or the configured live
    stream, detects people with a Caffe MobileNet-SSD every
    ``--skip-frames`` frames, tracks them in between with dlib correlation
    trackers, counts center-line crossings (enter/exit), optionally sends a
    threshold e-mail alert, and mirrors an adaptively-thresholded copy of
    each annotated frame on a Waveshare 4.2" e-paper display.

    Bug fixes versus the previous revision:
      * ``info`` was printed before it was ever assigned (NameError on the
        first counted object) -- it is now built before the debug print;
      * the e-paper snapshot (``Image.fromarray``) was taken BEFORE the ID
        label and centroid dot were drawn, so annotations never appeared;
      * the ``--output`` VideoWriter was created but never written to or
        released; removed the unused ``cv2.waitKey`` (no GUI window exists).
    """
    # --- e-paper init ------------------------------------------------------
    logging.info("epd4in2 Demo")
    epd = epd4in2.EPD()
    logging.info("init and Clear")
    epd.init()
    epd.Clear()
    time.sleep(1)

    logging.info("Drawing")

    # --- command-line arguments -------------------------------------------
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=False,
                    help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=True,
                    help="path to Caffe pre-trained model")
    ap.add_argument("-i", "--input", type=str,
                    help="path to optional input video file")
    ap.add_argument("-o", "--output", type=str,
                    help="path to optional output video file")
    ap.add_argument("-c", "--confidence", type=float, default=0.4,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-s", "--skip-frames", type=int, default=30,
                    help="# of skip frames between detections")
    args = vars(ap.parse_args())

    # Class labels MobileNet SSD was trained on; only "person" is kept below.
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
               "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
               "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
               "sofa", "train", "tvmonitor"]

    # Load the serialized detector from disk.
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # Frame source: live IP-camera stream or a video file.
    if not args.get("input", False):
        print("[INFO] Starting the live stream..")
        vs = VideoStream(config.url).start()
        time.sleep(2.0)
    else:
        print("[INFO] Starting the video..")
        vs = cv2.VideoCapture(args["input"])

    writer = None          # created lazily once the frame size is known
    W = None               # frame width/height, taken from the first frame
    H = None

    # Centroid tracker plus per-object state.
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalFrames = 0
    totalDown = 0          # people entering (moving down past the center)
    totalUp = 0            # people exiting (moving up past the center)
    x = []                 # running "people inside" samples
    empty = []             # one entry per exit event
    empty1 = []            # one entry per enter event

    fps = FPS().start()

    while True:
        # VideoCapture returns (grabbed, frame); VideoStream returns frame.
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # End of a file-based video.
        if args["input"] is not None and frame is None:
            break

        # Downscale to the e-paper resolution; dlib wants RGB, and the
        # display gets an adaptively thresholded (near-1-bit) copy.
        frame = imutils.resize(frame, width=epd.width, height=epd.height)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame_thres = cv2.adaptiveThreshold(
            img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        frame_out = cv2.cvtColor(frame_thres, cv2.COLOR_GRAY2BGR)

        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # BUGFIX: the writer was previously created but never used; frames
        # are now written (below) and the writer released after the loop.
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)

        status = "Waiting"
        rects = []

        if totalFrames % args["skip_frames"] == 0:
            # Periodically run the (expensive) detector and re-seed trackers.
            status = "Detecting"
            trackers = []

            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > args["confidence"]:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    # Box coords are normalized; scale to pixel space.
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            # Between detections, just advance the correlation trackers.
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        # Associate old centroids with the newly computed ones.
        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                # Movement direction relative to the centroid history:
                # negative = up/exit, positive = down/enter.
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        empty.append(totalUp)
                        to.counted = True
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        empty1.append(totalDown)
                        # Alert once the occupancy threshold is exceeded.
                        if sum(x) >= config.Threshold:
                            cv2.putText(frame_out, "-ALERT: People limit exceeded-",
                                        (10, frame.shape[0] - 80),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
                            if config.ALERT:
                                print("[INFO] Sending email alert..")
                                Mailer().send(config.MAIL)
                                print("[INFO] Alert sent")
                        to.counted = True
                        # Current occupancy sample: enters minus exits.
                        x = []
                        x.append(len(empty1) - len(empty))
                        print('x = ', x)
                        # BUGFIX: `info` used to be referenced here before it
                        # was ever assigned (NameError on the first counted
                        # object); build it before printing.
                        info = [
                            ("Exit", totalUp),
                            ("Enter", totalDown),
                            ("Status", status),
                        ]
                        for k, v in info:
                            print(k, v)

            # Annotate the display frame with the object ID and centroid.
            text = "ID {}".format(objectID)
            print(text)
            cv2.putText(frame_out, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR_ID, 2)
            cv2.circle(frame_out, (centroid[0], centroid[1]), 4, COLOR_DOT, -1)
            # BUGFIX: snapshot for the e-paper AFTER drawing (the PIL copy
            # was previously taken first, so annotations never showed).
            frame_out_pil = Image.fromarray(frame_out)
            # NOTE(review): refreshing the e-paper once per tracked object
            # per frame is very slow; consider one refresh per frame.
            epd.display(epd.getbuffer(frame_out_pil))

            trackableObjects[objectID] = to

        # Per-frame summaries (previously only used by commented-out overlay
        # drawing; kept for parity / future display).
        info = [
            ("Exit", totalUp),
            ("Enter", totalDown),
            ("Status", status),
        ]
        info2 = [
            ("Total people inside", x),
        ]

        # BUGFIX: persist the annotated frame when --output was given.
        if writer is not None:
            writer.write(frame_out)

        totalFrames += 1
        fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    if writer is not None:
        writer.release()

    # NOTE(review): the live VideoStream is only released when config.Thread
    # is set (issue 15 upstream); vs.stop() for the threaded stream is
    # intentionally not called here, matching the previous behavior.
    if config.Thread:
        vs.release()
if __name__ == '__main__':
run()
| 39.152299 | 111 | 0.551633 | import argparse
import csv
import datetime
import logging
import time
from itertools import zip_longest
import cv2
import dlib
import imutils
import numpy as np
import schedule
from imutils.video import FPS, VideoStream
from PIL import Image
from mylib import config, epd4in2, thread
from mylib.centroidtracker import CentroidTracker
from mylib.mailer import Mailer
from mylib.trackableobject import TrackableObject
logging.basicConfig(level=logging.DEBUG)
t0 = time.time()
COLOR_ID = (0, 0, 255)
COLOR_DOT = (0, 0, 255)
COLOR_LINE = (0, 0, 255)
def run():
logging.info("epd4in2 Demo")
epd = epd4in2.EPD()
logging.info("init and Clear")
epd.init()
epd.Clear()
time.sleep(1)
logging.info("Drawing")
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=False,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
if not args.get("input", False):
print("[INFO] Starting the live stream..")
vs = VideoStream(config.url).start()
time.sleep(2.0)
else:
print("[INFO] Starting the video..")
vs = cv2.VideoCapture(args["input"])
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
W = None
H = None
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
totalFrames = 0
totalDown = 0
totalUp = 0
x = []
empty = []
empty1 = []
fps = FPS().start()
while True:
frame = vs.read()
frame = frame[1] if args.get("input", False) else frame
if args["input"] is not None and frame is None:
break
frame = imutils.resize(frame, width=epd.width, height=epd.height)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_thres = cv2.adaptiveThreshold(
img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
frame_out = cv2.cvtColor(frame_thres, cv2.COLOR_GRAY2BGR)
if W is None or H is None:
(H, W) = frame.shape[:2]
if args["output"] is not None and writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30,
(W, H), True)
status = "Waiting"
rects = []
if totalFrames % args["skip_frames"] == 0:
status = "Detecting"
trackers = []
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
net.setInput(blob)
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > args["confidence"]:
idx = int(detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
trackers.append(tracker)
else:
for tracker in trackers:
status = "Tracking"
tracker.update(rgb)
pos = tracker.get_position()
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
rects.append((startX, startY, endX, endY))
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
to = trackableObjects.get(objectID, None)
if to is None:
to = TrackableObject(objectID, centroid)
else:
y = [c[1] for c in to.centroids]
direction = centroid[1] - np.mean(y)
to.centroids.append(centroid)
if not to.counted:
if direction < 0 and centroid[1] < H // 2:
totalUp += 1
empty.append(totalUp)
to.counted = True
elif direction > 0 and centroid[1] > H // 2:
totalDown += 1
empty1.append(totalDown)
if sum(x) >= config.Threshold:
cv2.putText(frame_out, "-ALERT: People limit exceeded-", (10, frame.shape[0] - 80),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
if config.ALERT:
print("[INFO] Sending email alert..")
Mailer().send(config.MAIL)
print("[INFO] Alert sent")
to.counted = True
x = []
x.append(len(empty1) - len(empty))
print('x = ', x)
for k, v in info:
print(k, v)
text = "ID {}".format(objectID)
print(text)
frame_out_pil = Image.fromarray(frame_out)
cv2.putText(frame_out, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR_ID, 2)
cv2.circle(
frame_out, (centroid[0], centroid[1]), 4, COLOR_DOT, -1)
epd.display(epd.getbuffer(frame_out_pil))
trackableObjects[objectID] = to
info = [
("Exit", totalUp),
("Enter", totalDown),
("Status", status),
]
info2 = [
("Total people inside", x),
]
key = cv2.waitKey(1) & 0xFF
totalFrames += 1
fps.update()
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
:
run()
| true | true |
f7f77937db9d241920ff84ea180aef7fdbb14361 | 30,354 | py | Python | fairseq/sequence_generator.py | yingwaner/C-MNMT | 3a87f75302efc859139af72483f480a1cac86f25 | [
"MIT"
] | null | null | null | fairseq/sequence_generator.py | yingwaner/C-MNMT | 3a87f75302efc859139af72483f480a1cac86f25 | [
"MIT"
] | null | null | null | fairseq/sequence_generator.py | yingwaner/C-MNMT | 3a87f75302efc859139af72483f480a1cac86f25 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.,
unk_penalty=0.,
retain_dropout=False,
sampling=False,
sampling_topk=-1,
sampling_topp=-1.0,
temperature=1.,
diverse_beam_groups=-1,
diverse_beam_strength=0.5,
match_source_len=False,
no_repeat_ngram_size=0,
):
"""Generates translations of a given source sentence.
Args:
tgt_dict (~fairseq.data.Dictionary): target dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
sampling (bool, optional): sample outputs instead of beam search
(default: False)
sampling_topk (int, optional): only sample among the top-k choices
at each step (default: -1)
sampling_topp (float, optional): only sample among the smallest set
of words whose cumulative probability mass exceeds p
at each step (default: -1.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
diverse_beam_groups/strength (float, optional): parameters for
Diverse Beam Search sampling
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
assert temperature > 0, '--temperature must be greater than 0'
if sampling:
self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)
elif diverse_beam_groups > 0:
self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
elif match_source_len:
self.search = search.LengthConstrainedBeamSearch(
tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
)
else:
self.search = search.BeamSearch(tgt_dict)
@torch.no_grad()
def generate(self, models, sample, lang_num, **kwargs):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
model = EnsembleModel(models)
return self._generate(model, sample, lang_num, **kwargs)
@torch.no_grad()
def _generate(
self,
model,
sample,
lang_num,
prefix_tokens=None,
bos_token=None,
**kwargs
):
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(lang_num, encoder_input)
sents = encoder_outs[0]['encoder_out']
sents = sents.transpose(0, 1)#T x B x C -> B x T x C
sents = sents.cpu().numpy()
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn, attn_buf = None, None
# The blacklist indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the blacklist would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1):
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = -math.inf
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for blacklisted ones)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, sents
class EnsembleModel(torch.nn.Module):
    """Wraps several models so generation can query them as one ensemble."""
    def __init__(self, models):
        super().__init__()
        self.models = torch.nn.ModuleList(models)
        # Incremental decoding state is kept only when every member decoder
        # supports it; otherwise decoding falls back to full re-computation.
        if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
            self.incremental_states = {m: {} for m in models}
        else:
            self.incremental_states = None
    def has_encoder(self):
        """Return True when the wrapped models have an encoder component."""
        return hasattr(self.models[0], 'encoder')
    def max_decoder_positions(self):
        """Smallest decoder position limit across all ensemble members."""
        return min(m.max_decoder_positions() for m in self.models)
    @torch.no_grad()
    def forward_encoder(self, lang_num, encoder_input):
        """Run each member encoder; returns None for decoder-only models."""
        if not self.has_encoder():
            return None
        outputs = []
        for member in self.models:
            outputs.append(member.encoder(lang_num, **encoder_input))
        return outputs
    @torch.no_grad()
    def forward_decoder(self, tokens, encoder_outs, temperature=1.):
        """Decode one step, averaging member predictions in log-prob space."""
        if len(self.models) == 1:
            # Single model: no averaging required.
            return self._decode_one(
                tokens,
                self.models[0],
                encoder_outs[0] if self.has_encoder() else None,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
        member_lprobs = []
        attn_accum = None
        for member, member_enc_out in zip(self.models, encoder_outs):
            lprobs, attn = self._decode_one(
                tokens,
                member,
                member_enc_out,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
            member_lprobs.append(lprobs)
            if attn is not None:
                # Accumulate attention in place across members.
                attn_accum = attn if attn_accum is None else attn_accum.add_(attn)
        # log(mean(probs)) == logsumexp(log_probs) - log(N)
        avg_lprobs = torch.logsumexp(torch.stack(member_lprobs, dim=0), dim=0) - math.log(len(self.models))
        if attn_accum is not None:
            attn_accum.div_(len(self.models))
        return avg_lprobs, attn_accum
    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        """Run one member's decoder and return (probs, attn) for the last step."""
        if self.incremental_states is None:
            decoder_out = list(model.decoder(tokens, encoder_out=encoder_out))
        else:
            decoder_out = list(model.decoder(
                tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
            ))
        # Keep only the prediction for the newest time step.
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        # Exact type check kept intentionally (matches original semantics).
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)[:, -1, :]
        return probs, attn
    def reorder_encoder_out(self, encoder_outs, new_order):
        """Reorder each member's encoder output to follow beam reordering."""
        if not self.has_encoder():
            return
        reordered = []
        for member, enc_out in zip(self.models, encoder_outs):
            reordered.append(member.encoder.reorder_encoder_out(enc_out, new_order))
        return reordered
    def reorder_incremental_state(self, new_order):
        """Reorder cached incremental decoder state, if it is being kept."""
        if self.incremental_states is None:
            return
        for member in self.models:
            member.decoder.reorder_incremental_state(self.incremental_states[member], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):
    def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
        """Generates translations of a given source sentence.
        Produces alignments following "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).
        Args:
            left_pad_target (bool, optional): Whether or not the
                hypothesis should be left padded or not when they are
                teacher forced for generating alignments.
        """
        super().__init__(tgt_dict, **kwargs)
        self.left_pad_target = left_pad_target
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate hypotheses and attach a hard alignment to each one.
        Extra keyword arguments (including the ``lang_num`` that this fork's
        ``SequenceGenerator._generate`` requires) are forwarded unchanged.
        """
        model = EnsembleModelWithAlignment(models)
        # BUG FIX: this fork's SequenceGenerator._generate returns a
        # (finalized, sents) tuple; indexing the tuple with
        # finalized[i // beam_size] below would fail, so unpack it here and
        # keep only the finalized hypotheses.
        finalized, _ = super()._generate(model, sample, **kwargs)
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        beam_size = self.beam_size
        src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
            self._prepare_batch_for_alignment(sample, finalized)
        if any(getattr(m, 'full_context_alignment', False) for m in model.models):
            # Re-run a teacher-forced forward pass to get full-context attention.
            attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
        else:
            # Otherwise reuse the attention recorded during generation.
            attn = [
                finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
                for i in range(bsz * beam_size)
            ]
        # Process the attn matrix to extract hard alignments.
        for i in range(bsz * beam_size):
            alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
            finalized[i // beam_size][i % beam_size]['alignment'] = alignment
        return finalized
    def _prepare_batch_for_alignment(self, sample, hypothesis):
        """Tile source tensors beam_size times and collate hypothesis tokens.
        Returns (src_tokens, src_lengths, prev_output_tokens, tgt_tokens),
        where prev_output_tokens is the EOS-shifted input for teacher forcing
        and tgt_tokens is the unshifted target.
        """
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        # Repeat each source row beam_size times: (bsz, len) -> (bsz*beam, len).
        src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
        src_lengths = sample['net_input']['src_lengths']
        src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
        prev_output_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
        )
        tgt_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
        )
        return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
    """A wrapper around an ensemble of models.
    Alignment-extraction variant of EnsembleModel: it adds a teacher-forced
    forward pass (forward_align) and decodes via each model's
    ``forward_decoder`` instead of ``decoder``.
    """
    def __init__(self, models):
        super().__init__(models)
    def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
        # Average the 'attn' tensors from a full (teacher-forced) forward
        # pass over all ensemble members.
        # NOTE(review): with an empty ensemble this returns None; with a
        # single model the attention tensor is returned as-is (no copy).
        avg_attn = None
        for model in self.models:
            decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
            attn = decoder_out[1]['attn']
            if avg_attn is None:
                avg_attn = attn
            else:
                avg_attn.add_(attn)  # in-place accumulation
        if len(self.models) > 1:
            avg_attn.div_(len(self.models))
        return avg_attn
    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        # Same shape as EnsembleModel._decode_one, but calls
        # model.forward_decoder (rather than model.decoder) so models that
        # implement full-context alignment take their special decode path.
        if self.incremental_states is not None:
            decoder_out = list(model.forward_decoder(
                tokens,
                encoder_out=encoder_out,
                incremental_state=self.incremental_states[model],
            ))
        else:
            decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
        # Keep only the newest time step's prediction.
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        return probs, attn
| 42.393855 | 118 | 0.580187 |
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.,
unk_penalty=0.,
retain_dropout=False,
sampling=False,
sampling_topk=-1,
sampling_topp=-1.0,
temperature=1.,
diverse_beam_groups=-1,
diverse_beam_strength=0.5,
match_source_len=False,
no_repeat_ngram_size=0,
):
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
assert temperature > 0, '--temperature must be greater than 0'
if sampling:
self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)
elif diverse_beam_groups > 0:
self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
elif match_source_len:
self.search = search.LengthConstrainedBeamSearch(
tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
)
else:
self.search = search.BeamSearch(tgt_dict)
@torch.no_grad()
def generate(self, models, sample, lang_num, **kwargs):
model = EnsembleModel(models)
return self._generate(model, sample, lang_num, **kwargs)
@torch.no_grad()
def _generate(
self,
model,
sample,
lang_num,
prefix_tokens=None,
bos_token=None,
**kwargs
):
if not self.retain_dropout:
model.eval()
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
model.max_decoder_positions() - 1,
)
encoder_outs = model.forward_encoder(lang_num, encoder_input)
sents = encoder_outs[0]['encoder_out']
sents = sents.transpose(0, 1)
sents = sents.cpu().numpy()
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn, attn_buf = None, None
# samples. Then the blacklist would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle min and max length constraints
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1):
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = -math.inf
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size][blacklist] = 0
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized, sents
class EnsembleModel(torch.nn.Module):
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.incremental_states = None
if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
self.incremental_states = {m: {} for m in models}
def has_encoder(self):
return hasattr(self.models[0], 'encoder')
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, lang_num, encoder_input):
if not self.has_encoder():
return None
return [model.encoder(lang_num, **encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, tokens, encoder_outs, temperature=1.):
if len(self.models) == 1:
return self._decode_one(
tokens,
self.models[0],
encoder_outs[0] if self.has_encoder() else None,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs = []
avg_attn = None
for model, encoder_out in zip(self.models, encoder_outs):
probs, attn = self._decode_one(
tokens,
model,
encoder_out,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
if avg_attn is not None:
avg_attn.div_(len(self.models))
return avg_probs, avg_attn
def _decode_one(
self, tokens, model, encoder_out, incremental_states, log_probs,
temperature=1.,
):
if self.incremental_states is not None:
decoder_out = list(model.decoder(
tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
))
else:
decoder_out = list(model.decoder(tokens, encoder_out=encoder_out))
decoder_out[0] = decoder_out[0][:, -1:, :]
if temperature != 1.:
decoder_out[0].div_(temperature)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
if attn is not None:
attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
probs = probs[:, -1, :]
return probs, attn
def reorder_encoder_out(self, encoder_outs, new_order):
if not self.has_encoder():
return
return [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
def reorder_incremental_state(self, new_order):
if self.incremental_states is None:
return
for model in self.models:
model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):
    """Sequence generator that also attaches hard source-target alignments
    to every finalized hypothesis (under the ``'alignment'`` key)."""

    def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
        # left_pad_target controls padding side when collating target tokens
        # for the alignment pass.
        super().__init__(tgt_dict, **kwargs)
        self.left_pad_target = left_pad_target

    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate hypotheses, then derive an alignment for each beam item."""
        model = EnsembleModelWithAlignment(models)
        finalized = super()._generate(model, sample, **kwargs)
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        beam_size = self.beam_size
        src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
            self._prepare_batch_for_alignment(sample, finalized)
        if any(getattr(m, 'full_context_alignment', False) for m in model.models):
            # Re-run the decoder with full context to get alignment attention.
            attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
        else:
            # Otherwise reuse the attention recorded during generation,
            # one entry per (sentence, beam) pair.
            attn = [
                finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
                for i in range(bsz * beam_size)
            ]
        # Process the attn matrix to extract hard alignments.
        for i in range(bsz * beam_size):
            alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
            finalized[i // beam_size][i % beam_size]['alignment'] = alignment
        return finalized

    def _prepare_batch_for_alignment(self, sample, hypothesis):
        """Tile source tensors beam_size times and collate every beam's
        tokens so the alignment pass sees one row per hypothesis."""
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        # Repeat each source sentence beam_size times: (bsz*beam, src_len).
        src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
        src_lengths = sample['net_input']['src_lengths']
        src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
        # Teacher-forcing inputs: EOS moved to the front.
        prev_output_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
        )
        # Targets as generated: EOS stays at the end.
        tgt_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
        )
        return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
    """Ensemble wrapper that can also produce averaged alignment attention."""

    def __init__(self, models):
        super().__init__(models)

    def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
        """Run a full forward pass per model and average the 'attn' extras.

        Assumes every model returns its attention under ``decoder_out[1]['attn']``
        and that at least one model is present (``avg_attn`` would otherwise
        stay None and the in-place div_ below would fail for >1 models).
        """
        avg_attn = None
        for model in self.models:
            decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
            attn = decoder_out[1]['attn']
            if avg_attn is None:
                avg_attn = attn
            else:
                avg_attn.add_(attn)
        if len(self.models) > 1:
            avg_attn.div_(len(self.models))
        return avg_attn

    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        """Same as EnsembleModel._decode_one but routed through the model's
        ``forward_decoder`` entry point instead of ``model.decoder``."""
        if self.incremental_states is not None:
            decoder_out = list(model.forward_decoder(
                tokens,
                encoder_out=encoder_out,
                incremental_state=self.incremental_states[model],
            ))
        else:
            decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
        # Keep only the last decoding step.
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        return probs, attn
| true | true |
f7f7794cc06448e6a39672f0c079219c7d63c3ed | 220 | py | Python | Speeking/app.py | luizpavanello/python_courses | 274daa7ba70a7e2d06b3edbeda1161ce08526665 | [
"MIT"
] | null | null | null | Speeking/app.py | luizpavanello/python_courses | 274daa7ba70a7e2d06b3edbeda1161ce08526665 | [
"MIT"
] | null | null | null | Speeking/app.py | luizpavanello/python_courses | 274daa7ba70a7e2d06b3edbeda1161ce08526665 | [
"MIT"
] | null | null | null | import gtts
from playsound import playsound
with open('frase.txt', 'r') as arquivo:
for linha in arquivo:
frase = gtts.gTTS(linha,lang='pt-br')
frase.save('frase.mp3')
playsound('frase.mp3')
| 24.444444 | 45 | 0.640909 | import gtts
from playsound import playsound
with open('frase.txt', 'r') as arquivo:
for linha in arquivo:
frase = gtts.gTTS(linha,lang='pt-br')
frase.save('frase.mp3')
playsound('frase.mp3')
| true | true |
f7f77a5c4df1ebd0c6a7032915e41ced71637ee7 | 2,410 | py | Python | HSFL_LNN/middlewares.py | SundownEffect/HSFL_LatestNewsNotification | fe15b96b0e97df3d3acc022677efda06ae7e5fee | [
"MIT"
] | 1 | 2020-07-22T11:23:23.000Z | 2020-07-22T11:23:23.000Z | HSFL_LNN/middlewares.py | OtterWhisperer/HSFL_LatestNewsNotification | fe15b96b0e97df3d3acc022677efda06ae7e5fee | [
"MIT"
] | 5 | 2020-06-18T12:10:12.000Z | 2020-06-18T15:38:17.000Z | HSFL_LNN/middlewares.py | SundownEffect/HSFL_LatestNewsNotification | fe15b96b0e97df3d3acc022677efda06ae7e5fee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# NTLM Auth
#from scrapy.http import Response
#import requests
#from requests_ntlm import HttpNtlmAuth
class HSFL_LNN_SpiderMiddleware(object):
    """Spider middleware for the HSFL_LNN project.

    Bug fix: Scrapy invokes every ``process_*`` hook as an *instance*
    method, so each hook must take ``self`` as its first parameter.  The
    original definitions omitted ``self``, which made Scrapy bind the
    ``response`` argument to ``self`` and shift every other argument.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    # NTLM-Auth (disabled — requires `requests` / `requests_ntlm` imports):
    # def process_request(self, request, spider):
    #     url = request.url
    #     pwd = getattr(spider, 'http_pass', '')
    #     usr = getattr(spider, 'http_user', '')
    #     s = requests.session()
    #     response = s.get(url, auth=HttpNtlmAuth(usr, pwd))
    #     return Response(url, response.status_code, {}, response.content)

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider; must return
        # only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
from scrapy import signals
class HSFL_LNN_SpiderMiddleware(object):
    """Spider middleware; Scrapy calls the ``process_*`` hooks as instance
    methods, so each one must accept ``self`` (the original ones did not)."""

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Returning None lets Scrapy continue processing this response.
        return None

    def process_spider_output(self, response, result, spider):
        # Pass every spider result through unchanged.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # No special exception handling; let Scrapy apply its defaults.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the spider's start requests untouched.
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
f7f77a61dfed88f3787d0d945c6a76bd2e8fa3fe | 4,392 | py | Python | create_training_joblib.py | sct-pipeline/contrast-agnostic-softseg-spinalcord | 4d3429f311c2c4787c63c290acf3cafa0a4e15bf | [
"MIT"
] | null | null | null | create_training_joblib.py | sct-pipeline/contrast-agnostic-softseg-spinalcord | 4d3429f311c2c4787c63c290acf3cafa0a4e15bf | [
"MIT"
] | 13 | 2021-01-24T23:03:42.000Z | 2021-11-12T15:58:32.000Z | create_training_joblib.py | sct-pipeline/contrast-agnostic-softseg-spinalcord | 4d3429f311c2c4787c63c290acf3cafa0a4e15bf | [
"MIT"
] | 2 | 2021-05-21T21:04:00.000Z | 2022-03-02T18:55:56.000Z | import pandas as pd
import joblib
import numpy as np
import argparse
import os
# Inputs:
# --sct_train_file: Pickle file that was holds the a list of the dataset used for training.
# Can be downloaded at: https://github.com/sct-data/deepseg_sc_models
# train_valid_test column: 1 for training, 2 for validating, 3 for testing
# --bids_datasets_list: List of dataset folders to gather list of subjects from.
# 1 or more (e.g. sct-testing-large spine-generic-multi-subject etc.)
# --ofolder: Folder to save the output .joblib file
# Example usage:
# python3 create_training_joblib --sct_train_file ~/dataset.pkl --bids_datasets_list ~/datasets/testing-large
# --ofolder ~/train_new_model
#
# Konstantinos Nasiotis 2021
def create_new_joblib(dataset_sct_file, input_bids_folders, outputFolder):
    """Build a new train/valid/test split file (``new_splits.joblib``).

    Merges the ``participants.tsv`` of every BIDS folder, pins the subjects
    that SCT used for *testing* (train_valid_test == 3 in the pickle) to the
    test set, randomly splits the remaining subjects, and dumps a dict with
    ``train``/``valid``/``test`` lists of participant_ids.

    NOTE(review): the split uses ``DataFrame.sample(frac=1)`` with no seed,
    so repeated runs produce different splits.
    """
    ## Load the merged participants.tsv
    #merged_folder = '/home/nas/Consulting/ivado-project/Datasets/merged_SCTLARGE_MULTISUBJECT/'
    #df_merged = bids.BIDS(merged_folder).participants.content
    # Merge multiple .tsv files into the same dataframe
    df_merged = pd.read_table(os.path.join(input_bids_folders[0], 'participants.tsv'), encoding="ISO-8859-1")
    # Convert to string to get rid of potential TypeError during merging within the same column
    df_merged = df_merged.astype(str)
    # Add the Bids_path to the dataframe
    df_merged['bids_path'] = [input_bids_folders[0]] * len(df_merged)
    for iFolder in range(1, len(input_bids_folders)):
        df_next = pd.read_table(os.path.join(input_bids_folders[iFolder], 'participants.tsv'), encoding="ISO-8859-1")
        df_next = df_next.astype(str)
        df_next['bids_path'] = [input_bids_folders[iFolder]] * len(df_next)
        # Merge the .tsv files (This keeps also non-overlapping fields)
        df_merged = pd.merge(left=df_merged, right=df_next, how='outer')
    dataUsedOnSct = pd.read_pickle(dataset_sct_file)
    # Force the subjects that were used for testing for SCT models to be used for testing in the new .joblib
    subjectsUsedForTesting = dataUsedOnSct[dataUsedOnSct['train_valid_test'] == 3]['subject'].to_list()
    # Use 60% for training/validation and 40% for testing
    # NOTE(review): the constants below give 40% train + 20% valid of the
    # *remaining* subjects, which does not obviously match the comment above
    # — confirm the intended proportions.
    percentage_train = 0.4
    percentage_validation = 0.2
    # Whatever was used in sct testing, will stay in the testing side of the joblib as well
    test = df_merged[np.in1d(df_merged['data_id'], subjectsUsedForTesting)]
    # Keep only the rest of the subjects for splitting to training/validation/testing sets
    df_merged_reduced = df_merged[np.invert(np.in1d(df_merged['data_id'], subjectsUsedForTesting))]
    # Shuffle, then cut at the two boundary indices (train | valid | extra test).
    train, validate, test2 = np.split(df_merged_reduced.sample(frac=1),
                                      [int(percentage_train*(len(df_merged_reduced))+len(test)/2),
                                       int((percentage_train+percentage_validation)*len(df_merged_reduced)+len(test)/2)])
    # Append the testing from sct to the new testing entries
    test3 = test.append(test2, ignore_index=1)
    # Populate the joblib file
    jobdict = {'train': train['participant_id'].to_list(),
               'valid': validate['participant_id'].to_list(),
               'test': test3['participant_id'].to_list()}
    joblib.dump(jobdict, os.path.join(outputFolder, "new_splits.joblib"))
    '''
    # Debugging
    newJoblib = joblib.load(os.path.join(outputFolder, "new_splits.joblib"))
    print(len(newJoblib["train"]))
    print(len(newJoblib["valid"]))
    print(len(newJoblib["test"]))
    '''
    print('Success')
def get_parser():
    """Build the command-line parser for this script.

    All three options are required, accept zero-or-more values (``nargs="*"``)
    and are stored under camelCase ``dest`` names used by ``main``.
    """
    parser = argparse.ArgumentParser()
    option_specs = [
        ("--sct_train_file", "sctTrainFile",
         ".pkl file that was used while training SCT models"),
        ("--bids_datasets_list", "bidsDatasets",
         "BIDS dataset inputs"),
        ("--ofolder", "outputFolder",
         "Output folder where the new_splits.joblib file will be saved"),
    ]
    for flag, dest, help_text in option_specs:
        parser.add_argument(flag, required=True, nargs="*", dest=dest,
                            help=help_text)
    return parser
def main():
    """Entry point: parse CLI arguments and build the new split file."""
    args = get_parser().parse_args()
    create_new_joblib(
        args.sctTrainFile[0],
        args.bidsDatasets,
        args.outputFolder[0],
    )
# Run only when executed as a script (safe to import without side effects).
if __name__ == '__main__':
    main()
| 43.485149 | 121 | 0.68602 | import pandas as pd
import joblib
import numpy as np
import argparse
import os
def create_new_joblib(dataset_sct_file, input_bids_folders, outputFolder):
    """Build ``new_splits.joblib`` from BIDS participants.tsv files.

    NOTE(review): the first statement below is corrupted — ``d`` is undefined
    and ``df_merged`` is never initialised before use on the next line; the
    assignment prefix (``df_merged = p``) appears to have been lost when
    comments were stripped from this copy.  Restore it before running.
    """
    d.read_table(os.path.join(input_bids_folders[0], 'participants.tsv'), encoding="ISO-8859-1")
    # Stringify columns so the outer merges below cannot mix dtypes.
    df_merged = df_merged.astype(str)
    df_merged['bids_path'] = [input_bids_folders[0]] * len(df_merged)
    for iFolder in range(1, len(input_bids_folders)):
        df_next = pd.read_table(os.path.join(input_bids_folders[iFolder], 'participants.tsv'), encoding="ISO-8859-1")
        df_next = df_next.astype(str)
        df_next['bids_path'] = [input_bids_folders[iFolder]] * len(df_next)
        df_merged = pd.merge(left=df_merged, right=df_next, how='outer')
    dataUsedOnSct = pd.read_pickle(dataset_sct_file)
    # Subjects SCT used for testing (train_valid_test == 3) stay in test.
    subjectsUsedForTesting = dataUsedOnSct[dataUsedOnSct['train_valid_test'] == 3]['subject'].to_list()
    percentage_train = 0.4
    percentage_validation = 0.2
    test = df_merged[np.in1d(df_merged['data_id'], subjectsUsedForTesting)]
    df_merged_reduced = df_merged[np.invert(np.in1d(df_merged['data_id'], subjectsUsedForTesting))]
    # Shuffle remaining subjects and cut into train / valid / extra-test.
    train, validate, test2 = np.split(df_merged_reduced.sample(frac=1),
                                      [int(percentage_train*(len(df_merged_reduced))+len(test)/2),
                                       int((percentage_train+percentage_validation)*len(df_merged_reduced)+len(test)/2)])
    test3 = test.append(test2, ignore_index=1)
    jobdict = {'train': train['participant_id'].to_list(),
               'valid': validate['participant_id'].to_list(),
               'test': test3['participant_id'].to_list()}
    joblib.dump(jobdict, os.path.join(outputFolder, "new_splits.joblib"))
    print('Success')
def get_parser():
    """Return the argparse parser (three required, multi-valued options)."""
    parser = argparse.ArgumentParser()
    for flag, dest, help_text in (
        ("--sct_train_file", "sctTrainFile",
         ".pkl file that was used while training SCT models"),
        ("--bids_datasets_list", "bidsDatasets",
         "BIDS dataset inputs"),
        ("--ofolder", "outputFolder",
         "Output folder where the new_splits.joblib file will be saved"),
    ):
        parser.add_argument(flag, required=True, nargs="*", dest=dest,
                            help=help_text)
    return parser
def main():
    """Parse CLI arguments and generate the split file."""
    cli_args = get_parser().parse_args()
    create_new_joblib(cli_args.sctTrainFile[0],
                      cli_args.bidsDatasets,
                      cli_args.outputFolder[0])
# Script entry point.
if __name__ == '__main__':
    main()
| true | true |
f7f77ac97cc3d5b692c304afcef650d3c48ec431 | 31,800 | py | Python | pygments/lexers/matlab.py | mariushegele/pygments | bc4168d3ce89ae56b50c53d76e7ed50793873698 | [
"BSD-2-Clause"
] | null | null | null | pygments/lexers/matlab.py | mariushegele/pygments | bc4168d3ce89ae56b50c53d76e7ed50793873698 | [
"BSD-2-Clause"
] | 1 | 2019-03-08T20:01:19.000Z | 2019-03-08T20:01:19.000Z | pygments/lexers/matlab.py | mariushegele/pygments | bc4168d3ce89ae56b50c53d76e7ed50793873698 | [
"BSD-2-Clause"
] | 1 | 2019-03-08T19:44:02.000Z | 2019-03-08T19:44:02.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.matlab
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Matlab and related languages.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
from pygments.lexers import _scilab_builtins
__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']
class MatlabLexer(RegexLexer):
    """
    For Matlab source code.

    .. versionadded:: 0.10
    """
    name = 'Matlab'
    aliases = ['matlab']
    filenames = ['*.m']
    mimetypes = ['text/matlab']

    #
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # for f in elfun specfun elmat; do
    #   echo -n "$f = "
    #   matlab -nojvm -r "help $f;exit;" | perl -ne \
    #     'push(@c,$1) if /^    (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
    # done
    #
    # elfun: Elementary math functions
    # specfun: Special Math functions
    # elmat: Elementary matrices and matrix manipulation
    #
    # taken from Matlab version 9.4 (R2018a)
    #
    elfun = ("sin", "sind", "sinh", "asin", "asind", "asinh", "cos", "cosd", "cosh",
             "acos", "acosd", "acosh", "tan", "tand", "tanh", "atan", "atand", "atan2",
             "atan2d", "atanh", "sec", "secd", "sech", "asec", "asecd", "asech", "csc", "cscd",
             "csch", "acsc", "acscd", "acsch", "cot", "cotd", "coth", "acot", "acotd",
             "acoth", "hypot", "deg2rad", "rad2deg", "exp", "expm1", "log", "log1p", "log10", "log2", "pow2",
             "realpow", "reallog", "realsqrt", "sqrt", "nthroot", "nextpow2", "abs",
             "angle", "complex", "conj", "imag", "real", "unwrap", "isreal", "cplxpair",
             "fix", "floor", "ceil", "round", "mod", "rem", "sign")
    specfun = ("airy", "besselj", "bessely", "besselh", "besseli", "besselk", "beta",
               "betainc", "betaincinv", "betaln", "ellipj", "ellipke", "erf", "erfc", "erfcx",
               "erfinv", "erfcinv", "expint", "gamma", "gammainc", "gammaincinv", "gammaln", "psi", "legendre",
               "cross", "dot", "factor", "isprime", "primes", "gcd", "lcm", "rat",
               "rats", "perms", "nchoosek", "factorial", "cart2sph", "cart2pol",
               "pol2cart", "sph2cart", "hsv2rgb", "rgb2hsv")
    elmat = ("zeros", "ones", "eye", "repmat", "repelem", "linspace", "logspace",
             "freqspace", "meshgrid", "accumarray", "size", "length", "ndims", "numel",
             "disp", "isempty", "isequal", "isequaln", "cat", "reshape",
             "diag", "blkdiag", "tril", "triu", "fliplr", "flipud", "flip", "rot90",
             "find", "end", "sub2ind", "ind2sub", "bsxfun", "ndgrid", "permute",
             "ipermute", "shiftdim", "circshift", "squeeze", "isscalar", "isvector",
             "isrow", "iscolumn", "ismatrix", "eps", "realmax", "realmin", "intmax", "intmin", "flintmax", "pi", "i", "inf", "nan", "isnan",
             "isinf", "isfinite", "j", "true", "false", "compan", "gallery", "hadamard", "hankel",
             "hilb", "invhilb", "magic", "pascal", "rosser", "toeplitz", "vander",
             "wilkinson")

    _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\'

    tokens = {
        'root': [
            # line starting with '!' is sent as a system command. not sure what
            # label to use...
            (r'^!.*', String.Other),
            (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
            (r'%.*$', Comment),
            (r'^\s*function\b', Keyword, 'deffunc'),

            # from 'iskeyword' on version 9.4 (R2018a):
            # Check that there is no preceding dot, as keywords are valid field
            # names.
            (words(('break', 'case', 'catch', 'classdef', 'continue', 'else',
                    'elseif', 'end', 'for', 'function',
                    'global', 'if', 'otherwise', 'parfor',
                    'persistent', 'return', 'spmd', 'switch',
                    'try', 'while'),
                   prefix=r'(?<!\.)', suffix=r'\b'),
             Keyword),

            ("(" + "|".join(elfun + specfun + elmat) + r')\b', Name.Builtin),

            # line continuation with following comment:
            (r'(\.\.\.)(.*)$', bygroups(Keyword, Comment)),

            # command form:
            # "How MATLAB Recognizes Command Syntax" specifies that an operator
            # is recognized if it is either surrounded by spaces or by no
            # spaces on both sides; only the former case matters for us.  (This
            # allows distinguishing `cd ./foo` from `cd ./ foo`.)
            (r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|(%s)\s+)' % _operators,
             bygroups(Text, Name, Text), 'commandargs'),

            # operators:
            (_operators, Operator),

            # numbers (must come before punctuation to handle `.5`; cannot use
            # `\b` due to e.g. `5. + .5`).
            (r'(?<!\w)((\d+\.\d*)|(\d*\.\d+))([eEf][+-]?\d+)?(?!\w)', Number.Float),
            (r'\b\d+[eEf][+-]?[0-9]+\b', Number.Float),
            (r'\b\d+\b', Number.Integer),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w)\].])\'+', Operator),
            (r'"(""|[^"])*"', String),
            (r'(?<![\w)\].])\'', String, 'string'),
            (r'[a-zA-Z_]\w*', Name),
            (r'.', Text),
        ],
        'blockcomment': [
            (r'^\s*%\}', Comment.Multiline, '#pop'),
            (r'^.*\n', Comment.Multiline),
            (r'.', Comment.Multiline),
        ],
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Whitespace, Text, Whitespace, Punctuation,
                      Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Whitespace), '#pop'),
            # function with no args
            (r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
        ],
        'commandargs': [
            # If an equal sign or other operator is encountered, this
            # isn't a command. It might be a variable assignment or
            # comparison operation with multiple spaces before the
            # equal sign or operator
            (r"=", Punctuation, '#pop'),
            (_operators, Operator, '#pop'),
            (r"[ \t]+", Text),
            ("'[^']*'", String),
            (r"[^';\s]+", String),
            (";?", Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        # function declaration.
        # Bug fix: the filter used to test ``text`` (the whole document)
        # instead of each ``line``, so a file whose first line is a comment
        # never yielded its first non-comment line.
        first_non_comment = next((line for line in text.splitlines()
                                  if not re.match(r'^\s*%', line)), '').strip()
        if (first_non_comment.startswith('function')
                and '{' not in first_non_comment):
            return 1.
        # comment
        elif re.search(r'^\s*%', text, re.M):
            return 0.2
        # system cmd
        elif re.search(r'^!\w+', text, re.M):
            return 0.2
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
    """
    For Matlab sessions.  Modeled after PythonConsoleLexer.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    .. versionadded:: 0.10
    """
    name = 'Matlab session'
    aliases = ['matlabsession']

    def get_tokens_unprocessed(self, text):
        # Accumulate prompted code lines into ``curcode`` and lex them with
        # MatlabLexer in one go; ``insertions`` records where prompt/traceback
        # tokens must be re-inserted into the lexed stream.
        mlexer = MatlabLexer(**self.options)

        curcode = ''
        insertions = []
        continuation = False

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('>> '):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:3])]))
                curcode += line[3:]

            elif line.startswith('>>'):
                # Prompt without trailing space.
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:2])]))
                curcode += line[2:]

            elif line.startswith('???'):
                # Matlab error output ("??? ...") becomes a traceback token.
                idx = len(curcode)

                # without is showing error on same line as before...?
                # line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append((idx, [token]))
            elif continuation:
                # line_start is the length of the most recent prompt symbol
                line_start = len(insertions[-1][-1][-1])
                # Set leading spaces with the length of the prompt to be a generic prompt
                # This keeps code aligned when prompts are removed, say with some Javascript
                if line.startswith(' '*line_start):
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, line[:line_start])]))
                    curcode += line[line_start:]
                else:
                    curcode += line
            else:
                # Plain output: flush any pending code first, then emit the
                # line as program output.
                if curcode:
                    for item in do_insertions(
                            insertions, mlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []

                yield match.start(), Generic.Output, line

            # Does not allow continuation if a comment is included after the ellipses.
            # Continues any line that ends with ..., even comments (lines that start with %)
            if line.strip().endswith('...'):
                continuation = True
            else:
                continuation = False

        if curcode:  # or item:
            # Flush whatever code remains at end of input.
            for item in do_insertions(
                    insertions, mlexer.get_tokens_unprocessed(curcode)):
                yield item
class OctaveLexer(RegexLexer):
"""
For GNU Octave source code.
.. versionadded:: 1.5
"""
name = 'Octave'
aliases = ['octave']
filenames = ['*.m']
mimetypes = ['text/octave']
# These lists are generated automatically.
# Run the following in bash shell:
#
# First dump all of the Octave manual into a plain text file:
#
# $ info octave --subnodes -o octave-manual
#
# Now grep through it:
# for i in \
# "Built-in Function" "Command" "Function File" \
# "Loadable Function" "Mapping Function";
# do
# perl -e '@name = qw('"$i"');
# print lc($name[0]),"_kw = [\n"';
#
# perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
# octave-manual | sort | uniq ;
# echo "]" ;
# echo;
# done
# taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
builtin_kw = (
"addlistener", "addpath", "addproperty", "all",
"and", "any", "argnames", "argv", "assignin",
"atexit", "autoload",
"available_graphics_toolkits", "beep_on_error",
"bitand", "bitmax", "bitor", "bitshift", "bitxor",
"cat", "cell", "cellstr", "char", "class", "clc",
"columns", "command_line_path",
"completion_append_char", "completion_matches",
"complex", "confirm_recursive_rmdir", "cputime",
"crash_dumps_octave_core", "ctranspose", "cumprod",
"cumsum", "debug_on_error", "debug_on_interrupt",
"debug_on_warning", "default_save_options",
"dellistener", "diag", "diff", "disp",
"doc_cache_file", "do_string_escapes", "double",
"drawnow", "e", "echo_executing_commands", "eps",
"eq", "errno", "errno_list", "error", "eval",
"evalin", "exec", "exist", "exit", "eye", "false",
"fclear", "fclose", "fcntl", "fdisp", "feof",
"ferror", "feval", "fflush", "fgetl", "fgets",
"fieldnames", "file_in_loadpath", "file_in_path",
"filemarker", "filesep", "find_dir_in_path",
"fixed_point_format", "fnmatch", "fopen", "fork",
"formula", "fprintf", "fputs", "fread", "freport",
"frewind", "fscanf", "fseek", "fskipl", "ftell",
"functions", "fwrite", "ge", "genpath", "get",
"getegid", "getenv", "geteuid", "getgid",
"getpgrp", "getpid", "getppid", "getuid", "glob",
"gt", "gui_mode", "history_control",
"history_file", "history_size",
"history_timestamp_format_string", "home",
"horzcat", "hypot", "ifelse",
"ignore_function_time_stamp", "inferiorto",
"info_file", "info_program", "inline", "input",
"intmax", "intmin", "ipermute",
"is_absolute_filename", "isargout", "isbool",
"iscell", "iscellstr", "ischar", "iscomplex",
"isempty", "isfield", "isfloat", "isglobal",
"ishandle", "isieee", "isindex", "isinteger",
"islogical", "ismatrix", "ismethod", "isnull",
"isnumeric", "isobject", "isreal",
"is_rooted_relative_filename", "issorted",
"isstruct", "isvarname", "kbhit", "keyboard",
"kill", "lasterr", "lasterror", "lastwarn",
"ldivide", "le", "length", "link", "linspace",
"logical", "lstat", "lt", "make_absolute_filename",
"makeinfo_program", "max_recursion_depth", "merge",
"methods", "mfilename", "minus", "mislocked",
"mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
"mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
"munlock", "nargin", "nargout",
"native_float_format", "ndims", "ne", "nfields",
"nnz", "norm", "not", "numel", "nzmax",
"octave_config_info", "octave_core_file_limit",
"octave_core_file_name",
"octave_core_file_options", "ones", "or",
"output_max_field_width", "output_precision",
"page_output_immediately", "page_screen_output",
"path", "pathsep", "pause", "pclose", "permute",
"pi", "pipe", "plus", "popen", "power",
"print_empty_dimensions", "printf",
"print_struct_array_contents", "prod",
"program_invocation_name", "program_name",
"putenv", "puts", "pwd", "quit", "rats", "rdivide",
"readdir", "readlink", "read_readline_init_file",
"realmax", "realmin", "rehash", "rename",
"repelems", "re_read_readline_init_file", "reset",
"reshape", "resize", "restoredefaultpath",
"rethrow", "rmdir", "rmfield", "rmpath", "rows",
"save_header_format_string", "save_precision",
"saving_history", "scanf", "set", "setenv",
"shell_cmd", "sighup_dumps_octave_core",
"sigterm_dumps_octave_core", "silent_functions",
"single", "size", "size_equal", "sizemax",
"sizeof", "sleep", "source", "sparse_auto_mutate",
"split_long_rows", "sprintf", "squeeze", "sscanf",
"stat", "stderr", "stdin", "stdout", "strcmp",
"strcmpi", "string_fill_char", "strncmp",
"strncmpi", "struct", "struct_levels_to_print",
"strvcat", "subsasgn", "subsref", "sum", "sumsq",
"superiorto", "suppress_verbose_help_message",
"symlink", "system", "tic", "tilde_expand",
"times", "tmpfile", "tmpnam", "toc", "toupper",
"transpose", "true", "typeinfo", "umask", "uminus",
"uname", "undo_string_escapes", "unlink", "uplus",
"upper", "usage", "usleep", "vec", "vectorize",
"vertcat", "waitpid", "warning", "warranty",
"whos_line_format", "yes_or_no", "zeros",
"inf", "Inf", "nan", "NaN")
command_kw = ("close", "load", "who", "whos")
function_kw = (
"accumarray", "accumdim", "acosd", "acotd",
"acscd", "addtodate", "allchild", "ancestor",
"anova", "arch_fit", "arch_rnd", "arch_test",
"area", "arma_rnd", "arrayfun", "ascii", "asctime",
"asecd", "asind", "assert", "atand",
"autoreg_matrix", "autumn", "axes", "axis", "bar",
"barh", "bartlett", "bartlett_test", "beep",
"betacdf", "betainv", "betapdf", "betarnd",
"bicgstab", "bicubic", "binary", "binocdf",
"binoinv", "binopdf", "binornd", "bitcmp",
"bitget", "bitset", "blackman", "blanks",
"blkdiag", "bone", "box", "brighten", "calendar",
"cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
"cauchy_rnd", "caxis", "celldisp", "center", "cgs",
"chisquare_test_homogeneity",
"chisquare_test_independence", "circshift", "cla",
"clabel", "clf", "clock", "cloglog", "closereq",
"colon", "colorbar", "colormap", "colperm",
"comet", "common_size", "commutation_matrix",
"compan", "compare_versions", "compass",
"computer", "cond", "condest", "contour",
"contourc", "contourf", "contrast", "conv",
"convhull", "cool", "copper", "copyfile", "cor",
"corrcoef", "cor_test", "cosd", "cotd", "cov",
"cplxpair", "cross", "cscd", "cstrcat", "csvread",
"csvwrite", "ctime", "cumtrapz", "curl", "cut",
"cylinder", "date", "datenum", "datestr",
"datetick", "datevec", "dblquad", "deal",
"deblank", "deconv", "delaunay", "delaunayn",
"delete", "demo", "detrend", "diffpara", "diffuse",
"dir", "discrete_cdf", "discrete_inv",
"discrete_pdf", "discrete_rnd", "display",
"divergence", "dlmwrite", "dos", "dsearch",
"dsearchn", "duplication_matrix", "durbinlevinson",
"ellipsoid", "empirical_cdf", "empirical_inv",
"empirical_pdf", "empirical_rnd", "eomday",
"errorbar", "etime", "etreeplot", "example",
"expcdf", "expinv", "expm", "exppdf", "exprnd",
"ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
"ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
"factorial", "fail", "fcdf", "feather", "fftconv",
"fftfilt", "fftshift", "figure", "fileattrib",
"fileparts", "fill", "findall", "findobj",
"findstr", "finv", "flag", "flipdim", "fliplr",
"flipud", "fpdf", "fplot", "fractdiff", "freqz",
"freqz_plot", "frnd", "fsolve",
"f_test_regression", "ftp", "fullfile", "fzero",
"gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
"gcbf", "gcbo", "gcf", "genvarname", "geocdf",
"geoinv", "geopdf", "geornd", "getfield", "ginput",
"glpk", "gls", "gplot", "gradient",
"graphics_toolkit", "gray", "grid", "griddata",
"griddatan", "gtext", "gunzip", "gzip", "hadamard",
"hamming", "hankel", "hanning", "hggroup",
"hidden", "hilb", "hist", "histc", "hold", "hot",
"hotelling_test", "housh", "hsv", "hurst",
"hygecdf", "hygeinv", "hygepdf", "hygernd",
"idivide", "ifftshift", "image", "imagesc",
"imfinfo", "imread", "imshow", "imwrite", "index",
"info", "inpolygon", "inputname", "interpft",
"interpn", "intersect", "invhilb", "iqr", "isa",
"isdefinite", "isdir", "is_duplicate_entry",
"isequal", "isequalwithequalnans", "isfigure",
"ishermitian", "ishghandle", "is_leap_year",
"isletter", "ismac", "ismember", "ispc", "isprime",
"isprop", "isscalar", "issquare", "isstrprop",
"issymmetric", "isunix", "is_valid_file_id",
"isvector", "jet", "kendall",
"kolmogorov_smirnov_cdf",
"kolmogorov_smirnov_test", "kruskal_wallis_test",
"krylov", "kurtosis", "laplace_cdf", "laplace_inv",
"laplace_pdf", "laplace_rnd", "legend", "legendre",
"license", "line", "linkprop", "list_primes",
"loadaudio", "loadobj", "logistic_cdf",
"logistic_inv", "logistic_pdf", "logistic_rnd",
"logit", "loglog", "loglogerr", "logm", "logncdf",
"logninv", "lognpdf", "lognrnd", "logspace",
"lookfor", "ls_command", "lsqnonneg", "magic",
"mahalanobis", "manova", "matlabroot",
"mcnemar_test", "mean", "meansq", "median", "menu",
"mesh", "meshc", "meshgrid", "meshz", "mexext",
"mget", "mkpp", "mode", "moment", "movefile",
"mpoles", "mput", "namelengthmax", "nargchk",
"nargoutchk", "nbincdf", "nbininv", "nbinpdf",
"nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
"nonzeros", "normcdf", "normest", "norminv",
"normpdf", "normrnd", "now", "nthroot", "null",
"ocean", "ols", "onenormest", "optimget",
"optimset", "orderfields", "orient", "orth",
"pack", "pareto", "parseparams", "pascal", "patch",
"pathdef", "pcg", "pchip", "pcolor", "pcr",
"peaks", "periodogram", "perl", "perms", "pie",
"pink", "planerot", "playaudio", "plot",
"plotmatrix", "plotyy", "poisscdf", "poissinv",
"poisspdf", "poissrnd", "polar", "poly",
"polyaffine", "polyarea", "polyderiv", "polyfit",
"polygcd", "polyint", "polyout", "polyreduce",
"polyval", "polyvalm", "postpad", "powerset",
"ppder", "ppint", "ppjumps", "ppplot", "ppval",
"pqpnonneg", "prepad", "primes", "print",
"print_usage", "prism", "probit", "qp", "qqplot",
"quadcc", "quadgk", "quadl", "quadv", "quiver",
"qzhess", "rainbow", "randi", "range", "rank",
"ranks", "rat", "reallog", "realpow", "realsqrt",
"record", "rectangle_lw", "rectangle_sw",
"rectint", "refresh", "refreshdata",
"regexptranslate", "repmat", "residue", "ribbon",
"rindex", "roots", "rose", "rosser", "rotdim",
"rref", "run", "run_count", "rundemos", "run_test",
"runtests", "saveas", "saveaudio", "saveobj",
"savepath", "scatter", "secd", "semilogx",
"semilogxerr", "semilogy", "semilogyerr",
"setaudio", "setdiff", "setfield", "setxor",
"shading", "shift", "shiftdim", "sign_test",
"sinc", "sind", "sinetone", "sinewave", "skewness",
"slice", "sombrero", "sortrows", "spaugment",
"spconvert", "spdiags", "spearman", "spectral_adf",
"spectral_xdf", "specular", "speed", "spencer",
"speye", "spfun", "sphere", "spinmap", "spline",
"spones", "sprand", "sprandn", "sprandsym",
"spring", "spstats", "spy", "sqp", "stairs",
"statistics", "std", "stdnormal_cdf",
"stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
"stem", "stft", "strcat", "strchr", "strjust",
"strmatch", "strread", "strsplit", "strtok",
"strtrim", "strtrunc", "structfun", "studentize",
"subplot", "subsindex", "subspace", "substr",
"substruct", "summer", "surf", "surface", "surfc",
"surfl", "surfnorm", "svds", "swapbytes",
"sylvester_matrix", "symvar", "synthesis", "table",
"tand", "tar", "tcdf", "tempdir", "tempname",
"test", "text", "textread", "textscan", "tinv",
"title", "toeplitz", "tpdf", "trace", "trapz",
"treelayout", "treeplot", "triangle_lw",
"triangle_sw", "tril", "trimesh", "triplequad",
"triplot", "trisurf", "triu", "trnd", "tsearchn",
"t_test", "t_test_regression", "type", "unidcdf",
"unidinv", "unidpdf", "unidrnd", "unifcdf",
"unifinv", "unifpdf", "unifrnd", "union", "unique",
"unix", "unmkpp", "unpack", "untabify", "untar",
"unwrap", "unzip", "u_test", "validatestring",
"vander", "var", "var_test", "vech", "ver",
"version", "view", "voronoi", "voronoin",
"waitforbuttonpress", "wavread", "wavwrite",
"wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
"welch_test", "what", "white", "whitebg",
"wienrnd", "wilcoxon_test", "wilkinson", "winter",
"xlabel", "xlim", "ylabel", "yulewalker", "zip",
"zlabel", "z_test")
loadable_kw = (
"airy", "amd", "balance", "besselh", "besseli",
"besselj", "besselk", "bessely", "bitpack",
"bsxfun", "builtin", "ccolamd", "cellfun",
"cellslices", "chol", "choldelete", "cholinsert",
"cholinv", "cholshift", "cholupdate", "colamd",
"colloc", "convhulln", "convn", "csymamd",
"cummax", "cummin", "daspk", "daspk_options",
"dasrt", "dasrt_options", "dassl", "dassl_options",
"dbclear", "dbdown", "dbstack", "dbstatus",
"dbstop", "dbtype", "dbup", "dbwhere", "det",
"dlmread", "dmperm", "dot", "eig", "eigs",
"endgrent", "endpwent", "etree", "fft", "fftn",
"fftw", "filter", "find", "full", "gcd",
"getgrent", "getgrgid", "getgrnam", "getpwent",
"getpwnam", "getpwuid", "getrusage", "givens",
"gmtime", "gnuplot_binary", "hess", "ifft",
"ifftn", "inv", "isdebugmode", "issparse", "kron",
"localtime", "lookup", "lsode", "lsode_options",
"lu", "luinc", "luupdate", "matrix_type", "max",
"min", "mktime", "pinv", "qr", "qrdelete",
"qrinsert", "qrshift", "qrupdate", "quad",
"quad_options", "qz", "rand", "rande", "randg",
"randn", "randp", "randperm", "rcond", "regexp",
"regexpi", "regexprep", "schur", "setgrent",
"setpwent", "sort", "spalloc", "sparse", "spparms",
"sprank", "sqrtm", "strfind", "strftime",
"strptime", "strrep", "svd", "svd_driver", "syl",
"symamd", "symbfact", "symrcm", "time", "tsearch",
"typecast", "urlread", "urlwrite")
mapping_kw = (
"abs", "acos", "acosh", "acot", "acoth", "acsc",
"acsch", "angle", "arg", "asec", "asech", "asin",
"asinh", "atan", "atanh", "beta", "betainc",
"betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
"cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
"erfcx", "erfinv", "exp", "finite", "fix", "floor",
"fmod", "gamma", "gammainc", "gammaln", "imag",
"isalnum", "isalpha", "isascii", "iscntrl",
"isdigit", "isfinite", "isgraph", "isinf",
"islower", "isna", "isnan", "isprint", "ispunct",
"isspace", "isupper", "isxdigit", "lcm", "lgamma",
"log", "lower", "mod", "real", "rem", "round",
"roundb", "sec", "sech", "sign", "sin", "sinh",
"sqrt", "tan", "tanh", "toascii", "tolower", "xor")
builtin_consts = (
"EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
"PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
"SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
"S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
"WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
"WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
"WSTOPSIG", "WTERMSIG", "WUNTRACED")
tokens = {
'root': [
# We should look into multiline comments
(r'[%#].*$', Comment),
(r'^\s*function\b', Keyword, 'deffunc'),
# from 'iskeyword' on hg changeset 8cc154f45e37
(words((
'__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',
'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',
'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',
'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',
'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',
'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'),
Keyword),
(words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw,
suffix=r'\b'), Name.Builtin),
(words(builtin_consts, suffix=r'\b'), Name.Constant),
# operators in Octave but not Matlab:
(r'-=|!=|!|/=|--', Operator),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators in Octave but not Matlab requiring escape for re:
(r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}:@.,]', Punctuation),
(r'=|:|;', Punctuation),
(r'"[^"]*"', String),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w)\].])\'+', Operator),
(r'(?<![\w)\].])\'', String, 'string'),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
],
}
class ScilabLexer(RegexLexer):
"""
For Scilab source code.
.. versionadded:: 1.5
"""
name = 'Scilab'
aliases = ['scilab']
filenames = ['*.sci', '*.sce', '*.tst']
mimetypes = ['text/scilab']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'^\s*function\b', Keyword, 'deffunc'),
(words((
'__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',
'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',
'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',
'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',
'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',
'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'),
Keyword),
(words(_scilab_builtins.functions_kw +
_scilab_builtins.commands_kw +
_scilab_builtins.macros_kw, suffix=r'\b'), Name.Builtin),
(words(_scilab_builtins.variables_kw, suffix=r'\b'), Name.Constant),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}@.,=:;]', Punctuation),
(r'"[^"]*"', String),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w)\].])\'+', Operator),
(r'(?<![\w)\].])\'', String, 'string'),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
(r'.', String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
],
}
| 44.351464 | 140 | 0.508994 |
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
from pygments.lexers import _scilab_builtins
__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']
class MatlabLexer(RegexLexer):
name = 'Matlab'
aliases = ['matlab']
filenames = ['*.m']
mimetypes = ['text/matlab']
elfun = ("sin", "sind", "sinh", "asin", "asind", "asinh", "cos", "cosd", "cosh",
"acos", "acosd", "acosh", "tan", "tand", "tanh", "atan", "atand", "atan2",
"atan2d", "atanh", "sec", "secd", "sech", "asec", "asecd", "asech", "csc", "cscd",
"csch", "acsc", "acscd", "acsch", "cot", "cotd", "coth", "acot", "acotd",
"acoth", "hypot", "deg2rad", "rad2deg", "exp", "expm1", "log", "log1p", "log10", "log2", "pow2",
"realpow", "reallog", "realsqrt", "sqrt", "nthroot", "nextpow2", "abs",
"angle", "complex", "conj", "imag", "real", "unwrap", "isreal", "cplxpair",
"fix", "floor", "ceil", "round", "mod", "rem", "sign")
specfun = ("airy", "besselj", "bessely", "besselh", "besseli", "besselk", "beta",
"betainc", "betaincinv", "betaln", "ellipj", "ellipke", "erf", "erfc", "erfcx",
"erfinv", "erfcinv", "expint", "gamma", "gammainc", "gammaincinv", "gammaln", "psi", "legendre",
"cross", "dot", "factor", "isprime", "primes", "gcd", "lcm", "rat",
"rats", "perms", "nchoosek", "factorial", "cart2sph", "cart2pol",
"pol2cart", "sph2cart", "hsv2rgb", "rgb2hsv")
elmat = ("zeros", "ones", "eye", "repmat", "repelem", "linspace", "logspace",
"freqspace", "meshgrid", "accumarray", "size", "length", "ndims", "numel",
"disp", "isempty", "isequal", "isequaln", "cat", "reshape",
"diag", "blkdiag", "tril", "triu", "fliplr", "flipud", "flip", "rot90",
"find", "end", "sub2ind", "ind2sub", "bsxfun", "ndgrid", "permute",
"ipermute", "shiftdim", "circshift", "squeeze", "isscalar", "isvector",
"isrow", "iscolumn", "ismatrix", "eps", "realmax", "realmin", "intmax", "intmin", "flintmax", "pi", "i", "inf", "nan", "isnan",
"isinf", "isfinite", "j", "true", "false", "compan", "gallery", "hadamard", "hankel",
"hilb", "invhilb", "magic", "pascal", "rosser", "toeplitz", "vander",
"wilkinson")
_operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\'
tokens = {
'root': [
(r'^!.*', String.Other),
(r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
(r'%.*$', Comment),
(r'^\s*function\b', Keyword, 'deffunc'),
(words(('break', 'case', 'catch', 'classdef', 'continue', 'else',
'elseif', 'end', 'for', 'function',
'global', 'if', 'otherwise', 'parfor',
'persistent', 'return', 'spmd', 'switch',
'try', 'while'),
prefix=r'(?<!\.)', suffix=r'\b'),
Keyword),
("(" + "|".join(elfun + specfun + elmat) + r')\b', Name.Builtin),
(r'(\.\.\.)(.*)$', bygroups(Keyword, Comment)),
(r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|(%s)\s+)' % _operators,
bygroups(Text, Name, Text), 'commandargs'),
(_operators, Operator),
(r'(?<!\w)((\d+\.\d*)|(\d*\.\d+))([eEf][+-]?\d+)?(?!\w)', Number.Float),
(r'\b\d+[eEf][+-]?[0-9]+\b', Number.Float),
(r'\b\d+\b', Number.Integer),
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
(r'(?<=[\w)\].])\'+', Operator),
(r'"(""|[^"])*"', String),
(r'(?<![\w)\].])\'', String, 'string'),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'blockcomment': [
(r'^\s*%\}', Comment.Multiline, '#pop'),
(r'^.*\n', Comment.Multiline),
(r'.', Comment.Multiline),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
],
'string': [
(r"[^']*'", String, '#pop'),
],
'commandargs': [
# If an equal sign or other operator is encountered, this
# isn't a command. It might be a variable assignment or
# comparison operation with multiple spaces before the
# equal sign or operator
(r"=", Punctuation, '#pop'),
(_operators, Operator, '#pop'),
(r"[ \t]+", Text),
("'[^']*'", String),
(r"[^';\s]+", String),
(";?", Punctuation, '#pop'),
]
}
def analyse_text(text):
# function declaration.
first_non_comment = next((line for line in text.splitlines()
if not re.match(r'^\s*%', text)), '').strip()
if (first_non_comment.startswith('function')
and '{' not in first_non_comment):
return 1.
# comment
elif re.search(r'^\s*%', text, re.M):
return 0.2
# system cmd
elif re.search(r'^!\w+', text, re.M):
return 0.2
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
continuation = False
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:2])]))
curcode += line[2:]
elif line.startswith('???'):
idx = len(curcode)
# without is showing error on same line as before...?
# line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
elif continuation:
# line_start is the length of the most recent prompt symbol
line_start = len(insertions[-1][-1][-1])
# Set leading spaces with the length of the prompt to be a generic prompt
# This keeps code aligned when prompts are removed, say with some Javascript
if line.startswith(' '*line_start):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:line_start])]))
curcode += line[line_start:]
else:
curcode += line
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
# Does not allow continuation if a comment is included after the ellipses.
# Continues any line that ends with ..., even comments (lines that start with %)
if line.strip().endswith('...'):
continuation = True
else:
continuation = False
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
class OctaveLexer(RegexLexer):
name = 'Octave'
aliases = ['octave']
filenames = ['*.m']
mimetypes = ['text/octave']
# These lists are generated automatically.
# Run the following in bash shell:
#
# First dump all of the Octave manual into a plain text file:
#
# $ info octave --subnodes -o octave-manual
#
# Now grep through it:
# for i in \
# "Built-in Function" "Command" "Function File" \
# "Loadable Function" "Mapping Function";
# do
# perl -e '@name = qw('"$i"');
# print lc($name[0]),"_kw = [\n"';
#
# perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
# octave-manual | sort | uniq ;
# echo "]" ;
# echo;
# done
# taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
builtin_kw = (
"addlistener", "addpath", "addproperty", "all",
"and", "any", "argnames", "argv", "assignin",
"atexit", "autoload",
"available_graphics_toolkits", "beep_on_error",
"bitand", "bitmax", "bitor", "bitshift", "bitxor",
"cat", "cell", "cellstr", "char", "class", "clc",
"columns", "command_line_path",
"completion_append_char", "completion_matches",
"complex", "confirm_recursive_rmdir", "cputime",
"crash_dumps_octave_core", "ctranspose", "cumprod",
"cumsum", "debug_on_error", "debug_on_interrupt",
"debug_on_warning", "default_save_options",
"dellistener", "diag", "diff", "disp",
"doc_cache_file", "do_string_escapes", "double",
"drawnow", "e", "echo_executing_commands", "eps",
"eq", "errno", "errno_list", "error", "eval",
"evalin", "exec", "exist", "exit", "eye", "false",
"fclear", "fclose", "fcntl", "fdisp", "feof",
"ferror", "feval", "fflush", "fgetl", "fgets",
"fieldnames", "file_in_loadpath", "file_in_path",
"filemarker", "filesep", "find_dir_in_path",
"fixed_point_format", "fnmatch", "fopen", "fork",
"formula", "fprintf", "fputs", "fread", "freport",
"frewind", "fscanf", "fseek", "fskipl", "ftell",
"functions", "fwrite", "ge", "genpath", "get",
"getegid", "getenv", "geteuid", "getgid",
"getpgrp", "getpid", "getppid", "getuid", "glob",
"gt", "gui_mode", "history_control",
"history_file", "history_size",
"history_timestamp_format_string", "home",
"horzcat", "hypot", "ifelse",
"ignore_function_time_stamp", "inferiorto",
"info_file", "info_program", "inline", "input",
"intmax", "intmin", "ipermute",
"is_absolute_filename", "isargout", "isbool",
"iscell", "iscellstr", "ischar", "iscomplex",
"isempty", "isfield", "isfloat", "isglobal",
"ishandle", "isieee", "isindex", "isinteger",
"islogical", "ismatrix", "ismethod", "isnull",
"isnumeric", "isobject", "isreal",
"is_rooted_relative_filename", "issorted",
"isstruct", "isvarname", "kbhit", "keyboard",
"kill", "lasterr", "lasterror", "lastwarn",
"ldivide", "le", "length", "link", "linspace",
"logical", "lstat", "lt", "make_absolute_filename",
"makeinfo_program", "max_recursion_depth", "merge",
"methods", "mfilename", "minus", "mislocked",
"mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
"mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
"munlock", "nargin", "nargout",
"native_float_format", "ndims", "ne", "nfields",
"nnz", "norm", "not", "numel", "nzmax",
"octave_config_info", "octave_core_file_limit",
"octave_core_file_name",
"octave_core_file_options", "ones", "or",
"output_max_field_width", "output_precision",
"page_output_immediately", "page_screen_output",
"path", "pathsep", "pause", "pclose", "permute",
"pi", "pipe", "plus", "popen", "power",
"print_empty_dimensions", "printf",
"print_struct_array_contents", "prod",
"program_invocation_name", "program_name",
"putenv", "puts", "pwd", "quit", "rats", "rdivide",
"readdir", "readlink", "read_readline_init_file",
"realmax", "realmin", "rehash", "rename",
"repelems", "re_read_readline_init_file", "reset",
"reshape", "resize", "restoredefaultpath",
"rethrow", "rmdir", "rmfield", "rmpath", "rows",
"save_header_format_string", "save_precision",
"saving_history", "scanf", "set", "setenv",
"shell_cmd", "sighup_dumps_octave_core",
"sigterm_dumps_octave_core", "silent_functions",
"single", "size", "size_equal", "sizemax",
"sizeof", "sleep", "source", "sparse_auto_mutate",
"split_long_rows", "sprintf", "squeeze", "sscanf",
"stat", "stderr", "stdin", "stdout", "strcmp",
"strcmpi", "string_fill_char", "strncmp",
"strncmpi", "struct", "struct_levels_to_print",
"strvcat", "subsasgn", "subsref", "sum", "sumsq",
"superiorto", "suppress_verbose_help_message",
"symlink", "system", "tic", "tilde_expand",
"times", "tmpfile", "tmpnam", "toc", "toupper",
"transpose", "true", "typeinfo", "umask", "uminus",
"uname", "undo_string_escapes", "unlink", "uplus",
"upper", "usage", "usleep", "vec", "vectorize",
"vertcat", "waitpid", "warning", "warranty",
"whos_line_format", "yes_or_no", "zeros",
"inf", "Inf", "nan", "NaN")
command_kw = ("close", "load", "who", "whos")
function_kw = (
"accumarray", "accumdim", "acosd", "acotd",
"acscd", "addtodate", "allchild", "ancestor",
"anova", "arch_fit", "arch_rnd", "arch_test",
"area", "arma_rnd", "arrayfun", "ascii", "asctime",
"asecd", "asind", "assert", "atand",
"autoreg_matrix", "autumn", "axes", "axis", "bar",
"barh", "bartlett", "bartlett_test", "beep",
"betacdf", "betainv", "betapdf", "betarnd",
"bicgstab", "bicubic", "binary", "binocdf",
"binoinv", "binopdf", "binornd", "bitcmp",
"bitget", "bitset", "blackman", "blanks",
"blkdiag", "bone", "box", "brighten", "calendar",
"cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
"cauchy_rnd", "caxis", "celldisp", "center", "cgs",
"chisquare_test_homogeneity",
"chisquare_test_independence", "circshift", "cla",
"clabel", "clf", "clock", "cloglog", "closereq",
"colon", "colorbar", "colormap", "colperm",
"comet", "common_size", "commutation_matrix",
"compan", "compare_versions", "compass",
"computer", "cond", "condest", "contour",
"contourc", "contourf", "contrast", "conv",
"convhull", "cool", "copper", "copyfile", "cor",
"corrcoef", "cor_test", "cosd", "cotd", "cov",
"cplxpair", "cross", "cscd", "cstrcat", "csvread",
"csvwrite", "ctime", "cumtrapz", "curl", "cut",
"cylinder", "date", "datenum", "datestr",
"datetick", "datevec", "dblquad", "deal",
"deblank", "deconv", "delaunay", "delaunayn",
"delete", "demo", "detrend", "diffpara", "diffuse",
"dir", "discrete_cdf", "discrete_inv",
"discrete_pdf", "discrete_rnd", "display",
"divergence", "dlmwrite", "dos", "dsearch",
"dsearchn", "duplication_matrix", "durbinlevinson",
"ellipsoid", "empirical_cdf", "empirical_inv",
"empirical_pdf", "empirical_rnd", "eomday",
"errorbar", "etime", "etreeplot", "example",
"expcdf", "expinv", "expm", "exppdf", "exprnd",
"ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
"ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
"factorial", "fail", "fcdf", "feather", "fftconv",
"fftfilt", "fftshift", "figure", "fileattrib",
"fileparts", "fill", "findall", "findobj",
"findstr", "finv", "flag", "flipdim", "fliplr",
"flipud", "fpdf", "fplot", "fractdiff", "freqz",
"freqz_plot", "frnd", "fsolve",
"f_test_regression", "ftp", "fullfile", "fzero",
"gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
"gcbf", "gcbo", "gcf", "genvarname", "geocdf",
"geoinv", "geopdf", "geornd", "getfield", "ginput",
"glpk", "gls", "gplot", "gradient",
"graphics_toolkit", "gray", "grid", "griddata",
"griddatan", "gtext", "gunzip", "gzip", "hadamard",
"hamming", "hankel", "hanning", "hggroup",
"hidden", "hilb", "hist", "histc", "hold", "hot",
"hotelling_test", "housh", "hsv", "hurst",
"hygecdf", "hygeinv", "hygepdf", "hygernd",
"idivide", "ifftshift", "image", "imagesc",
"imfinfo", "imread", "imshow", "imwrite", "index",
"info", "inpolygon", "inputname", "interpft",
"interpn", "intersect", "invhilb", "iqr", "isa",
"isdefinite", "isdir", "is_duplicate_entry",
"isequal", "isequalwithequalnans", "isfigure",
"ishermitian", "ishghandle", "is_leap_year",
"isletter", "ismac", "ismember", "ispc", "isprime",
"isprop", "isscalar", "issquare", "isstrprop",
"issymmetric", "isunix", "is_valid_file_id",
"isvector", "jet", "kendall",
"kolmogorov_smirnov_cdf",
"kolmogorov_smirnov_test", "kruskal_wallis_test",
"krylov", "kurtosis", "laplace_cdf", "laplace_inv",
"laplace_pdf", "laplace_rnd", "legend", "legendre",
"license", "line", "linkprop", "list_primes",
"loadaudio", "loadobj", "logistic_cdf",
"logistic_inv", "logistic_pdf", "logistic_rnd",
"logit", "loglog", "loglogerr", "logm", "logncdf",
"logninv", "lognpdf", "lognrnd", "logspace",
"lookfor", "ls_command", "lsqnonneg", "magic",
"mahalanobis", "manova", "matlabroot",
"mcnemar_test", "mean", "meansq", "median", "menu",
"mesh", "meshc", "meshgrid", "meshz", "mexext",
"mget", "mkpp", "mode", "moment", "movefile",
"mpoles", "mput", "namelengthmax", "nargchk",
"nargoutchk", "nbincdf", "nbininv", "nbinpdf",
"nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
"nonzeros", "normcdf", "normest", "norminv",
"normpdf", "normrnd", "now", "nthroot", "null",
"ocean", "ols", "onenormest", "optimget",
"optimset", "orderfields", "orient", "orth",
"pack", "pareto", "parseparams", "pascal", "patch",
"pathdef", "pcg", "pchip", "pcolor", "pcr",
"peaks", "periodogram", "perl", "perms", "pie",
"pink", "planerot", "playaudio", "plot",
"plotmatrix", "plotyy", "poisscdf", "poissinv",
"poisspdf", "poissrnd", "polar", "poly",
"polyaffine", "polyarea", "polyderiv", "polyfit",
"polygcd", "polyint", "polyout", "polyreduce",
"polyval", "polyvalm", "postpad", "powerset",
"ppder", "ppint", "ppjumps", "ppplot", "ppval",
"pqpnonneg", "prepad", "primes", "print",
"print_usage", "prism", "probit", "qp", "qqplot",
"quadcc", "quadgk", "quadl", "quadv", "quiver",
"qzhess", "rainbow", "randi", "range", "rank",
"ranks", "rat", "reallog", "realpow", "realsqrt",
"record", "rectangle_lw", "rectangle_sw",
"rectint", "refresh", "refreshdata",
"regexptranslate", "repmat", "residue", "ribbon",
"rindex", "roots", "rose", "rosser", "rotdim",
"rref", "run", "run_count", "rundemos", "run_test",
"runtests", "saveas", "saveaudio", "saveobj",
"savepath", "scatter", "secd", "semilogx",
"semilogxerr", "semilogy", "semilogyerr",
"setaudio", "setdiff", "setfield", "setxor",
"shading", "shift", "shiftdim", "sign_test",
"sinc", "sind", "sinetone", "sinewave", "skewness",
"slice", "sombrero", "sortrows", "spaugment",
"spconvert", "spdiags", "spearman", "spectral_adf",
"spectral_xdf", "specular", "speed", "spencer",
"speye", "spfun", "sphere", "spinmap", "spline",
"spones", "sprand", "sprandn", "sprandsym",
"spring", "spstats", "spy", "sqp", "stairs",
"statistics", "std", "stdnormal_cdf",
"stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
"stem", "stft", "strcat", "strchr", "strjust",
"strmatch", "strread", "strsplit", "strtok",
"strtrim", "strtrunc", "structfun", "studentize",
"subplot", "subsindex", "subspace", "substr",
"substruct", "summer", "surf", "surface", "surfc",
"surfl", "surfnorm", "svds", "swapbytes",
"sylvester_matrix", "symvar", "synthesis", "table",
"tand", "tar", "tcdf", "tempdir", "tempname",
"test", "text", "textread", "textscan", "tinv",
"title", "toeplitz", "tpdf", "trace", "trapz",
"treelayout", "treeplot", "triangle_lw",
"triangle_sw", "tril", "trimesh", "triplequad",
"triplot", "trisurf", "triu", "trnd", "tsearchn",
"t_test", "t_test_regression", "type", "unidcdf",
"unidinv", "unidpdf", "unidrnd", "unifcdf",
"unifinv", "unifpdf", "unifrnd", "union", "unique",
"unix", "unmkpp", "unpack", "untabify", "untar",
"unwrap", "unzip", "u_test", "validatestring",
"vander", "var", "var_test", "vech", "ver",
"version", "view", "voronoi", "voronoin",
"waitforbuttonpress", "wavread", "wavwrite",
"wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
"welch_test", "what", "white", "whitebg",
"wienrnd", "wilcoxon_test", "wilkinson", "winter",
"xlabel", "xlim", "ylabel", "yulewalker", "zip",
"zlabel", "z_test")
loadable_kw = (
"airy", "amd", "balance", "besselh", "besseli",
"besselj", "besselk", "bessely", "bitpack",
"bsxfun", "builtin", "ccolamd", "cellfun",
"cellslices", "chol", "choldelete", "cholinsert",
"cholinv", "cholshift", "cholupdate", "colamd",
"colloc", "convhulln", "convn", "csymamd",
"cummax", "cummin", "daspk", "daspk_options",
"dasrt", "dasrt_options", "dassl", "dassl_options",
"dbclear", "dbdown", "dbstack", "dbstatus",
"dbstop", "dbtype", "dbup", "dbwhere", "det",
"dlmread", "dmperm", "dot", "eig", "eigs",
"endgrent", "endpwent", "etree", "fft", "fftn",
"fftw", "filter", "find", "full", "gcd",
"getgrent", "getgrgid", "getgrnam", "getpwent",
"getpwnam", "getpwuid", "getrusage", "givens",
"gmtime", "gnuplot_binary", "hess", "ifft",
"ifftn", "inv", "isdebugmode", "issparse", "kron",
"localtime", "lookup", "lsode", "lsode_options",
"lu", "luinc", "luupdate", "matrix_type", "max",
"min", "mktime", "pinv", "qr", "qrdelete",
"qrinsert", "qrshift", "qrupdate", "quad",
"quad_options", "qz", "rand", "rande", "randg",
"randn", "randp", "randperm", "rcond", "regexp",
"regexpi", "regexprep", "schur", "setgrent",
"setpwent", "sort", "spalloc", "sparse", "spparms",
"sprank", "sqrtm", "strfind", "strftime",
"strptime", "strrep", "svd", "svd_driver", "syl",
"symamd", "symbfact", "symrcm", "time", "tsearch",
"typecast", "urlread", "urlwrite")
mapping_kw = (
"abs", "acos", "acosh", "acot", "acoth", "acsc",
"acsch", "angle", "arg", "asec", "asech", "asin",
"asinh", "atan", "atanh", "beta", "betainc",
"betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
"cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
"erfcx", "erfinv", "exp", "finite", "fix", "floor",
"fmod", "gamma", "gammainc", "gammaln", "imag",
"isalnum", "isalpha", "isascii", "iscntrl",
"isdigit", "isfinite", "isgraph", "isinf",
"islower", "isna", "isnan", "isprint", "ispunct",
"isspace", "isupper", "isxdigit", "lcm", "lgamma",
"log", "lower", "mod", "real", "rem", "round",
"roundb", "sec", "sech", "sign", "sin", "sinh",
"sqrt", "tan", "tanh", "toascii", "tolower", "xor")
builtin_consts = (
"EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
"PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
"SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
"S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
"WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
"WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
"WSTOPSIG", "WTERMSIG", "WUNTRACED")
tokens = {
'root': [
# We should look into multiline comments
(r'[%#].*$', Comment),
(r'^\s*function\b', Keyword, 'deffunc'),
# from 'iskeyword' on hg changeset 8cc154f45e37
(words((
'__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',
'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',
'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',
'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',
'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',
'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'),
Keyword),
(words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw,
suffix=r'\b'), Name.Builtin),
(words(builtin_consts, suffix=r'\b'), Name.Constant),
# operators in Octave but not Matlab:
(r'-=|!=|!|/=|--', Operator),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators in Octave but not Matlab requiring escape for re:
(r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}:@.,]', Punctuation),
(r'=|:|;', Punctuation),
(r'"[^"]*"', String),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w)\].])\'+', Operator),
(r'(?<![\w)\].])\'', String, 'string'),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '
],
}
class ScilabLexer(RegexLexer):
name = 'Scilab'
aliases = ['scilab']
filenames = ['*.sci', '*.sce', '*.tst']
mimetypes = ['text/scilab']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'^\s*function\b', Keyword, 'deffunc'),
(words((
'__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',
'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',
'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',
'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',
'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',
'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'),
Keyword),
(words(_scilab_builtins.functions_kw +
_scilab_builtins.commands_kw +
_scilab_builtins.macros_kw, suffix=r'\b'), Name.Builtin),
(words(_scilab_builtins.variables_kw, suffix=r'\b'), Name.Constant),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}@.,=:;]', Punctuation),
(r'"[^"]*"', String),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w)\].])\'+', Operator),
(r'(?<![\w)\].])\'', String, 'string'),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
(r'.', String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
],
}
| true | true |
f7f77ae55ab8be22237200accfcee46e0087aff2 | 1,041 | py | Python | inbm/integration-reloaded/scripts/create_signature.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 5 | 2021-12-13T21:19:31.000Z | 2022-01-18T18:29:43.000Z | inbm/integration-reloaded/scripts/create_signature.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 45 | 2021-12-30T17:21:09.000Z | 2022-03-29T22:47:32.000Z | inbm/integration-reloaded/scripts/create_signature.py | ahameedx/intel-inb-manageability | aca445fa4cef0b608e6e88e74476547e10c06073 | [
"Apache-2.0"
] | 4 | 2022-01-26T17:42:54.000Z | 2022-03-30T04:48:04.000Z | import hashlib
import sys
from binascii import hexlify
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
file_name = package_name = password = None
num_params = len(sys.argv)
if num_params < 3:
print('Invalid number of params')
exit(1)
else:
file_name = sys.argv[1]
package_name = sys.argv[2]
if num_params == 4:
password = sys.argv[3].encode('utf-8')
with open(package_name) as package:
checksum = hashlib.sha384(package.read().encode('utf-8')).hexdigest()
with open(file_name) as f:
priv_key = load_pem_private_key(f.read().encode('utf-8'), password=password, backend=default_backend())
signature = priv_key.sign(checksum.encode('utf-8'), padding.PSS(mgf=padding.MGF1(hashes.SHA384()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA384())
print((hexlify(signature)).decode('utf-8', errors='strict')) | 35.896552 | 152 | 0.756004 | import hashlib
import sys
from binascii import hexlify
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
file_name = package_name = password = None
num_params = len(sys.argv)
if num_params < 3:
print('Invalid number of params')
exit(1)
else:
file_name = sys.argv[1]
package_name = sys.argv[2]
if num_params == 4:
password = sys.argv[3].encode('utf-8')
with open(package_name) as package:
checksum = hashlib.sha384(package.read().encode('utf-8')).hexdigest()
with open(file_name) as f:
priv_key = load_pem_private_key(f.read().encode('utf-8'), password=password, backend=default_backend())
signature = priv_key.sign(checksum.encode('utf-8'), padding.PSS(mgf=padding.MGF1(hashes.SHA384()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA384())
print((hexlify(signature)).decode('utf-8', errors='strict')) | true | true |
f7f77b997c2074d396407ce6a7f24cdb9a3fd2d3 | 5,070 | py | Python | torchvision/datasets/oxford_iiit_pet.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/oxford_iiit_pet.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/oxford_iiit_pet.py | yoshitomo-matsubara/vision | 03d11338f3faf94a0749549912593ddb8b70be17 | [
"BSD-3-Clause"
] | null | null | null | import os
import os.path
import pathlib
from typing import Any, Callable, Optional, Union, Tuple
from typing import Sequence
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class OxfordIIITPet(VisionDataset):
"""`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:
- ``category`` (int): Label for one of the 37 pet categories.
- ``segmentation`` (PIL image): Segmentation trimap of the image.
If empty, ``None`` will be returned as target.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again.
"""
_RESOURCES = (
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
)
_VALID_TARGET_TYPES = ("category", "segmentation")
def __init__(
self,
root: str,
split: str = "trainval",
target_types: Union[Sequence[str], str] = "category",
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
):
self._split = verify_str_arg(split, "split", ("trainval", "test"))
if isinstance(target_types, str):
target_types = [target_types]
self._target_types = [
verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
]
super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
self._images_folder = self._base_folder / "images"
self._anns_folder = self._base_folder / "annotations"
self._segs_folder = self._anns_folder / "trimaps"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
image_ids = []
self._labels = []
with open(self._anns_folder / f"{self._split}.txt") as file:
for line in file:
image_id, label, *_ = line.strip().split()
image_ids.append(image_id)
self._labels.append(int(label) - 1)
self.classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]
def __len__(self) -> int:
return len(self._images)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image = Image.open(self._images[idx]).convert("RGB")
target: Any = []
for target_type in self._target_types:
if target_type == "category":
target.append(self._labels[idx])
else: # target_type == "segmentation"
target.append(Image.open(self._segs[idx]))
if not target:
target = None
elif len(target) == 1:
target = target[0]
else:
target = tuple(target)
if self.transforms:
image, target = self.transforms(image, target)
return image, target
def _check_exists(self) -> bool:
for folder in (self._images_folder, self._anns_folder):
if not (os.path.exists(folder) and os.path.isdir(folder)):
return False
else:
return True
def _download(self) -> None:
if self._check_exists():
return
for url, md5 in self._RESOURCES:
download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
| 39.92126 | 120 | 0.623866 | import os
import os.path
import pathlib
from typing import Any, Callable, Optional, Union, Tuple
from typing import Sequence
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class OxfordIIITPet(VisionDataset):
_RESOURCES = (
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
)
_VALID_TARGET_TYPES = ("category", "segmentation")
def __init__(
self,
root: str,
split: str = "trainval",
target_types: Union[Sequence[str], str] = "category",
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
):
self._split = verify_str_arg(split, "split", ("trainval", "test"))
if isinstance(target_types, str):
target_types = [target_types]
self._target_types = [
verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
]
super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
self._images_folder = self._base_folder / "images"
self._anns_folder = self._base_folder / "annotations"
self._segs_folder = self._anns_folder / "trimaps"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
image_ids = []
self._labels = []
with open(self._anns_folder / f"{self._split}.txt") as file:
for line in file:
image_id, label, *_ = line.strip().split()
image_ids.append(image_id)
self._labels.append(int(label) - 1)
self.classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]
def __len__(self) -> int:
return len(self._images)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image = Image.open(self._images[idx]).convert("RGB")
target: Any = []
for target_type in self._target_types:
if target_type == "category":
target.append(self._labels[idx])
else:
target.append(Image.open(self._segs[idx]))
if not target:
target = None
elif len(target) == 1:
target = target[0]
else:
target = tuple(target)
if self.transforms:
image, target = self.transforms(image, target)
return image, target
def _check_exists(self) -> bool:
for folder in (self._images_folder, self._anns_folder):
if not (os.path.exists(folder) and os.path.isdir(folder)):
return False
else:
return True
def _download(self) -> None:
if self._check_exists():
return
for url, md5 in self._RESOURCES:
download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
| true | true |
f7f77baecb04936cf10d98ef3ad04894d39dc1e4 | 559 | py | Python | nz_django/day8/djang_drf_demo/front/migrations/0002_game.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | null | null | null | nz_django/day8/djang_drf_demo/front/migrations/0002_game.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | 27 | 2020-02-12T07:55:58.000Z | 2022-03-12T00:19:09.000Z | nz_django/day8/djang_drf_demo/front/migrations/0002_game.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | 2 | 2020-02-18T01:54:55.000Z | 2020-02-21T11:36:28.000Z | # Generated by Django 2.0 on 2020-02-28 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('g_name', models.CharField(max_length=30)),
('g_price', models.FloatField(default=1.0)),
],
),
]
| 25.409091 | 114 | 0.563506 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('g_name', models.CharField(max_length=30)),
('g_price', models.FloatField(default=1.0)),
],
),
]
| true | true |
f7f77c392c9ec80b57c867128fb0176dd812752e | 21,690 | py | Python | setup.py | JanSchulz/pandas | 6e8ce685eb5a4bd0b39665a3a0d7ddd627ea8ed0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | setup.py | JanSchulz/pandas | 6e8ce685eb5a4bd0b39665a3a0d7ddd627ea8ed0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | setup.py | JanSchulz/pandas | 6e8ce685eb5a4bd0b39665a3a0d7ddd627ea8ed0 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
import sys
import shutil
import warnings
import re
# may need to work around setuptools bug by providing a fake Pyrex
try:
import Cython
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "fake_pyrex"))
except ImportError:
pass
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
setuptools_kwargs = {}
min_numpy_ver = '1.7.0'
if sys.version_info[0] >= 3:
setuptools_kwargs = {
'zip_safe': False,
'install_requires': ['python-dateutil >= 2',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
}
if not _have_setuptools:
sys.exit("need setuptools/distribute for Py3k"
"\n$ pip install distribute")
else:
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
'zip_safe': False,
}
if not _have_setuptools:
try:
import numpy
import dateutil
setuptools_kwargs = {}
except ImportError:
sys.exit("install requires: 'python-dateutil < 2','numpy'."
" use pip or easy_install."
"\n $ pip install 'python-dateutil < 2' 'numpy'")
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.command.build_ext import build_ext as _build_ext
try:
from Cython.Distutils import build_ext as _build_ext
# from Cython.Distutils import Extension # to get pyrex debugging symbols
cython = True
except ImportError:
cython = False
from os.path import join as pjoin
class build_ext(_build_ext):
    """build_ext that injects numpy's C header directory into every
    extension's include path at build time (after setup_requires has had a
    chance to install numpy)."""
    def build_extensions(self):
        # Resolve numpy's bundled headers via pkg_resources rather than
        # importing numpy at module import time.
        numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
        for ext in self.extensions:
            # Fix: PEP 8 membership idiom (`x not in y`, not `not x in y`).
            # hasattr guards extension-like objects without include_dirs.
            if hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs:
                ext.include_dirs.append(numpy_incl)
        _build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series,"
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
Note
----
Windows binaries built against NumPy 1.8.1
"""
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "pydata@googlegroups.com"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering',
]
MAJOR = 0
MINOR = 14
MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git','git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so,serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('pandas/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing pandas/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev ="v%s.dev-%s" % (VERSION, rev)
# Strip leading v from tags format "vx.y.z" to get th version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
def write_version_py(filename=None):
    """Write the resolved version strings into ``pandas/version.py``.

    Parameters
    ----------
    filename : str, optional
        Target path. Defaults to ``<setup.py dir>/pandas/version.py``.

    Uses the module-level ``FULLVERSION`` and ``VERSION`` computed above.
    """
    # NB: the template's leading-column text is the literal file content.
    cnt = """\
version = '%s'
short_version = '%s'
"""
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'pandas', 'version.py')
    # Fix: context manager replaces the manual open/try/finally/close and
    # guarantees the handle is closed even if the write raises.
    with open(filename, 'w') as f:
        f.write(cnt % (FULLVERSION, VERSION))
if write_version:
write_version_py()
class CleanCommand(Command):
    """Custom distutils command to clean the .so and .pyc files."""
    user_options = [("all", "a", "")]
    def initialize_options(self):
        """Collect generated files and build trees that ``run`` will delete."""
        self.all = True
        self._clean_me = []
        self._clean_trees = []
        # C sources that are checked in (hand-written or shipped
        # pre-generated) and therefore must NOT be deleted as build output.
        self._clean_exclude = ['np_datetime.c',
                               'np_datetime_strings.c',
                               'period.c',
                               'tokenizer.c',
                               'io.c',
                               'ujson.c',
                               'objToJSON.c',
                               'JSONtoObj.c',
                               'ultrajsonenc.c',
                               'ultrajsondec.c',
                               ]
        for root, dirs, files in os.walk('pandas'):
            for f in files:
                if f in self._clean_exclude:
                    continue
                # XXX
                # Blanket skip of anything ujson-related, on top of the
                # explicit exclude list above.
                if 'ujson' in f:
                    continue
                # Byte-compiled, linked, and Cython-generated artifacts.
                if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
                                               '.pyo',
                                               '.pyd', '.c', '.orig'):
                    self._clean_me.append(pjoin(root, f))
            for d in dirs:
                if d == '__pycache__':
                    self._clean_trees.append(pjoin(root, d))
        # Top-level build output directories, if present.
        for d in ('build', 'dist'):
            if os.path.exists(d):
                self._clean_trees.append(d)
    def finalize_options(self):
        # Required by the distutils Command interface; nothing to finalize.
        pass
    def run(self):
        """Delete the collected files and trees, best-effort: failures
        (missing files, permissions) are deliberately ignored."""
        for clean_me in self._clean_me:
            try:
                os.unlink(clean_me)
            except Exception:
                pass
        for clean_tree in self._clean_trees:
            try:
                shutil.rmtree(clean_tree)
            except Exception:
                pass
class CheckSDist(sdist):
    """Custom sdist that ensures Cython has compiled all pyx files to c."""
    # Hand-maintained list of Cython sources whose generated .c files must
    # exist before a source distribution is rolled.
    _pyxfiles = ['pandas/lib.pyx',
                 'pandas/hashtable.pyx',
                 'pandas/tslib.pyx',
                 'pandas/index.pyx',
                 'pandas/algos.pyx',
                 'pandas/parser.pyx',
                 'pandas/src/sparse.pyx',
                 'pandas/src/testing.pyx']
    def initialize_options(self):
        sdist.initialize_options(self)
        # NOTE: the triple-quoted block below is disabled auto-discovery
        # code kept as a no-op string expression — it is not executed.
        '''
        self._pyxfiles = []
        for root, dirs, files in os.walk('pandas'):
            for f in files:
                if f.endswith('.pyx'):
                    self._pyxfiles.append(pjoin(root, f))
        '''
    def run(self):
        """Cythonize first when the 'cython' command is registered;
        otherwise require every generated .c file to exist, then run the
        stock sdist (in either branch)."""
        if 'cython' in cmdclass:
            self.run_command('cython')
        else:
            for pyxfile in self._pyxfiles:
                cfile = pyxfile[:-3] + 'c'
                msg = "C-source file '%s' not found." % (cfile) +\
                    " Run 'setup.py cython' before sdist."
                assert os.path.isfile(cfile), msg
        sdist.run(self)
class CheckingBuildExt(build_ext):
    """build_ext variant that fails early with an explicit message when the
    Cython-generated .c sources are missing from the tree."""
    def check_cython_extensions(self, extensions):
        """Raise if any listed extension source file is absent on disk."""
        for ext in extensions:
            for src in ext.sources:
                if os.path.exists(src):
                    continue
                raise Exception("""Cython-generated file '%s' not found.
                Cython is required to compile pandas from a development branch.
                Please install Cython or download a release package of pandas.
                """ % src)
    def build_extensions(self):
        # Pre-flight source check, then defer to the normal build.
        self.check_cython_extensions(self.extensions)
        build_ext.build_extensions(self)
class CythonCommand(build_ext):
    """Custom distutils command subclassed from Cython.Distutils.build_ext
    to compile pyx->c, and stop there. All this does is override the
    C-compile method build_extension() with a no-op."""
    def build_extension(self, ext):
        # Deliberate no-op: skipping here stops the pipeline before the C
        # compiler is invoked, leaving only the generated .c sources.
        pass
class DummyBuildSrc(Command):
    """ numpy's build_src command interferes with Cython's build_ext.
    """
    # No command-line options: this command exists only to neutralize
    # build_src (see class docstring).
    user_options = []
    def initialize_options(self):
        # presumably an attribute the numpy build machinery expects to
        # find on build_src — TODO confirm against numpy.distutils.
        self.py_modules_dict = {}
    def finalize_options(self):
        pass
    def run(self):
        # Intentionally does nothing.
        pass
cmdclass = {'clean': CleanCommand,
'build': build,
'sdist': CheckSDist}
try:
from wheel.bdist_wheel import bdist_wheel
    class BdistWheel(bdist_wheel):
        # Rewrites the macOS wheel platform tag; only active when the
        # wheel package is importable (this class lives inside a try).
        def get_tag(self):
            tag = bdist_wheel.get_tag(self)
            # tag is (python, abi, platform); only the platform component
            # is replaced, and only for the exact 'macosx_10_6_intel' tag.
            # NOTE(review): the compound tag appears to advertise the 10.9
            # intel/x86_64 variants as compatible too — confirm intent.
            repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
            if tag[2] == 'macosx_10_6_intel':
                tag = (tag[0], tag[1], repl)
            return tag
cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
pass
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
cmdclass['cython'] = CythonCommand
else:
suffix = '.c'
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
lib_depends = ['reduce', 'inference', 'properties']
def srcpath(name=None, suffix='.pyx', subdir='src'):
    """Build the path to a pandas source file: pandas/<subdir>/<name><suffix>."""
    filename = name + suffix
    return pjoin('pandas', subdir, filename)
if suffix == '.pyx':
lib_depends = [srcpath(f, suffix='.pyx') for f in lib_depends]
lib_depends.append('pandas/src/util.pxd')
else:
lib_depends = []
plib_depends = []
common_include = ['pandas/src/klib', 'pandas/src']
def pxd(name):
    """Return the absolute path of the Cython declaration file pandas/<name>.pxd."""
    declaration = '%s.pxd' % name
    return os.path.abspath(pjoin('pandas', declaration))
lib_depends = lib_depends + ['pandas/src/numpy_helper.h',
'pandas/src/parse_helper.h']
tseries_depends = ['pandas/src/datetime/np_datetime.h',
'pandas/src/datetime/np_datetime_strings.h',
'pandas/src/period.h']
# some linux distros require it
libraries = ['m'] if 'win32' not in sys.platform else []
ext_data = dict(
lib={'pyxfile': 'lib',
'pxdfiles': [],
'depends': lib_depends},
hashtable={'pyxfile': 'hashtable',
'pxdfiles': ['hashtable']},
tslib={'pyxfile': 'tslib',
'depends': tseries_depends,
'sources': ['pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c',
'pandas/src/period.c']},
index={'pyxfile': 'index',
'sources': ['pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c']},
algos={'pyxfile': 'algos',
'depends': [srcpath('generated', suffix='.pyx'),
srcpath('join', suffix='.pyx')]},
parser=dict(pyxfile='parser',
depends=['pandas/src/parser/tokenizer.h',
'pandas/src/parser/io.h',
'pandas/src/numpy_helper.h'],
sources=['pandas/src/parser/tokenizer.c',
'pandas/src/parser/io.c'])
)
extensions = []
for name, data in ext_data.items():
sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
pxds = [pxd(x) for x in data.get('pxdfiles', [])]
if suffix == '.pyx' and pxds:
sources.extend(pxds)
sources.extend(data.get('sources', []))
include = data.get('include', common_include)
obj = Extension('pandas.%s' % name,
sources=sources,
depends=data.get('depends', []),
include_dirs=include)
extensions.append(obj)
sparse_ext = Extension('pandas._sparse',
sources=[srcpath('sparse', suffix=suffix)],
include_dirs=[],
libraries=libraries)
extensions.extend([sparse_ext])
testing_ext = Extension('pandas._testing',
sources=[srcpath('testing', suffix=suffix)],
include_dirs=[],
libraries=libraries)
extensions.extend([testing_ext])
#----------------------------------------------------------------------
# msgpack stuff here
if sys.byteorder == 'big':
macros = [('__BIG_ENDIAN__', '1')]
else:
macros = [('__LITTLE_ENDIAN__', '1')]
msgpack_ext = Extension('pandas.msgpack',
sources = [srcpath('msgpack',
suffix=suffix if suffix == '.pyx' else '.cpp',
subdir='')],
language='c++',
include_dirs=common_include,
define_macros=macros)
extensions.append(msgpack_ext)
# if not ISRELEASED:
# extensions.extend([sandbox_ext])
if suffix == '.pyx' and 'setuptools' in sys.modules:
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith(('.c','.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas.json',
depends=['pandas/src/ujson/lib/ultrajson.h',
'pandas/src/numpy_helper.h'],
sources=['pandas/src/ujson/python/ujson.c',
'pandas/src/ujson/python/objToJSON.c',
'pandas/src/ujson/python/JSONtoObj.c',
'pandas/src/ujson/lib/ultrajsonenc.c',
'pandas/src/ujson/lib/ultrajsondec.c',
'pandas/src/datetime/np_datetime.c',
'pandas/src/datetime/np_datetime_strings.c'],
include_dirs=['pandas/src/ujson/python',
'pandas/src/ujson/lib',
'pandas/src/datetime'] + common_include,
extra_compile_args=['-D_GNU_SOURCE'])
extensions.append(ujson_ext)
if _have_setuptools:
setuptools_kwargs["test_suite"] = "nose.collector"
# The build cache system does string matching below this point.
# if you change something, be careful.
setup(name=DISTNAME,
version=FULLVERSION,
maintainer=AUTHOR,
packages=['pandas',
'pandas.compat',
'pandas.computation',
'pandas.computation.tests',
'pandas.core',
'pandas.io',
'pandas.rpy',
'pandas.sandbox',
'pandas.sparse',
'pandas.sparse.tests',
'pandas.stats',
'pandas.util',
'pandas.tests',
'pandas.tests.test_msgpack',
'pandas.tools',
'pandas.tools.tests',
'pandas.tseries',
'pandas.tseries.tests',
'pandas.io.tests',
'pandas.io.tests.test_json',
'pandas.stats.tests',
],
package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
'tests/data/legacy_pickle/0.10.1/*.pickle',
'tests/data/legacy_pickle/0.11.0/*.pickle',
'tests/data/legacy_pickle/0.12.0/*.pickle',
'tests/data/legacy_pickle/0.13.0/*.pickle',
'tests/data/legacy_pickle/0.14.0/*.pickle',
'tests/data/*.csv',
'tests/data/*.dta',
'tests/data/*.txt',
'tests/data/*.xls',
'tests/data/*.xlsx',
'tests/data/*.xlsm',
'tests/data/*.table',
'tests/data/*.html',
'tests/data/html_encoding/*.html',
'tests/test_json/data/*.json'],
'pandas.tools': ['tests/*.csv'],
'pandas.tests': ['data/*.pickle',
'data/*.csv'],
'pandas.tseries.tests': ['data/*.pickle',
'data/*.csv']
},
ext_modules=extensions,
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
**setuptools_kwargs)
| 34.983871 | 97 | 0.570447 |
import os
import sys
import shutil
import warnings
import re
try:
import Cython
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "fake_pyrex"))
except ImportError:
pass
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
setuptools_kwargs = {}
min_numpy_ver = '1.7.0'
if sys.version_info[0] >= 3:
setuptools_kwargs = {
'zip_safe': False,
'install_requires': ['python-dateutil >= 2',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
}
if not _have_setuptools:
sys.exit("need setuptools/distribute for Py3k"
"\n$ pip install distribute")
else:
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
'zip_safe': False,
}
if not _have_setuptools:
try:
import numpy
import dateutil
setuptools_kwargs = {}
except ImportError:
sys.exit("install requires: 'python-dateutil < 2','numpy'."
" use pip or easy_install."
"\n $ pip install 'python-dateutil < 2' 'numpy'")
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.command.build_ext import build_ext as _build_ext
try:
from Cython.Distutils import build_ext as _build_ext
# from Cython.Distutils import Extension # to get pyrex debugging symbols
cython = True
except ImportError:
cython = False
from os.path import join as pjoin
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if hasattr(ext, 'include_dirs') and not numpy_incl in ext.include_dirs:
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series,"
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
Note
----
Windows binaries built against NumPy 1.8.1
"""
# --- static package metadata handed to setup() at the bottom of this file ---
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "pydata@googlegroups.com"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
# Trove classifiers displayed on PyPI.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Cython',
    'Topic :: Scientific/Engineering',
]
# Version components; the git-describe logic below may extend FULLVERSION
# with a ".dev" suffix and commit hash for non-release builds.
MAJOR = 0
MINOR = 14
MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
# Cleared below when git metadata is unavailable but an existing
# pandas/version.py can be reused instead of being rewritten.
write_version = True
if not ISRELEASED:
    # Development build: derive the full version string from ``git describe``
    # so that snapshot installs are distinguishable from releases.
    import subprocess
    FULLVERSION += '.dev'
    pipe = None
    # Try the POSIX binary first, then the Windows command shim.
    for cmd in ['git', 'git.cmd']:
        try:
            pipe = subprocess.Popen(
                [cmd, "describe", "--always", "--match", "v[0-9]*"],
                stdout=subprocess.PIPE)
            (so, serr) = pipe.communicate()
            if pipe.returncode == 0:
                break
        except OSError:
            # This git executable does not exist / cannot be launched;
            # try the next candidate.  (Was a bare ``except``, which also
            # swallowed KeyboardInterrupt and SystemExit.)
            continue
    if pipe is None or pipe.returncode != 0:
        if os.path.exists('pandas/version.py'):
            warnings.warn("WARNING: Couldn't get git revision, using existing pandas/version.py")
            write_version = False
        else:
            warnings.warn("WARNING: Couldn't get git revision, using generic version string")
    else:
        rev = so.strip()
        # Popen output is bytes on Python 3; the version must be a str.
        if sys.version_info[0] >= 3:
            rev = rev.decode('ascii')
        # ``git describe --always`` returns a bare commit hash when no tag
        # matches; normalise it to a "v<version>.dev-<hash>" form first.
        if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}", rev):
            rev = "v%s.dev-%s" % (VERSION, rev)
        FULLVERSION = rev.lstrip('v')
else:
    # Released build: only append the optional qualifier (e.g. "rc1").
    FULLVERSION += QUALIFIER
def write_version_py(filename=None):
    """Write a ``version.py`` module containing the computed version strings.

    Parameters
    ----------
    filename : str, optional
        Target path.  Defaults to ``pandas/version.py`` next to this script.
    """
    # Template kept at column 0 so the generated file has no stray indentation.
    cnt = """\
version = '%s'
short_version = '%s'
"""
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'pandas', 'version.py')
    # ``with`` guarantees the handle is closed even if the write fails,
    # replacing the old open/try/finally/close dance.
    with open(filename, 'w') as f:
        f.write(cnt % (FULLVERSION, VERSION))
# write_version is cleared earlier when git metadata was unavailable but an
# existing pandas/version.py could be reused.
if write_version:
    write_version_py()
class CleanCommand(Command):
    """Custom ``clean`` command that removes byte-compiled, generated and
    build artefacts from the source tree."""
    user_options = [("all", "a", "")]
    def initialize_options(self):
        self.all = True
        self._clean_me = []
        self._clean_trees = []
        # Checked-in generated C sources that must survive a clean.
        self._clean_exclude = ['np_datetime.c',
                               'np_datetime_strings.c',
                               'period.c',
                               'tokenizer.c',
                               'io.c',
                               'ujson.c',
                               'objToJSON.c',
                               'JSONtoObj.c',
                               'ultrajsonenc.c',
                               'ultrajsondec.c',
                               ]
        removable = ('.pyc', '.so', '.o', '.pyo', '.pyd', '.c', '.orig')
        for root, dirs, files in os.walk('pandas'):
            self._clean_me.extend(
                pjoin(root, name) for name in files
                if name not in self._clean_exclude
                and 'ujson' not in name
                and os.path.splitext(name)[-1] in removable)
            self._clean_trees.extend(
                pjoin(root, name) for name in dirs if name == '__pycache__')
        self._clean_trees.extend(
            d for d in ('build', 'dist') if os.path.exists(d))
    def finalize_options(self):
        pass
    def run(self):
        # Best-effort removal: ignore paths that are locked or already gone.
        for path in self._clean_me:
            try:
                os.unlink(path)
            except Exception:
                pass
        for tree in self._clean_trees:
            try:
                shutil.rmtree(tree)
            except Exception:
                pass
class CheckSDist(sdist):
    """``sdist`` wrapper that ensures Cython output exists (or is generated)
    before a source distribution is built."""
    # Cython sources whose generated .c counterparts must ship in an sdist.
    _pyxfiles = ['pandas/lib.pyx',
                 'pandas/hashtable.pyx',
                 'pandas/tslib.pyx',
                 'pandas/index.pyx',
                 'pandas/algos.pyx',
                 'pandas/parser.pyx',
                 'pandas/src/sparse.pyx',
                 'pandas/src/testing.pyx']
    def initialize_options(self):
        sdist.initialize_options(self)
    def run(self):
        if 'cython' in cmdclass:
            # Cython available: regenerate the C sources first.
            self.run_command('cython')
        else:
            # No Cython: every pre-generated C file must already exist.
            for pyx in self._pyxfiles:
                c_source = pyx[:-3] + 'c'
                assert os.path.isfile(c_source), (
                    "C-source file '%s' not found."
                    " Run 'setup.py cython' before sdist." % c_source)
        sdist.run(self)
class CheckingBuildExt(build_ext):
    """``build_ext`` that verifies Cython-generated sources exist before
    delegating to the regular extension build."""
    def check_cython_extensions(self, extensions):
        # Raise on the first listed source that is missing from disk;
        # generated .c files are absent in a fresh development checkout.
        missing = (src for ext in extensions for src in ext.sources
                   if not os.path.exists(src))
        for src in missing:
            raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
""" % src)
    def build_extensions(self):
        self.check_cython_extensions(self.extensions)
        build_ext.build_extensions(self)
class CythonCommand(build_ext):
    """Registered as the ``cython`` command when Cython is available.

    NOTE(review): build_extension is a no-op, so running this command appears
    intended to perform only the .pyx -> .c translation step and skip C
    compilation — this relies on ``build_ext`` here being Cython's build_ext;
    confirm against the imports at the top of the file.
    """
    def build_extension(self, ext):
        pass
class DummyBuildSrc(Command):
    """No-op ``build_src`` command.

    Registered below when Cython is unavailable, so the build proceeds
    straight from the shipped, pre-generated C sources.
    """
    user_options = []
    def initialize_options(self):
        self.py_modules_dict = {}
    def finalize_options(self):
        pass
    def run(self):
        pass
# Map distutils command names to the custom implementations above.
cmdclass = {'clean': CleanCommand,
            'build': build,
            'sdist': CheckSDist}
try:
    from wheel.bdist_wheel import bdist_wheel
    # Widen the macOS wheel platform tag so a single wheel serves both the
    # 10.6-intel and 10.9 build targets.
    class BdistWheel(bdist_wheel):
        def get_tag(self):
            tag = bdist_wheel.get_tag(self)
            repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
            if tag[2] == 'macosx_10_6_intel':
                tag = (tag[0], tag[1], repl)
            return tag
    cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
    # The 'wheel' package is optional; skip bdist_wheel support without it.
    pass
# Pick the extension-source suffix: build from .pyx when Cython is present,
# otherwise fall back to the shipped pre-generated .c files.
if cython:
    suffix = '.pyx'
    cmdclass['build_ext'] = CheckingBuildExt
    cmdclass['cython'] = CythonCommand
else:
    suffix = '.c'
    cmdclass['build_src'] = DummyBuildSrc
    cmdclass['build_ext'] = CheckingBuildExt
# Cython include stubs pulled into the 'lib' extension.
lib_depends = ['reduce', 'inference', 'properties']
def srcpath(name=None, suffix='.pyx', subdir='src'):
    """Return the path ``pandas/<subdir>/<name><suffix>``."""
    return pjoin('pandas', subdir, name + suffix)
if suffix == '.pyx':
    # Building from Cython sources: track the .pyx includes so build_ext
    # rebuilds when any of them change.
    lib_depends = [srcpath(f, suffix='.pyx') for f in lib_depends]
    lib_depends.append('pandas/src/util.pxd')
else:
    lib_depends = []
    plib_depends = []
common_include = ['pandas/src/klib', 'pandas/src']
def pxd(name):
    """Return the absolute path of the declaration file ``pandas/<name>.pxd``."""
    return os.path.abspath(pjoin('pandas', name + '.pxd'))
lib_depends = lib_depends + ['pandas/src/numpy_helper.h',
                             'pandas/src/parse_helper.h']
tseries_depends = ['pandas/src/datetime/np_datetime.h',
                   'pandas/src/datetime/np_datetime_strings.h',
                   'pandas/src/period.h']
# libm is implicit with MSVC on Windows; link it explicitly elsewhere.
libraries = ['m'] if 'win32' not in sys.platform else []
# Per-extension build description: Cython source name, .pxd files, extra C
# sources, and header dependencies that should trigger rebuilds.
ext_data = dict(
    lib={'pyxfile': 'lib',
         'pxdfiles': [],
         'depends': lib_depends},
    hashtable={'pyxfile': 'hashtable',
               'pxdfiles': ['hashtable']},
    tslib={'pyxfile': 'tslib',
           'depends': tseries_depends,
           'sources': ['pandas/src/datetime/np_datetime.c',
                       'pandas/src/datetime/np_datetime_strings.c',
                       'pandas/src/period.c']},
    index={'pyxfile': 'index',
           'sources': ['pandas/src/datetime/np_datetime.c',
                       'pandas/src/datetime/np_datetime_strings.c']},
    algos={'pyxfile': 'algos',
           'depends': [srcpath('generated', suffix='.pyx'),
                       srcpath('join', suffix='.pyx')]},
    parser=dict(pyxfile='parser',
                depends=['pandas/src/parser/tokenizer.h',
                         'pandas/src/parser/io.h',
                         'pandas/src/numpy_helper.h'],
                sources=['pandas/src/parser/tokenizer.c',
                         'pandas/src/parser/io.c'])
)
# Turn each ext_data entry into a distutils Extension object.
extensions = []
for name, data in ext_data.items():
    sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
    pxds = [pxd(x) for x in data.get('pxdfiles', [])]
    if suffix == '.pyx' and pxds:
        sources.extend(pxds)
    sources.extend(data.get('sources', []))
    include = data.get('include', common_include)
    obj = Extension('pandas.%s' % name,
                    sources=sources,
                    depends=data.get('depends', []),
                    include_dirs=include)
    extensions.append(obj)
sparse_ext = Extension('pandas._sparse',
                       sources=[srcpath('sparse', suffix=suffix)],
                       include_dirs=[],
                       libraries=libraries)
extensions.extend([sparse_ext])
testing_ext = Extension('pandas._testing',
                        sources=[srcpath('testing', suffix=suffix)],
                        include_dirs=[],
                        libraries=libraries)
extensions.extend([testing_ext])
# msgpack needs the host byte order as a preprocessor define.
if sys.byteorder == 'big':
    macros = [('__BIG_ENDIAN__', '1')]
else:
    macros = [('__LITTLE_ENDIAN__', '1')]
msgpack_ext = Extension('pandas.msgpack',
                        sources = [srcpath('msgpack',
                                           suffix=suffix if suffix == '.pyx' else '.cpp',
                                           subdir='')],
                        language='c++',
                        include_dirs=common_include,
                        define_macros=macros)
extensions.append(msgpack_ext)
# With setuptools + Cython, hand build_ext the .pyx files and let it
# regenerate the C/C++ sources itself.
if suffix == '.pyx' and 'setuptools' in sys.modules:
    for ext in extensions:
        if ext.sources[0].endswith(('.c','.cpp')):
            root, _ = os.path.splitext(ext.sources[0])
            ext.sources[0] = root + suffix
# The JSON extension is plain C (no Cython) and is always built as-is.
ujson_ext = Extension('pandas.json',
                      depends=['pandas/src/ujson/lib/ultrajson.h',
                               'pandas/src/numpy_helper.h'],
                      sources=['pandas/src/ujson/python/ujson.c',
                               'pandas/src/ujson/python/objToJSON.c',
                               'pandas/src/ujson/python/JSONtoObj.c',
                               'pandas/src/ujson/lib/ultrajsonenc.c',
                               'pandas/src/ujson/lib/ultrajsondec.c',
                               'pandas/src/datetime/np_datetime.c',
                               'pandas/src/datetime/np_datetime_strings.c'],
                      include_dirs=['pandas/src/ujson/python',
                                    'pandas/src/ujson/lib',
                                    'pandas/src/datetime'] + common_include,
                      extra_compile_args=['-D_GNU_SOURCE'])
extensions.append(ujson_ext)
if _have_setuptools:
    setuptools_kwargs["test_suite"] = "nose.collector"
# Hand everything assembled above to distutils/setuptools.
setup(name=DISTNAME,
      version=FULLVERSION,
      maintainer=AUTHOR,
      packages=['pandas',
                'pandas.compat',
                'pandas.computation',
                'pandas.computation.tests',
                'pandas.core',
                'pandas.io',
                'pandas.rpy',
                'pandas.sandbox',
                'pandas.sparse',
                'pandas.sparse.tests',
                'pandas.stats',
                'pandas.util',
                'pandas.tests',
                'pandas.tests.test_msgpack',
                'pandas.tools',
                'pandas.tools.tests',
                'pandas.tseries',
                'pandas.tseries.tests',
                'pandas.io.tests',
                'pandas.io.tests.test_json',
                'pandas.stats.tests',
                ],
      # Non-Python test fixtures that must ship inside the installed package.
      package_data={'pandas.io': ['tests/data/legacy_hdf/*.h5',
                                  'tests/data/legacy_pickle/0.10.1/*.pickle',
                                  'tests/data/legacy_pickle/0.11.0/*.pickle',
                                  'tests/data/legacy_pickle/0.12.0/*.pickle',
                                  'tests/data/legacy_pickle/0.13.0/*.pickle',
                                  'tests/data/legacy_pickle/0.14.0/*.pickle',
                                  'tests/data/*.csv',
                                  'tests/data/*.dta',
                                  'tests/data/*.txt',
                                  'tests/data/*.xls',
                                  'tests/data/*.xlsx',
                                  'tests/data/*.xlsm',
                                  'tests/data/*.table',
                                  'tests/data/*.html',
                                  'tests/data/html_encoding/*.html',
                                  'tests/test_json/data/*.json'],
                    'pandas.tools': ['tests/*.csv'],
                    'pandas.tests': ['data/*.pickle',
                                     'data/*.csv'],
                    'pandas.tseries.tests': ['data/*.pickle',
                                             'data/*.csv']
                    },
      ext_modules=extensions,
      maintainer_email=EMAIL,
      description=DESCRIPTION,
      license=LICENSE,
      cmdclass=cmdclass,
      url=URL,
      download_url=DOWNLOAD_URL,
      long_description=LONG_DESCRIPTION,
      classifiers=CLASSIFIERS,
      platforms='any',
      **setuptools_kwargs)
| true | true |
f7f77eb039de355d8757c0ebf27520ca8ce894b5 | 1,611 | py | Python | npactflask/bin/cleanup.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | 2 | 2015-09-18T02:01:19.000Z | 2021-09-03T18:40:59.000Z | npactflask/bin/cleanup.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | null | null | null | npactflask/bin/cleanup.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | 1 | 2015-09-25T18:58:21.000Z | 2015-09-25T18:58:21.000Z | #!/usr/bin/env python
"""This script scans for apparently unused files in the upload path
and deletes them.
'unused files' are considered to be ones for which the atime is older
than 14 days (see ATIME_DEFAULT)
"""
import logging
import sys
from optparse import OptionParser
logger = logging.getLogger('cleanup')
if __name__ == '__main__':
from npactflask import app, cleanup
parser = OptionParser("""usage: %prog [options]
This script scans for apparently unused files in the upload path.
'unused files' are considered to be ones for which the atime is
older than X days; set in django settings or via the --atime flag.
""")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="Show more verbose log messages.")
parser.add_option("-a", "--atime", action="store", dest="atime",
default=app.config['ATIME_DEFAULT'],
help="argument to find's atime predicate for how many "
"days since it has been accessed before we decide to "
"delete it. Defaults to %default")
(options, args) = parser.parse_args()
if options.verbose:
logger.setLevel(logging.DEBUG)
try:
days = int(options.atime)
except:
logger.error("Invalid number of days to keep for; must be an integer.")
sys.exit(1)
try:
if cleanup.cleanup_old_files(days):
logger.info("Success!")
except SystemExit:
raise
except:
logger.exception("Error during cleanup.")
sys.exit(1)
| 29.833333 | 79 | 0.633147 |
import logging
import sys
from optparse import OptionParser
logger = logging.getLogger('cleanup')
if __name__ == '__main__':
from npactflask import app, cleanup
parser = OptionParser("""usage: %prog [options]
This script scans for apparently unused files in the upload path.
'unused files' are considered to be ones for which the atime is
older than X days; set in django settings or via the --atime flag.
""")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="Show more verbose log messages.")
parser.add_option("-a", "--atime", action="store", dest="atime",
default=app.config['ATIME_DEFAULT'],
help="argument to find's atime predicate for how many "
"days since it has been accessed before we decide to "
"delete it. Defaults to %default")
(options, args) = parser.parse_args()
if options.verbose:
logger.setLevel(logging.DEBUG)
try:
days = int(options.atime)
except:
logger.error("Invalid number of days to keep for; must be an integer.")
sys.exit(1)
try:
if cleanup.cleanup_old_files(days):
logger.info("Success!")
except SystemExit:
raise
except:
logger.exception("Error during cleanup.")
sys.exit(1)
| true | true |
f7f77eff8efdfdd454b5bb13512b1a20955a8f7c | 2,082 | py | Python | detools/data_format/__init__.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 119 | 2019-02-23T07:48:11.000Z | 2022-03-23T20:45:51.000Z | detools/data_format/__init__.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 6 | 2020-01-27T11:15:32.000Z | 2021-09-15T17:58:34.000Z | detools/data_format/__init__.py | advmach/detools | 2b7b98bb8e5eb1232d15cb1731fe72f8954a2d09 | [
"BSD-2-Clause"
] | 10 | 2019-04-23T17:28:48.000Z | 2022-02-14T05:35:31.000Z | from collections import defaultdict
from operator import itemgetter
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from ..errors import Error
from ..common import DATA_FORMAT_AARCH64
from ..common import DATA_FORMAT_ARM_CORTEX_M4
from ..common import DATA_FORMAT_XTENSA_LX106
from ..common import format_bad_data_format
from ..common import format_bad_data_format_number
from . import aarch64
from . import arm_cortex_m4
from . import xtensa_lx106
def encode(ffrom, fto, data_format, data_segment):
    """Encode *ffrom*/*fto* for the given data format name.

    Returns the new from-data and to-data, plus a patch that the diff and
    from readers use later to reconstruct the original to-data.
    """
    encoders = {
        'aarch64': aarch64.encode,
        'arm-cortex-m4': arm_cortex_m4.encode,
        'xtensa-lx106': xtensa_lx106.encode
    }
    encoder = encoders.get(data_format)
    if encoder is None:
        raise Error(format_bad_data_format(data_format))
    return encoder(ffrom, fto, data_segment)
def create_readers(data_format, ffrom, patch, to_size):
    """Create the diff and from readers used when applying a patch of the
    given numeric data format.
    """
    factories = {
        DATA_FORMAT_AARCH64: aarch64.create_readers,
        DATA_FORMAT_ARM_CORTEX_M4: arm_cortex_m4.create_readers,
        DATA_FORMAT_XTENSA_LX106: xtensa_lx106.create_readers
    }
    factory = factories.get(data_format)
    if factory is None:
        raise Error(format_bad_data_format_number(data_format))
    return factory(ffrom, patch, to_size)
def info(data_format, patch, fsize):
    """Return a human-readable info string for a patch of the given numeric
    data format.
    """
    handlers = {
        DATA_FORMAT_AARCH64: aarch64.info,
        DATA_FORMAT_ARM_CORTEX_M4: arm_cortex_m4.info,
        DATA_FORMAT_XTENSA_LX106: xtensa_lx106.info
    }
    handler = handlers.get(data_format)
    if handler is None:
        raise Error(format_bad_data_format_number(data_format))
    return handler(patch, fsize)
| 33.047619 | 73 | 0.741114 | from collections import defaultdict
from operator import itemgetter
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from ..errors import Error
from ..common import DATA_FORMAT_AARCH64
from ..common import DATA_FORMAT_ARM_CORTEX_M4
from ..common import DATA_FORMAT_XTENSA_LX106
from ..common import format_bad_data_format
from ..common import format_bad_data_format_number
from . import aarch64
from . import arm_cortex_m4
from . import xtensa_lx106
def encode(ffrom, fto, data_format, data_segment):
if data_format == 'aarch64':
return aarch64.encode(ffrom, fto, data_segment)
elif data_format == 'arm-cortex-m4':
return arm_cortex_m4.encode(ffrom, fto, data_segment)
elif data_format == 'xtensa-lx106':
return xtensa_lx106.encode(ffrom, fto, data_segment)
else:
raise Error(format_bad_data_format(data_format))
def create_readers(data_format, ffrom, patch, to_size):
if data_format == DATA_FORMAT_AARCH64:
return aarch64.create_readers(ffrom, patch, to_size)
elif data_format == DATA_FORMAT_ARM_CORTEX_M4:
return arm_cortex_m4.create_readers(ffrom, patch, to_size)
elif data_format == DATA_FORMAT_XTENSA_LX106:
return xtensa_lx106.create_readers(ffrom, patch, to_size)
else:
raise Error(format_bad_data_format_number(data_format))
def info(data_format, patch, fsize):
if data_format == DATA_FORMAT_AARCH64:
return aarch64.info(patch, fsize)
elif data_format == DATA_FORMAT_ARM_CORTEX_M4:
return arm_cortex_m4.info(patch, fsize)
elif data_format == DATA_FORMAT_XTENSA_LX106:
return xtensa_lx106.info(patch, fsize)
else:
raise Error(format_bad_data_format_number(data_format))
| true | true |
f7f77f25311a643b4b4da1c44a6a6e0278c8e1e2 | 26,661 | py | Python | diskord/iterators.py | Hype3808/diskord | 46162537152866d28eb85cd4bc3b69d5213adf5a | [
"MIT"
] | 30 | 2021-09-16T10:14:06.000Z | 2022-02-28T07:25:48.000Z | diskord/iterators.py | Hype3808/diskord | 46162537152866d28eb85cd4bc3b69d5213adf5a | [
"MIT"
] | 25 | 2021-10-01T10:45:34.000Z | 2022-02-11T01:21:26.000Z | diskord/iterators.py | Hype3808/diskord | 46162537152866d28eb85cd4bc3b69d5213adf5a | [
"MIT"
] | 14 | 2021-10-02T17:16:53.000Z | 2022-02-19T17:28:46.000Z | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
from typing import (
Awaitable,
TYPE_CHECKING,
TypeVar,
Optional,
Any,
Callable,
Union,
List,
AsyncIterator,
)
from .errors import NoMoreItems
from .utils import snowflake_time, time_snowflake, maybe_coroutine
from .object import Object
from .audit_logs import AuditLogEntry
__all__ = (
"ReactionIterator",
"HistoryIterator",
"AuditLogIterator",
"GuildIterator",
"MemberIterator",
)
if TYPE_CHECKING:
from .types.audit_log import (
AuditLog as AuditLogPayload,
)
from .types.guild import (
Guild as GuildPayload,
)
from .types.message import (
Message as MessagePayload,
)
from .types.user import (
PartialUser as PartialUserPayload,
)
from .types.threads import (
Thread as ThreadPayload,
)
from .member import Member
from .user import User
from .message import Message
from .audit_logs import AuditLogEntry
from .guild import Guild
from .threads import Thread
from .abc import Snowflake
T = TypeVar("T")
OT = TypeVar("OT")
_Func = Callable[[T], Union[OT, Awaitable[OT]]]
OLDEST_OBJECT = Object(id=0)
class _AsyncIterator(AsyncIterator[T]):
    """Base class for the paginated async iterators in this module.

    Subclasses implement :meth:`next`, which returns one element per call and
    raises :exc:`NoMoreItems` when exhausted; everything else (``get``,
    ``find``, ``chunk``, ``map``, ``filter``, ``flatten`` and the async
    iteration protocol) is built on top of that single primitive.
    """
    __slots__ = ()
    async def next(self) -> T:
        # Subclass responsibility: return the next element or raise NoMoreItems.
        raise NotImplementedError
    def get(self, **attrs: Any) -> Awaitable[Optional[T]]:
        # Find the first element whose attributes equal ``attrs``; a double
        # underscore in a keyword denotes nested attribute access
        # (e.g. ``author__id=...``).
        def predicate(elem: T):
            for attr, val in attrs.items():
                nested = attr.split("__")
                obj = elem
                for attribute in nested:
                    obj = getattr(obj, attribute)
                if obj != val:
                    return False
            return True
        return self.find(predicate)
    async def find(self, predicate: _Func[T, bool]) -> Optional[T]:
        # First element satisfying ``predicate`` (plain callable or coroutine
        # function), or None once the iterator is exhausted.
        while True:
            try:
                elem = await self.next()
            except NoMoreItems:
                return None
            ret = await maybe_coroutine(predicate, elem)
            if ret:
                return elem
    def chunk(self, max_size: int) -> _ChunkedAsyncIterator[T]:
        # Group elements into lists of at most ``max_size`` items.
        if max_size <= 0:
            raise ValueError("async iterator chunk sizes must be greater than 0.")
        return _ChunkedAsyncIterator(self, max_size)
    def map(self, func: _Func[T, OT]) -> _MappedAsyncIterator[OT]:
        return _MappedAsyncIterator(self, func)
    def filter(self, predicate: _Func[T, bool]) -> _FilteredAsyncIterator[T]:
        return _FilteredAsyncIterator(self, predicate)
    async def flatten(self) -> List[T]:
        # Exhaust the iterator into a single list.
        return [element async for element in self]
    async def __anext__(self) -> T:
        try:
            return await self.next()
        except NoMoreItems:
            # Translate the internal sentinel into normal async-for termination.
            raise StopAsyncIteration()
def _identity(x):
return x
class _ChunkedAsyncIterator(_AsyncIterator[List[T]]):
    """Wraps another iterator and yields lists of at most ``max_size`` items."""
    def __init__(self, iterator, max_size):
        self.iterator = iterator
        self.max_size = max_size
    async def next(self) -> List[T]:
        chunk: List[T] = []
        while len(chunk) < self.max_size:
            try:
                chunk.append(await self.iterator.next())
            except NoMoreItems:
                if not chunk:
                    # Nothing buffered: the wrapped iterator is exhausted.
                    raise
                # Flush the final, partially-filled chunk.
                break
        return chunk
class _MappedAsyncIterator(_AsyncIterator[T]):
    """Wraps another iterator and applies ``func`` to every element."""
    def __init__(self, iterator, func):
        self.iterator = iterator
        self.func = func
    async def next(self) -> T:
        # NoMoreItems raised by the wrapped iterator propagates to the caller.
        return await maybe_coroutine(self.func, await self.iterator.next())
class _FilteredAsyncIterator(_AsyncIterator[T]):
    """Wraps another iterator and yields only elements that satisfy
    ``predicate``."""
    def __init__(self, iterator, predicate):
        self.iterator = iterator
        # No predicate means "keep everything".
        self.predicate = _identity if predicate is None else predicate
    async def next(self) -> T:
        # NoMoreItems from the wrapped iterator propagates, as in
        # _MappedAsyncIterator.
        while True:
            item = await self.iterator.next()
            if await maybe_coroutine(self.predicate, item):
                return item
class ReactionIterator(_AsyncIterator[Union["User", "Member"]]):
    """Iterator over the users that reacted to a message with a given emoji.

    Pages through the reactions endpoint up to 100 users at a time, resolving
    each entry to a cached guild :class:`Member` when possible, otherwise a
    plain :class:`User`.
    """
    def __init__(self, message, emoji, limit=100, after=None):
        self.message = message
        self.limit = limit
        self.after = after
        state = message._state
        self.getter = state.http.get_reaction_users
        self.state = state
        self.emoji = emoji
        self.guild = message.guild
        self.channel_id = message.channel.id
        # Buffer of resolved users awaiting consumption by next().
        self.users = asyncio.Queue()
    async def next(self) -> Union[User, Member]:
        # Refill the buffer from the API when it runs dry; an empty refill
        # means the reaction list is exhausted.
        if self.users.empty():
            await self.fill_users()
        try:
            return self.users.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    async def fill_users(self):
        # this is a hack because >circular imports<
        from .user import User
        if self.limit > 0:
            # API page size is capped at 100.
            retrieve = self.limit if self.limit <= 100 else 100
            after = self.after.id if self.after else None
            data: List[PartialUserPayload] = await self.getter(
                self.channel_id, self.message.id, self.emoji, retrieve, after=after
            )
            if data:
                self.limit -= retrieve
                # Advance the pagination cursor past the last user returned.
                self.after = Object(id=int(data[-1]["id"]))
            if self.guild is None or isinstance(self.guild, Object):
                # No resolved guild available: everyone becomes a plain User.
                for element in reversed(data):
                    await self.users.put(User(state=self.state, data=element))
            else:
                for element in reversed(data):
                    member_id = int(element["id"])
                    member = self.guild.get_member(member_id)
                    if member is not None:
                        await self.users.put(member)
                    else:
                        # Member not in cache; fall back to a plain User.
                        await self.users.put(User(state=self.state, data=element))
class HistoryIterator(_AsyncIterator["Message"]):
    """Iterator for receiving a channel's message history.
    The messages endpoint has two behaviours we care about here:
    If ``before`` is specified, the messages endpoint returns the `limit`
    newest messages before ``before``, sorted with newest first. For filling over
    100 messages, update the ``before`` parameter to the oldest message received.
    Messages will be returned in order by time.
    If ``after`` is specified, it returns the ``limit`` oldest messages after
    ``after``, sorted with newest first. For filling over 100 messages, update the
    ``after`` parameter to the newest message received. If messages are not
    reversed, they will be out of order (99-0, 199-100, so on)
    A note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the
    messages endpoint.
    Parameters
    -----------
    messageable: :class:`abc.Messageable`
        Messageable class to retrieve message history from.
    limit: :class:`int`
        Maximum number of messages to retrieve
    before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Message before which all messages must be.
    after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Message after which all messages must be.
    around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Message around which all messages must be. Limit max 101. Note that if
        limit is an even number, this will return at most limit+1 messages.
    oldest_first: Optional[:class:`bool`]
        If set to ``True``, return messages in oldest->newest order. Defaults to
        ``True`` if `after` is specified, otherwise ``False``.
    """
    def __init__(
        self,
        messageable,
        limit,
        before=None,
        after=None,
        around=None,
        oldest_first=None,
    ):
        # datetimes are converted to snowflake cursors; ``high`` picks the
        # boundary side so the datetime itself is excluded correctly.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        if isinstance(around, datetime.datetime):
            around = Object(id=time_snowflake(around))
        if oldest_first is None:
            self.reverse = after is not None
        else:
            self.reverse = oldest_first
        self.messageable = messageable
        self.limit = limit
        self.before = before
        self.after = after or OLDEST_OBJECT
        self.around = around
        self._filter = None  # message dict -> bool
        self.state = self.messageable._state
        self.logs_from = self.state.http.logs_from
        self.messages = asyncio.Queue()
        # Pick the pagination strategy and a client-side filter that enforces
        # whichever bound the chosen endpoint parameter cannot express.
        if self.around:
            if self.limit is None:
                raise ValueError("history does not support around with limit=None")
            if self.limit > 101:
                raise ValueError(
                    "history max limit 101 when specifying around parameter"
                )
            elif self.limit == 101:
                self.limit = 100  # Thanks discord
            self._retrieve_messages = self._retrieve_messages_around_strategy  # type: ignore
            if self.before and self.after:
                self._filter = lambda m: self.after.id < int(m["id"]) < self.before.id
            elif self.before:
                self._filter = lambda m: int(m["id"]) < self.before.id
            elif self.after:
                self._filter = lambda m: self.after.id < int(m["id"])
        else:
            if self.reverse:
                self._retrieve_messages = self._retrieve_messages_after_strategy  # type: ignore
                if self.before:
                    self._filter = lambda m: int(m["id"]) < self.before.id
            else:
                self._retrieve_messages = self._retrieve_messages_before_strategy  # type: ignore
                if self.after and self.after != OLDEST_OBJECT:
                    self._filter = lambda m: int(m["id"]) > self.after.id
    async def next(self) -> Message:
        # Refill the buffer when empty; an empty refill means history is done.
        if self.messages.empty():
            await self.fill_messages()
        try:
            return self.messages.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Clamp the next page size to the API maximum of 100 and report
        # whether another request is worthwhile.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0
    async def fill_messages(self):
        if not hasattr(self, "channel"):
            # do the required set up
            channel = await self.messageable._get_channel()
            self.channel = channel
        if self._get_retrieve():
            data = await self._retrieve_messages(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # terminate the infinite loop
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)
            channel = self.channel
            for element in data:
                await self.messages.put(
                    self.state.create_message(channel=channel, data=element)
                )
    async def _retrieve_messages(self, retrieve) -> List[Message]:
        """Retrieve messages and update next parameters."""
        raise NotImplementedError
    async def _retrieve_messages_before_strategy(self, retrieve):
        """Retrieve messages using before parameter."""
        before = self.before.id if self.before else None
        data: List[MessagePayload] = await self.logs_from(
            self.channel.id, retrieve, before=before
        )
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Oldest message returned becomes the next ``before`` cursor.
            self.before = Object(id=int(data[-1]["id"]))
        return data
    async def _retrieve_messages_after_strategy(self, retrieve):
        """Retrieve messages using after parameter."""
        after = self.after.id if self.after else None
        data: List[MessagePayload] = await self.logs_from(
            self.channel.id, retrieve, after=after
        )
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Newest message returned becomes the next ``after`` cursor.
            self.after = Object(id=int(data[0]["id"]))
        return data
    async def _retrieve_messages_around_strategy(self, retrieve):
        """Retrieve messages using around parameter."""
        # ``around`` is a single-shot query: clear it so the next fill ends.
        if self.around:
            around = self.around.id if self.around else None
            data: List[MessagePayload] = await self.logs_from(
                self.channel.id, retrieve, around=around
            )
            self.around = None
            return data
        return []
class AuditLogIterator(_AsyncIterator["AuditLogEntry"]):
def __init__(
self,
guild,
limit=None,
before=None,
after=None,
oldest_first=None,
user_id=None,
action_type=None,
):
if isinstance(before, datetime.datetime):
before = Object(id=time_snowflake(before, high=False))
if isinstance(after, datetime.datetime):
after = Object(id=time_snowflake(after, high=True))
if oldest_first is None:
self.reverse = after is not None
else:
self.reverse = oldest_first
self.guild = guild
self.loop = guild._state.loop
self.request = guild._state.http.get_audit_logs
self.limit = limit
self.before = before
self.user_id = user_id
self.action_type = action_type
self.after = OLDEST_OBJECT
self._users = {}
self._state = guild._state
self._filter = None # entry dict -> bool
self.entries = asyncio.Queue()
if self.reverse:
self._strategy = self._after_strategy
if self.before:
self._filter = lambda m: int(m["id"]) < self.before.id
else:
self._strategy = self._before_strategy
if self.after and self.after != OLDEST_OBJECT:
self._filter = lambda m: int(m["id"]) > self.after.id
async def _before_strategy(self, retrieve):
before = self.before.id if self.before else None
data: AuditLogPayload = await self.request(
self.guild.id,
limit=retrieve,
user_id=self.user_id,
action_type=self.action_type,
before=before,
)
entries = data.get("audit_log_entries", [])
if len(data) and entries:
if self.limit is not None:
self.limit -= retrieve
self.before = Object(id=int(entries[-1]["id"]))
return data.get("users", []), entries
async def _after_strategy(self, retrieve):
after = self.after.id if self.after else None
data: AuditLogPayload = await self.request(
self.guild.id,
limit=retrieve,
user_id=self.user_id,
action_type=self.action_type,
after=after,
)
entries = data.get("audit_log_entries", [])
if len(data) and entries:
if self.limit is not None:
self.limit -= retrieve
self.after = Object(id=int(entries[0]["id"]))
return data.get("users", []), entries
async def next(self) -> AuditLogEntry:
if self.entries.empty():
await self._fill()
try:
return self.entries.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
l = self.limit
if l is None or l > 100:
r = 100
else:
r = l
self.retrieve = r
return r > 0
    async def _fill(self):
        """Fetch one page of audit log entries via the selected strategy and
        push the resulting :class:`AuditLogEntry` objects onto the queue."""
        from .user import User
        if self._get_retrieve():
            users, data = await self._strategy(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # terminate the infinite loop
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)
            # Cache every user referenced by this page so each AuditLogEntry
            # can resolve user IDs without extra requests.
            for user in users:
                u = User(data=user, state=self._state)
                self._users[u.id] = u
            for element in data:
                # TODO: remove this if statement later
                if element["action_type"] is None:
                    continue
                await self.entries.put(
                    AuditLogEntry(data=element, users=self._users, guild=self.guild)
                )
class GuildIterator(_AsyncIterator["Guild"]):
    """Iterator for receiving the client's guilds.
    The guilds endpoint has the same two behaviours as described
    in :class:`HistoryIterator`:
    If ``before`` is specified, the guilds endpoint returns the ``limit``
    newest guilds before ``before``, sorted with newest first. For filling over
    100 guilds, update the ``before`` parameter to the oldest guild received.
    Guilds will be returned in order by time.
    If ``after`` is specified, it returns the ``limit`` oldest guilds after ``after``,
    sorted with newest first. For filling over 100 guilds, update the ``after``
    parameter to the newest guild received. If guilds are not reversed, they
    will be out of order (99-0, 199-100, so on).
    Note that if both ``before`` and ``after`` are specified, ``before`` is ignored by the
    guilds endpoint.
    Parameters
    -----------
    bot: :class:`diskord.Client`
        The client to retrieve the guilds from.
    limit: :class:`int`
        Maximum number of guilds to retrieve.
    before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Object before which all guilds must be.
    after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Object after which all guilds must be.
    """
    def __init__(self, bot, limit, before=None, after=None):
        # Datetimes are converted to snowflake cursors so the HTTP layer only
        # ever deals with IDs.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        self.bot = bot
        self.limit = limit
        self.before = before
        self.after = after
        self._filter = None  # raw guild payload dict -> bool
        self.state = self.bot._connection
        self.get_guilds = self.bot.http.get_guilds
        self.guilds = asyncio.Queue()
        if self.before and self.after:
            # The endpoint ignores ``before`` when both are given, so fetch by
            # ``before`` and drop anything at or before ``after`` locally.
            self._retrieve_guilds = self._retrieve_guilds_before_strategy  # type: ignore
            self._filter = lambda m: int(m["id"]) > self.after.id
        elif self.after:
            self._retrieve_guilds = self._retrieve_guilds_after_strategy  # type: ignore
        else:
            self._retrieve_guilds = self._retrieve_guilds_before_strategy  # type: ignore
    async def next(self) -> Guild:
        """Return the next guild, fetching a new page when the queue is empty.
        Raises :exc:`NoMoreItems` once the listing is exhausted.
        """
        if self.guilds.empty():
            await self.fill_guilds()
        try:
            return self.guilds.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Request at most 100 guilds (the API page size); returns whether a
        # further request should be made at all.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0
    def create_guild(self, data):
        # Imported lazily to avoid a circular import at module load time.
        from .guild import Guild
        return Guild(state=self.state, data=data)
    async def fill_guilds(self):
        """Fetch one page of guilds, apply the local filter, and enqueue them."""
        if self._get_retrieve():
            data = await self._retrieve_guilds(self.retrieve)
            if self.limit is None or len(data) < 100:
                self.limit = 0
            if self._filter:
                data = filter(self._filter, data)
            for element in data:
                await self.guilds.put(self.create_guild(element))
    async def _retrieve_guilds(self, retrieve) -> List[Guild]:
        """Retrieve guilds and update next parameters."""
        raise NotImplementedError
    async def _retrieve_guilds_before_strategy(self, retrieve):
        """Retrieve guilds using before parameter."""
        before = self.before.id if self.before else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, before=before)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue backwards from the oldest guild of this page.
            self.before = Object(id=int(data[-1]["id"]))
        return data
    async def _retrieve_guilds_after_strategy(self, retrieve):
        """Retrieve guilds using after parameter."""
        after = self.after.id if self.after else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, after=after)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue forwards from the first guild of this page.
            self.after = Object(id=int(data[0]["id"]))
        return data
class MemberIterator(_AsyncIterator["Member"]):
    """Iterator over a guild's members, fetched 1000 at a time via the HTTP API."""
    def __init__(self, guild, limit=1000, after=None):
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        self.guild = guild
        self.limit = limit
        self.after = after or OLDEST_OBJECT
        self.state = self.guild._state
        self.get_members = self.state.http.get_members
        self.members = asyncio.Queue()
    async def next(self) -> Member:
        """Return the next member, fetching a new page when the queue is empty.
        Raises :exc:`NoMoreItems` once the member list is exhausted.
        """
        if self.members.empty():
            await self.fill_members()
        try:
            return self.members.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Request at most 1000 members (the API page size); returns whether a
        # further request should be made at all.
        l = self.limit
        if l is None or l > 1000:
            r = 1000
        else:
            r = l
        self.retrieve = r
        return r > 0
    async def fill_members(self):
        """Fetch one page of members and enqueue them."""
        if self._get_retrieve():
            after = self.after.id if self.after else None
            data = await self.get_members(self.guild.id, self.retrieve, after)
            if not data:
                # no data, terminate
                return
            if len(data) < 1000:
                self.limit = 0  # terminate loop
            self.after = Object(id=int(data[-1]["user"]["id"]))
            for element in reversed(data):
                await self.members.put(self.create_member(element))
    def create_member(self, data):
        # Imported lazily to avoid a circular import at module load time.
        from .member import Member
        return Member(data=data, guild=self.guild, state=self.state)
class ArchivedThreadIterator(_AsyncIterator["Thread"]):
    """Iterator over a channel's archived threads.

    Depending on ``joined``/``private`` this pages through the joined-private,
    private, or public archived-thread listings. The joined listing paginates
    by thread ID while the other two paginate by the ISO8601 archive
    timestamp, which is why ``before`` is normalised to a string up front.

    Parameters
    -----------
    channel_id: :class:`int`
        The ID of the channel whose archived threads are iterated.
    guild: :class:`Guild`
        The guild the channel belongs to.
    limit: Optional[:class:`int`]
        Maximum number of threads to yield, or ``None`` for all of them.
    joined: :class:`bool`
        Whether to list only private archived threads the bot has joined.
        Requires ``private`` to be ``True``.
    private: :class:`bool`
        Whether to list private instead of public archived threads.
    before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
        Only threads archived before this point are returned.
    """
    def __init__(
        self,
        channel_id: int,
        guild: Guild,
        limit: Optional[int],
        joined: bool,
        private: bool,
        before: Optional[Union[Snowflake, datetime.datetime]] = None,
    ):
        self.channel_id = channel_id
        self.guild = guild
        self.limit = limit
        self.joined = joined
        self.private = private
        self.http = guild._state.http
        if joined and not private:
            raise ValueError("Cannot iterate over joined public archived threads")
        # Normalise ``before`` to the string cursor the chosen endpoint expects:
        # a snowflake string for the joined listing, an ISO8601 timestamp otherwise.
        self.before: Optional[str]
        if before is None:
            self.before = None
        elif isinstance(before, datetime.datetime):
            if joined:
                self.before = str(time_snowflake(before, high=False))
            else:
                self.before = before.isoformat()
        else:
            if joined:
                self.before = str(before.id)
            else:
                self.before = snowflake_time(before.id).isoformat()
        self.update_before: Callable[[ThreadPayload], str] = self.get_archive_timestamp
        if joined:
            self.endpoint = self.http.get_joined_private_archived_threads
            self.update_before = self.get_thread_id
        elif private:
            self.endpoint = self.http.get_private_archived_threads
        else:
            self.endpoint = self.http.get_public_archived_threads
        self.queue: asyncio.Queue[Thread] = asyncio.Queue()
        self.has_more: bool = True
    async def next(self) -> Thread:
        """Return the next archived thread, fetching a page when the queue is empty.

        Raises :exc:`NoMoreItems` once the listing is exhausted.
        """
        if self.queue.empty():
            await self.fill_queue()
        try:
            return self.queue.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    @staticmethod
    def get_archive_timestamp(data: ThreadPayload) -> str:
        """Pagination cursor for the timestamp-based listings."""
        return data["thread_metadata"]["archive_timestamp"]
    @staticmethod
    def get_thread_id(data: ThreadPayload) -> str:
        """Pagination cursor for the joined listing."""
        return data["id"]  # type: ignore
    async def fill_queue(self) -> None:
        """Fetch one page of archived threads and enqueue them.

        Fixes two defects of the previous implementation: the request asks for
        at least 50 threads per page, so the surplus is now trimmed to honour a
        smaller ``limit`` instead of being yielded to the caller; and the
        pagination cursor is only advanced when the page actually contained
        threads, avoiding an ``IndexError`` (and a potential repeat request) on
        an empty page that still reports ``has_more``.
        """
        if not self.has_more:
            raise NoMoreItems()
        limit = 50 if self.limit is None else max(self.limit, 50)
        data = await self.endpoint(self.channel_id, before=self.before, limit=limit)
        # This stuff is obviously WIP because 'members' is always empty
        threads: List[ThreadPayload] = data.get("threads", [])
        if self.limit is not None and len(threads) > self.limit:
            # The request over-fetches (minimum page size of 50); drop the
            # excess so callers never receive more than ``limit`` threads.
            threads = threads[: self.limit]
        for d in reversed(threads):
            self.queue.put_nowait(self.create_thread(d))
        # An empty page cannot advance the cursor, so stop even if the API
        # claims there is more.
        self.has_more = bool(threads) and data.get("has_more", False)
        if self.limit is not None:
            self.limit -= len(threads)
            if self.limit <= 0:
                self.has_more = False
        if self.has_more:
            self.before = self.update_before(threads[-1])
    def create_thread(self, data: ThreadPayload) -> Thread:
        """Materialise a :class:`Thread` from a raw payload."""
        from .threads import Thread
        return Thread(guild=self.guild, state=self.guild._state, data=data)
| 33.243142 | 97 | 0.598252 |
from __future__ import annotations
import asyncio
import datetime
from typing import (
Awaitable,
TYPE_CHECKING,
TypeVar,
Optional,
Any,
Callable,
Union,
List,
AsyncIterator,
)
from .errors import NoMoreItems
from .utils import snowflake_time, time_snowflake, maybe_coroutine
from .object import Object
from .audit_logs import AuditLogEntry
__all__ = (
"ReactionIterator",
"HistoryIterator",
"AuditLogIterator",
"GuildIterator",
"MemberIterator",
)
if TYPE_CHECKING:
from .types.audit_log import (
AuditLog as AuditLogPayload,
)
from .types.guild import (
Guild as GuildPayload,
)
from .types.message import (
Message as MessagePayload,
)
from .types.user import (
PartialUser as PartialUserPayload,
)
from .types.threads import (
Thread as ThreadPayload,
)
from .member import Member
from .user import User
from .message import Message
from .audit_logs import AuditLogEntry
from .guild import Guild
from .threads import Thread
from .abc import Snowflake
# Generic element / result type variables for the iterator helpers below.
T = TypeVar("T")
OT = TypeVar("OT")
# A callback mapping an item to a result, either synchronously or as a coroutine.
_Func = Callable[[T], Union[OT, Awaitable[OT]]]
# Sentinel with snowflake 0; used as the initial ``after`` cursor by the iterators.
OLDEST_OBJECT = Object(id=0)
class _AsyncIterator(AsyncIterator[T]):
    """Base class for the paginating async iterators in this module.

    Subclasses implement :meth:`next`; everything else (``get``, ``find``,
    ``chunk``, ``map``, ``filter``, ``flatten`` and the async-iteration
    protocol) is built on top of it.
    """
    __slots__ = ()
    async def next(self) -> T:
        """Return the next element or raise :exc:`NoMoreItems`. Subclass hook."""
        raise NotImplementedError
    def get(self, **attrs: Any) -> Awaitable[Optional[T]]:
        """Return the first element whose attributes match ``attrs``.

        Keys use ``__`` to traverse nested attributes (e.g. ``author__id=...``).
        """
        def predicate(elem: T):
            for attr, val in attrs.items():
                nested = attr.split("__")
                obj = elem
                for attribute in nested:
                    obj = getattr(obj, attribute)
                if obj != val:
                    return False
            return True
        return self.find(predicate)
    async def find(self, predicate: _Func[T, bool]) -> Optional[T]:
        """Return the first element satisfying ``predicate`` (sync or async),
        or ``None`` if the iterator is exhausted first."""
        while True:
            try:
                elem = await self.next()
            except NoMoreItems:
                return None
            ret = await maybe_coroutine(predicate, elem)
            if ret:
                return elem
    def chunk(self, max_size: int) -> _ChunkedAsyncIterator[T]:
        """Return an iterator yielding lists of at most ``max_size`` elements."""
        if max_size <= 0:
            raise ValueError("async iterator chunk sizes must be greater than 0.")
        return _ChunkedAsyncIterator(self, max_size)
    def map(self, func: _Func[T, OT]) -> _MappedAsyncIterator[OT]:
        """Return an iterator applying ``func`` (sync or async) to every element."""
        return _MappedAsyncIterator(self, func)
    def filter(self, predicate: _Func[T, bool]) -> _FilteredAsyncIterator[T]:
        """Return an iterator yielding only elements for which ``predicate`` holds."""
        return _FilteredAsyncIterator(self, predicate)
    async def flatten(self) -> List[T]:
        """Exhaust the iterator and return all elements as a list."""
        return [element async for element in self]
    async def __anext__(self) -> T:
        # Bridge the internal NoMoreItems protocol to the async-iteration protocol.
        try:
            return await self.next()
        except NoMoreItems:
            raise StopAsyncIteration()
def _identity(x):
    """Pass-through predicate used by :class:`_FilteredAsyncIterator` when no
    predicate is supplied."""
    return x
class _ChunkedAsyncIterator(_AsyncIterator[List[T]]):
    """Wraps another async iterator and yields its items in lists of at most
    ``max_size`` elements."""
    def __init__(self, iterator, max_size):
        self.iterator = iterator
        self.max_size = max_size
    async def next(self) -> List[T]:
        """Return the next chunk; a final partial chunk is returned as-is, and
        :exc:`NoMoreItems` propagates only when nothing at all was collected."""
        chunk: List[T] = []
        for _ in range(self.max_size):
            try:
                chunk.append(await self.iterator.next())
            except NoMoreItems:
                if not chunk:
                    raise
                break
        return chunk
class _MappedAsyncIterator(_AsyncIterator[T]):
    """Applies ``func`` (sync or async) to every item of the wrapped iterator."""
    def __init__(self, iterator, func):
        self.iterator = iterator
        self.func = func
    async def next(self) -> T:
        """Return ``func`` applied to the wrapped iterator's next item."""
        return await maybe_coroutine(self.func, await self.iterator.next())
class _FilteredAsyncIterator(_AsyncIterator[T]):
    """Yields only the items of the wrapped iterator for which ``predicate``
    (sync or async) is truthy; ``None`` means "accept everything"."""
    def __init__(self, iterator, predicate):
        self.iterator = iterator
        self.predicate = _identity if predicate is None else predicate
    async def next(self) -> T:
        """Keep pulling from the wrapped iterator until an item passes."""
        while True:
            item = await self.iterator.next()
            if await maybe_coroutine(self.predicate, item):
                return item
class ReactionIterator(_AsyncIterator[Union["User", "Member"]]):
    """Iterates over the users that reacted to a message with a given emoji.

    Yields a cached :class:`Member` when possible, otherwise a plain
    :class:`User`.
    """
    def __init__(self, message, emoji, limit=100, after=None):
        self.message = message
        self.limit = limit
        self.after = after
        state = message._state
        self.getter = state.http.get_reaction_users
        self.state = state
        self.emoji = emoji
        self.guild = message.guild
        self.channel_id = message.channel.id
        self.users = asyncio.Queue()
    async def next(self) -> Union[User, Member]:
        """Return the next reactor, fetching a new page when the queue is empty.

        Raises :exc:`NoMoreItems` once every reactor has been yielded.
        """
        if self.users.empty():
            await self.fill_users()
        try:
            return self.users.get_nowait()
        except (asyncio.QueueEmpty):
            raise NoMoreItems()
    async def fill_users(self):
        """Fetch up to 100 reaction users and enqueue them."""
        from .user import User
        if self.limit > 0:
            retrieve = self.limit if self.limit <= 100 else 100
            after = self.after.id if self.after else None
            data: List[PartialUserPayload] = await self.getter(
                self.channel_id, self.message.id, self.emoji, retrieve, after=after
            )
            if data:
                self.limit -= retrieve
                # Advance the pagination cursor to the last user received.
                self.after = Object(id=int(data[-1]["id"]))
            if self.guild is None or isinstance(self.guild, Object):
                # DM channel or uncached guild: only plain users can be produced.
                for element in reversed(data):
                    await self.users.put(User(state=self.state, data=element))
            else:
                for element in reversed(data):
                    member_id = int(element["id"])
                    member = self.guild.get_member(member_id)
                    if member is not None:
                        await self.users.put(member)
                    else:
                        await self.users.put(User(state=self.state, data=element))
class HistoryIterator(_AsyncIterator["Message"]):
    """Iterator for receiving a channel's message history.

    A retrieval strategy (``before``, ``after`` or ``around``) is chosen once
    in the constructor; :meth:`fill_messages` then pages through the endpoint
    100 messages at a time, applying a local filter where the endpoint itself
    cannot express the bound.
    """
    def __init__(
        self,
        messageable,
        limit,
        before=None,
        after=None,
        around=None,
        oldest_first=None,
    ):
        # Datetimes are converted to snowflake cursors so the HTTP layer only
        # ever deals with IDs.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        if isinstance(around, datetime.datetime):
            around = Object(id=time_snowflake(around))
        if oldest_first is None:
            # Default direction: oldest-first only when an ``after`` bound is given.
            self.reverse = after is not None
        else:
            self.reverse = oldest_first
        self.messageable = messageable
        self.limit = limit
        self.before = before
        self.after = after or OLDEST_OBJECT
        self.around = around
        self._filter = None  # raw message payload dict -> bool
        self.state = self.messageable._state
        self.logs_from = self.state.http.logs_from
        self.messages = asyncio.Queue()
        if self.around:
            # ``around`` returns a single window (at most 101 messages) and is
            # used at most once — see _retrieve_messages_around_strategy.
            if self.limit is None:
                raise ValueError("history does not support around with limit=None")
            if self.limit > 101:
                raise ValueError(
                    "history max limit 101 when specifying around parameter"
                )
            elif self.limit == 101:
                self.limit = 100
            self._retrieve_messages = self._retrieve_messages_around_strategy
            if self.before and self.after:
                self._filter = lambda m: self.after.id < int(m["id"]) < self.before.id
            elif self.before:
                self._filter = lambda m: int(m["id"]) < self.before.id
            elif self.after:
                self._filter = lambda m: self.after.id < int(m["id"])
        else:
            if self.reverse:
                self._retrieve_messages = self._retrieve_messages_after_strategy
                if self.before:
                    self._filter = lambda m: int(m["id"]) < self.before.id
            else:
                self._retrieve_messages = self._retrieve_messages_before_strategy
                if self.after and self.after != OLDEST_OBJECT:
                    self._filter = lambda m: int(m["id"]) > self.after.id
    async def next(self) -> Message:
        """Return the next message, fetching a new page when the queue is empty.

        Raises :exc:`NoMoreItems` once the history is exhausted.
        """
        if self.messages.empty():
            await self.fill_messages()
        try:
            return self.messages.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Request at most 100 messages (the API page size); returns whether a
        # further request should be made at all.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0
    async def fill_messages(self):
        """Fetch one page of messages, filter/reorder it, and enqueue the results."""
        if not hasattr(self, "channel"):
            # The channel is resolved lazily on first use.
            channel = await self.messageable._get_channel()
            self.channel = channel
        if self._get_retrieve():
            data = await self._retrieve_messages(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # a short page means the history is exhausted
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)
            channel = self.channel
            for element in data:
                await self.messages.put(
                    self.state.create_message(channel=channel, data=element)
                )
    async def _retrieve_messages(self, retrieve) -> List[Message]:
        """Strategy hook: retrieve messages and update the pagination cursor."""
        raise NotImplementedError
    async def _retrieve_messages_before_strategy(self, retrieve):
        """Retrieve messages using the ``before`` parameter (newest first)."""
        before = self.before.id if self.before else None
        data: List[MessagePayload] = await self.logs_from(
            self.channel.id, retrieve, before=before
        )
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue backwards from the oldest message of this page.
            self.before = Object(id=int(data[-1]["id"]))
        return data
    async def _retrieve_messages_after_strategy(self, retrieve):
        """Retrieve messages using the ``after`` parameter (oldest first)."""
        after = self.after.id if self.after else None
        data: List[MessagePayload] = await self.logs_from(
            self.channel.id, retrieve, after=after
        )
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue forwards from the first message of this page.
            self.after = Object(id=int(data[0]["id"]))
        return data
    async def _retrieve_messages_around_strategy(self, retrieve):
        """Retrieve a single window of messages around ``around``; subsequent
        calls return an empty list."""
        if self.around:
            around = self.around.id if self.around else None
            data: List[MessagePayload] = await self.logs_from(
                self.channel.id, retrieve, around=around
            )
            self.around = None
            return data
        return []
class AuditLogIterator(_AsyncIterator["AuditLogEntry"]):
    """Iterator for receiving a guild's audit log entries.

    Pages through the audit log endpoint 100 entries at a time, optionally
    filtered by ``user_id``/``action_type``; newest first unless
    ``oldest_first`` (or a sole ``after``) flips the direction.
    """
    def __init__(
        self,
        guild,
        limit=None,
        before=None,
        after=None,
        oldest_first=None,
        user_id=None,
        action_type=None,
    ):
        # Datetimes are converted to snowflake cursors so the HTTP layer only
        # ever deals with IDs.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        if oldest_first is None:
            self.reverse = after is not None
        else:
            self.reverse = oldest_first
        self.guild = guild
        self.loop = guild._state.loop
        self.request = guild._state.http.get_audit_logs
        self.limit = limit
        self.before = before
        self.user_id = user_id
        self.action_type = action_type
        # Fix: the previous code unconditionally reset the cursor to
        # OLDEST_OBJECT, discarding a caller-supplied ``after`` bound and
        # making the ``after`` filter branch below unreachable. Keep the
        # supplied value, matching HistoryIterator/MemberIterator.
        self.after = after or OLDEST_OBJECT
        self._users = {}
        self._state = guild._state
        self._filter = None  # entry payload dict -> bool
        self.entries = asyncio.Queue()
        if self.reverse:
            self._strategy = self._after_strategy
            if self.before:
                self._filter = lambda m: int(m["id"]) < self.before.id
        else:
            self._strategy = self._before_strategy
            if self.after and self.after != OLDEST_OBJECT:
                self._filter = lambda m: int(m["id"]) > self.after.id
    async def _before_strategy(self, retrieve):
        """Fetch up to ``retrieve`` entries older than the ``before`` cursor."""
        before = self.before.id if self.before else None
        data: AuditLogPayload = await self.request(
            self.guild.id,
            limit=retrieve,
            user_id=self.user_id,
            action_type=self.action_type,
            before=before,
        )
        entries = data.get("audit_log_entries", [])
        if len(data) and entries:
            if self.limit is not None:
                self.limit -= retrieve
            # Continue backwards from the last (oldest) entry of this page.
            self.before = Object(id=int(entries[-1]["id"]))
        return data.get("users", []), entries
    async def _after_strategy(self, retrieve):
        """Fetch up to ``retrieve`` entries newer than the ``after`` cursor."""
        after = self.after.id if self.after else None
        data: AuditLogPayload = await self.request(
            self.guild.id,
            limit=retrieve,
            user_id=self.user_id,
            action_type=self.action_type,
            after=after,
        )
        entries = data.get("audit_log_entries", [])
        if len(data) and entries:
            if self.limit is not None:
                self.limit -= retrieve
            # Continue forwards from the first entry of this page.
            self.after = Object(id=int(entries[0]["id"]))
        return data.get("users", []), entries
    async def next(self) -> AuditLogEntry:
        """Return the next entry, fetching a new page when the queue is empty.

        Raises :exc:`NoMoreItems` once the audit log is exhausted.
        """
        if self.entries.empty():
            await self._fill()
        try:
            return self.entries.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Request at most 100 entries (the API page size); returns whether a
        # further request should be made at all.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0
    async def _fill(self):
        """Fetch one page of entries via the selected strategy and enqueue them."""
        from .user import User
        if self._get_retrieve():
            users, data = await self._strategy(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # a short page means the log is exhausted
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)
            # Cache every user referenced by this page so AuditLogEntry can
            # resolve user IDs without extra requests.
            for user in users:
                u = User(data=user, state=self._state)
                self._users[u.id] = u
            for element in data:
                if element["action_type"] is None:
                    continue
                await self.entries.put(
                    AuditLogEntry(data=element, users=self._users, guild=self.guild)
                )
class GuildIterator(_AsyncIterator["Guild"]):
    """Iterator for receiving the client's guilds, 100 per request.

    With ``before`` the endpoint returns the newest guilds before that point;
    with ``after`` the oldest guilds after it. When both are given the
    endpoint ignores ``before``, so the ``after`` bound is enforced by a local
    filter instead.
    """
    def __init__(self, bot, limit, before=None, after=None):
        # Datetimes are converted to snowflake cursors so the HTTP layer only
        # ever deals with IDs.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        self.bot = bot
        self.limit = limit
        self.before = before
        self.after = after
        self._filter = None  # raw guild payload dict -> bool
        self.state = self.bot._connection
        self.get_guilds = self.bot.http.get_guilds
        self.guilds = asyncio.Queue()
        if self.before and self.after:
            self._retrieve_guilds = self._retrieve_guilds_before_strategy
            self._filter = lambda m: int(m["id"]) > self.after.id
        elif self.after:
            self._retrieve_guilds = self._retrieve_guilds_after_strategy
        else:
            self._retrieve_guilds = self._retrieve_guilds_before_strategy
    async def next(self) -> Guild:
        """Return the next guild, fetching a new page when the queue is empty.

        Raises :exc:`NoMoreItems` once the listing is exhausted.
        """
        if self.guilds.empty():
            await self.fill_guilds()
        try:
            return self.guilds.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    def _get_retrieve(self):
        # Request at most 100 guilds (the API page size); returns whether a
        # further request should be made at all.
        l = self.limit
        if l is None or l > 100:
            r = 100
        else:
            r = l
        self.retrieve = r
        return r > 0
    def create_guild(self, data):
        # Imported lazily to avoid a circular import at module load time.
        from .guild import Guild
        return Guild(state=self.state, data=data)
    async def fill_guilds(self):
        """Fetch one page of guilds, apply the local filter, and enqueue them."""
        if self._get_retrieve():
            data = await self._retrieve_guilds(self.retrieve)
            if self.limit is None or len(data) < 100:
                self.limit = 0
            if self._filter:
                data = filter(self._filter, data)
            for element in data:
                await self.guilds.put(self.create_guild(element))
    async def _retrieve_guilds(self, retrieve) -> List[Guild]:
        """Strategy hook: retrieve guilds and update the pagination cursor."""
        raise NotImplementedError
    async def _retrieve_guilds_before_strategy(self, retrieve):
        """Retrieve guilds using the ``before`` parameter."""
        before = self.before.id if self.before else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, before=before)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue backwards from the oldest guild of this page.
            self.before = Object(id=int(data[-1]["id"]))
        return data
    async def _retrieve_guilds_after_strategy(self, retrieve):
        """Retrieve guilds using the ``after`` parameter."""
        after = self.after.id if self.after else None
        data: List[GuildPayload] = await self.get_guilds(retrieve, after=after)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            # Continue forwards from the first guild of this page.
            self.after = Object(id=int(data[0]["id"]))
        return data
class MemberIterator(_AsyncIterator["Member"]):
    """Asynchronously pages through a guild's member list, up to 1000 per request."""
    def __init__(self, guild, limit=1000, after=None):
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        self.guild = guild
        self.limit = limit
        self.after = after or OLDEST_OBJECT
        self.state = self.guild._state
        self.get_members = self.state.http.get_members
        self.members = asyncio.Queue()
    async def next(self) -> Member:
        """Return the next member, fetching a new page when the queue runs dry.

        Raises :exc:`NoMoreItems` once the member list is exhausted.
        """
        if self.members.empty():
            await self.fill_members()
        if self.members.empty():
            raise NoMoreItems()
        return self.members.get_nowait()
    def _get_retrieve(self):
        """Decide the size of the next request (capped at 1000), store it in
        ``self.retrieve``, and report whether a request should happen at all."""
        remaining = self.limit
        self.retrieve = 1000 if remaining is None else min(remaining, 1000)
        return self.retrieve > 0
    async def fill_members(self):
        """Fetch one page of members and enqueue them."""
        if not self._get_retrieve():
            return
        after = self.after.id if self.after else None
        page = await self.get_members(self.guild.id, self.retrieve, after)
        if not page:
            return
        if len(page) < 1000:
            self.limit = 0  # a short page means the listing is exhausted
        self.after = Object(id=int(page[-1]["user"]["id"]))
        for raw in reversed(page):
            await self.members.put(self.create_member(raw))
    def create_member(self, data):
        """Materialise a :class:`Member` from a raw payload."""
        from .member import Member
        return Member(data=data, guild=self.guild, state=self.state)
class ArchivedThreadIterator(_AsyncIterator["Thread"]):
    """Iterator over a channel's archived threads.

    Depending on ``joined``/``private`` this pages through the joined-private,
    private, or public archived-thread listings. The joined listing paginates
    by thread ID while the other two paginate by the ISO8601 archive
    timestamp, which is why ``before`` is normalised to a string up front.
    """
    def __init__(
        self,
        channel_id: int,
        guild: Guild,
        limit: Optional[int],
        joined: bool,
        private: bool,
        before: Optional[Union[Snowflake, datetime.datetime]] = None,
    ):
        self.channel_id = channel_id
        self.guild = guild
        self.limit = limit
        self.joined = joined
        self.private = private
        self.http = guild._state.http
        if joined and not private:
            raise ValueError("Cannot iterate over joined public archived threads")
        # Normalise ``before`` to the string cursor the chosen endpoint expects:
        # a snowflake string for the joined listing, an ISO8601 timestamp otherwise.
        self.before: Optional[str]
        if before is None:
            self.before = None
        elif isinstance(before, datetime.datetime):
            if joined:
                self.before = str(time_snowflake(before, high=False))
            else:
                self.before = before.isoformat()
        else:
            if joined:
                self.before = str(before.id)
            else:
                self.before = snowflake_time(before.id).isoformat()
        self.update_before: Callable[[ThreadPayload], str] = self.get_archive_timestamp
        if joined:
            self.endpoint = self.http.get_joined_private_archived_threads
            self.update_before = self.get_thread_id
        elif private:
            self.endpoint = self.http.get_private_archived_threads
        else:
            self.endpoint = self.http.get_public_archived_threads
        self.queue: asyncio.Queue[Thread] = asyncio.Queue()
        self.has_more: bool = True
    async def next(self) -> Thread:
        """Return the next archived thread, fetching a page when the queue is empty.

        Raises :exc:`NoMoreItems` once the listing is exhausted.
        """
        if self.queue.empty():
            await self.fill_queue()
        try:
            return self.queue.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()
    @staticmethod
    def get_archive_timestamp(data: ThreadPayload) -> str:
        """Pagination cursor for the timestamp-based listings."""
        return data["thread_metadata"]["archive_timestamp"]
    @staticmethod
    def get_thread_id(data: ThreadPayload) -> str:
        """Pagination cursor for the joined listing."""
        return data["id"]
    async def fill_queue(self) -> None:
        """Fetch one page of archived threads and enqueue them."""
        if not self.has_more:
            raise NoMoreItems()
        # NOTE(review): the API is asked for at least 50 threads per page, so
        # when ``limit`` < 50 more than ``limit`` threads are queued — confirm
        # whether callers are expected to trim.
        limit = 50 if self.limit is None else max(self.limit, 50)
        data = await self.endpoint(self.channel_id, before=self.before, limit=limit)
        threads: List[ThreadPayload] = data.get("threads", [])
        for d in reversed(threads):
            self.queue.put_nowait(self.create_thread(d))
        self.has_more = data.get("has_more", False)
        if self.limit is not None:
            self.limit -= len(threads)
            if self.limit <= 0:
                self.has_more = False
        if self.has_more:
            # NOTE(review): ``threads[-1]`` raises IndexError if the page is
            # empty while ``has_more`` is still true.
            self.before = self.update_before(threads[-1])
    def create_thread(self, data: ThreadPayload) -> Thread:
        """Materialise a :class:`Thread` from a raw payload."""
        from .threads import Thread
        return Thread(guild=self.guild, state=self.guild._state, data=data)
| true | true |
f7f77fca299a63711a2471a1af8ccb1d590016c5 | 774 | py | Python | estimator/migrations/0003_auto_20200405_0028.py | barrezuetai/CostEstimator | 991f797e7e35f63e4e207cfbc134999a83d19682 | [
"MIT"
] | null | null | null | estimator/migrations/0003_auto_20200405_0028.py | barrezuetai/CostEstimator | 991f797e7e35f63e4e207cfbc134999a83d19682 | [
"MIT"
] | 7 | 2020-02-20T03:57:11.000Z | 2021-09-22T18:38:44.000Z | estimator/migrations/0003_auto_20200405_0028.py | barrezuetai/CostEstimator | 991f797e7e35f63e4e207cfbc134999a83d19682 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-04-05 00:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('estimator', '0002_auto_20200404_2207'),
]
operations = [
migrations.AlterField(
model_name='hospital',
name='additions_to_revenue',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='hospital',
name='contractual_adjustments',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='hospital',
name='other_deductions',
field=models.IntegerField(blank=True, null=True),
),
]
| 26.689655 | 61 | 0.594315 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``estimator`` app.

    Alters three revenue-related fields on the ``hospital`` model to
    ``IntegerField(blank=True, null=True)``.
    """
    dependencies = [
        ('estimator', '0002_auto_20200404_2207'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hospital',
            name='additions_to_revenue',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='hospital',
            name='contractual_adjustments',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='hospital',
            name='other_deductions',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| true | true |
f7f77fefbd8764a8d4b4d545241c85e4537d35fb | 5,648 | py | Python | CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Action_Balanced_Replay_Buffer.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 4,461 | 2019-01-13T02:06:25.000Z | 2022-03-31T11:50:11.000Z | CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Action_Balanced_Replay_Buffer.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 66 | 2019-01-17T10:36:21.000Z | 2022-02-22T21:29:30.000Z | CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Action_Balanced_Replay_Buffer.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 1,051 | 2019-01-13T17:30:49.000Z | 2022-03-31T03:33:00.000Z | import random
from collections import namedtuple, deque
import torch
import numpy as np
from .Replay_Buffer import Replay_Buffer
class Action_Balanced_Replay_Buffer(Replay_Buffer):
    """Replay buffer that provides sample of experiences that have an equal number of each action being conducted"""
    def __init__(self, buffer_size, batch_size, seed, num_actions):
        # One bounded deque per action; the total capacity is split evenly.
        self.num_actions = num_actions
        self.buffer_size_per_memory = int(buffer_size / self.num_actions)
        print("NUM ACTIONS ", self.num_actions)
        self.memories = {action: deque(maxlen=self.buffer_size_per_memory) for action in range(self.num_actions)}
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        # NOTE(review): random.seed returns None, so self.seed is always None —
        # confirm whether storing it is intentional.
        self.seed = random.seed(seed)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def add_experience(self, states, actions, rewards, next_states, dones):
        """Adds experience or list of experiences into the replay buffer"""
        if type(dones) == list:
            assert type(dones[0]) != list, "A done shouldn't be a list"
            experiences = [self.experience(state, action, reward, next_state, done)
                           for state, action, reward, next_state, done in
                           zip(states, actions, rewards, next_states, dones)]
            # Each experience is filed under the memory for the action it used.
            for experience in experiences:
                action = experience.action
                self.memories[action].append(experience)
        else:
            experience = self.experience(states, actions, rewards, next_states, dones)
            self.memories[actions].append(experience)
    def pick_experiences(self, num_experiences=None):
        """Picks the experiences that the sample function will return as a random sample of experiences. It works by picking
        an equal number of experiences that used each action (as far as possible)"""
        if num_experiences: batch_size = num_experiences
        else: batch_size = self.batch_size
        batch_per_action = self.calculate_batch_sizes_per_action(batch_size)
        samples_split_by_action = self.sample_each_action_equally(batch_per_action)
        combined_sample = []
        for key in samples_split_by_action.keys():
            combined_sample.extend(samples_split_by_action[key])
        return combined_sample
    def calculate_batch_sizes_per_action(self, batch_size):
        """Calculates the batch size we need to randomly draw from each action to make sure there is equal coverage
        per action and that the batch gets filled up"""
        min_batch_per_action = int(batch_size / self.num_actions)
        batch_per_action = {k: min_batch_per_action for k in range(self.num_actions)}
        current_batch_size = np.sum([batch_per_action[k] for k in range(self.num_actions)])
        # Hand the leftover slots (batch_size not divisible by num_actions) to
        # randomly chosen actions, one extra each.
        remainder = batch_size - current_batch_size
        give_remainder_to = random.sample(range(self.num_actions), remainder)
        for action in give_remainder_to:
            batch_per_action[action] += 1
        return batch_per_action
    def sample_each_action_equally(self, batch_per_action):
        """Samples a number of experiences (determined by batch_per_action) from the memory buffer for each action"""
        samples = {}
        for action in range(self.num_actions):
            memory = self.memories[action]
            batch_size_for_action = batch_per_action[action]
            action_memory_size = len(memory)
            assert action_memory_size > 0, "Need at least 1 experience for each action"
            if action_memory_size >= batch_size_for_action:
                samples[action] = random.sample(memory, batch_size_for_action)
            else:
                # Not enough distinct experiences: keep drawing (with repeats
                # across draws) until the requested count is reached.
                print("Memory size {} vs. required batch size {}".format(action_memory_size, batch_size_for_action))
                samples_for_action = []
                while len(samples_for_action) < batch_per_action[action]:
                    remainder = batch_per_action[action] - len(samples_for_action)
                    sampled_experiences = random.sample(memory, min(remainder, action_memory_size))
                    samples_for_action.extend(sampled_experiences)
                samples[action] = samples_for_action
        return samples
    def __len__(self):
        # Total number of stored experiences across all per-action memories.
        return np.sum([len(memory) for memory in self.memories.values()])
    def sample_experiences_with_certain_actions(self, allowed_actions, num_all_actions, required_batch_size):
        """Samples a number of experiences where the action conducted was in the list of required actions"""
        assert isinstance(allowed_actions, list)
        assert len(allowed_actions) > 0
        num_new_actions = len(allowed_actions)
        # Over-sample proportionally so that, after filtering to the allowed
        # actions, roughly required_batch_size experiences remain.
        experiences_to_sample = int(required_batch_size * float(num_all_actions) / float(num_new_actions))
        experiences = self.sample(num_experiences=experiences_to_sample)
        states, actions, rewards, next_states, dones = experiences
        matching_indexes = np.argwhere((np.in1d(actions.numpy(), allowed_actions)))
        assert matching_indexes.shape[1] == 1
        matching_indexes = matching_indexes[:, 0]
        states = states[matching_indexes]
        actions = actions[matching_indexes]
        rewards = rewards[matching_indexes]
        next_states = next_states[matching_indexes]
        dones = dones[matching_indexes]
        # Allow a 5% tolerance around the requested batch size.
        assert abs(states.shape[0] - required_batch_size) <= 0.05*required_batch_size, "{} vs. {}".format(states.shape[0], required_batch_size)
        return (states, actions, rewards, next_states, dones)
| 53.790476 | 143 | 0.688385 | import random
from collections import namedtuple, deque
import torch
import numpy as np
from .Replay_Buffer import Replay_Buffer
class Action_Balanced_Replay_Buffer(Replay_Buffer):
    def __init__(self, buffer_size, batch_size, seed, num_actions):
        """Create one bounded deque per action so sampling can balance across actions.

        The total ``buffer_size`` is split evenly between the ``num_actions`` memories.
        """
        self.num_actions = num_actions
        self.buffer_size_per_memory = int(buffer_size / self.num_actions)
        print("NUM ACTIONS ", self.num_actions)
        self.memories = {action: deque(maxlen=self.buffer_size_per_memory) for action in range(self.num_actions)}
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        # NOTE(review): random.seed returns None, so self.seed is always None —
        # confirm whether storing it is intentional.
        self.seed = random.seed(seed)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def add_experience(self, states, actions, rewards, next_states, dones):
if type(dones) == list:
assert type(dones[0]) != list, "A done shouldn't be a list"
experiences = [self.experience(state, action, reward, next_state, done)
for state, action, reward, next_state, done in
zip(states, actions, rewards, next_states, dones)]
for experience in experiences:
action = experience.action
self.memories[action].append(experience)
else:
experience = self.experience(states, actions, rewards, next_states, dones)
self.memories[actions].append(experience)
def pick_experiences(self, num_experiences=None):
if num_experiences: batch_size = num_experiences
else: batch_size = self.batch_size
batch_per_action = self.calculate_batch_sizes_per_action(batch_size)
samples_split_by_action = self.sample_each_action_equally(batch_per_action)
combined_sample = []
for key in samples_split_by_action.keys():
combined_sample.extend(samples_split_by_action[key])
return combined_sample
def calculate_batch_sizes_per_action(self, batch_size):
min_batch_per_action = int(batch_size / self.num_actions)
batch_per_action = {k: min_batch_per_action for k in range(self.num_actions)}
current_batch_size = np.sum([batch_per_action[k] for k in range(self.num_actions)])
remainder = batch_size - current_batch_size
give_remainder_to = random.sample(range(self.num_actions), remainder)
for action in give_remainder_to:
batch_per_action[action] += 1
return batch_per_action
def sample_each_action_equally(self, batch_per_action):
samples = {}
for action in range(self.num_actions):
memory = self.memories[action]
batch_size_for_action = batch_per_action[action]
action_memory_size = len(memory)
assert action_memory_size > 0, "Need at least 1 experience for each action"
if action_memory_size >= batch_size_for_action:
samples[action] = random.sample(memory, batch_size_for_action)
else:
print("Memory size {} vs. required batch size {}".format(action_memory_size, batch_size_for_action))
samples_for_action = []
while len(samples_for_action) < batch_per_action[action]:
remainder = batch_per_action[action] - len(samples_for_action)
sampled_experiences = random.sample(memory, min(remainder, action_memory_size))
samples_for_action.extend(sampled_experiences)
samples[action] = samples_for_action
return samples
def __len__(self):
return np.sum([len(memory) for memory in self.memories.values()])
def sample_experiences_with_certain_actions(self, allowed_actions, num_all_actions, required_batch_size):
assert isinstance(allowed_actions, list)
assert len(allowed_actions) > 0
num_new_actions = len(allowed_actions)
experiences_to_sample = int(required_batch_size * float(num_all_actions) / float(num_new_actions))
experiences = self.sample(num_experiences=experiences_to_sample)
states, actions, rewards, next_states, dones = experiences
matching_indexes = np.argwhere((np.in1d(actions.numpy(), allowed_actions)))
assert matching_indexes.shape[1] == 1
matching_indexes = matching_indexes[:, 0]
states = states[matching_indexes]
actions = actions[matching_indexes]
rewards = rewards[matching_indexes]
next_states = next_states[matching_indexes]
dones = dones[matching_indexes]
assert abs(states.shape[0] - required_batch_size) <= 0.05*required_batch_size, "{} vs. {}".format(states.shape[0], required_batch_size)
return (states, actions, rewards, next_states, dones)
| true | true |
f7f780de9ba006adef1c95f0551abc73ee5602b5 | 1,604 | py | Python | qwerty.py | telegrambotdev/Polyglot | b6f6a89905dd63ed26b4bc9f08f5a63402278089 | [
"MIT"
] | null | null | null | qwerty.py | telegrambotdev/Polyglot | b6f6a89905dd63ed26b4bc9f08f5a63402278089 | [
"MIT"
] | null | null | null | qwerty.py | telegrambotdev/Polyglot | b6f6a89905dd63ed26b4bc9f08f5a63402278089 | [
"MIT"
] | null | null | null | import traceback
import logger
import utils
from googletrans import LANGUAGES
def qwerty_main(message):
text = utils.textparser(message)
if text is None:
logger.write_log("none", message)
return
logger.write_log(text, message)
arg1, arg2 = utils.extract_arg(message.text, 1), utils.extract_arg(message.text, 2)
if arg2 is None:
tab1 = utils.layouts.get(utils.extract_lang(text))
tab2 = utils.layouts.get(arg1)
else:
tab1 = utils.layouts.get(arg1)
tab2 = utils.layouts.get(arg2)
if tab1 is None and arg2 is None:
utils.bot.reply_to(message, "Исходный язык не распознан. Неправильный аргумент или неверно распознан "
"язык? (" + LANGUAGES.get(utils.extract_lang(text)) + ")\n"
"Попробуйте указать исходный язык вручную. Возможно, язык отсутствует в "
"словаре символов")
return
if tab1 is None or tab2 is None:
utils.bot.reply_to(message, "Неизвестная раскладка. Возможно, язык отсутствует в словаре символов")
return
try:
translated_text = text.translate(str.maketrans(tab1, tab2))
utils.bot.reply_to(message, translated_text)
except Exception as e:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
utils.bot.reply_to(message, "Ошибка смены раскладки текста. Обратитесь к авторам бота\n"
"Информация для отладки сохранена в логах бота.")
| 36.454545 | 111 | 0.607855 | import traceback
import logger
import utils
from googletrans import LANGUAGES
def qwerty_main(message):
text = utils.textparser(message)
if text is None:
logger.write_log("none", message)
return
logger.write_log(text, message)
arg1, arg2 = utils.extract_arg(message.text, 1), utils.extract_arg(message.text, 2)
if arg2 is None:
tab1 = utils.layouts.get(utils.extract_lang(text))
tab2 = utils.layouts.get(arg1)
else:
tab1 = utils.layouts.get(arg1)
tab2 = utils.layouts.get(arg2)
if tab1 is None and arg2 is None:
utils.bot.reply_to(message, "Исходный язык не распознан. Неправильный аргумент или неверно распознан "
"язык? (" + LANGUAGES.get(utils.extract_lang(text)) + ")\n"
"Попробуйте указать исходный язык вручную. Возможно, язык отсутствует в "
"словаре символов")
return
if tab1 is None or tab2 is None:
utils.bot.reply_to(message, "Неизвестная раскладка. Возможно, язык отсутствует в словаре символов")
return
try:
translated_text = text.translate(str.maketrans(tab1, tab2))
utils.bot.reply_to(message, translated_text)
except Exception as e:
logger.write_log("ERR: " + str(e) + "\n" + traceback.format_exc())
utils.bot.reply_to(message, "Ошибка смены раскладки текста. Обратитесь к авторам бота\n"
"Информация для отладки сохранена в логах бота.")
| true | true |
f7f7815ad2f6e822348a8f017669ba8ff3728d0d | 558 | py | Python | authentik/outposts/migrations/0012_service_connection_non_unique.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/outposts/migrations/0012_service_connection_non_unique.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/outposts/migrations/0012_service_connection_non_unique.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | # Generated by Django 3.1.3 on 2020-11-18 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_outposts", "0011_docker_tls_auth"),
]
operations = [
migrations.AlterField(
model_name="outpostserviceconnection",
name="local",
field=models.BooleanField(
default=False,
help_text="If enabled, use the local connection. Required Docker socket/Kubernetes Integration",
),
),
]
| 25.363636 | 112 | 0.607527 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_outposts", "0011_docker_tls_auth"),
]
operations = [
migrations.AlterField(
model_name="outpostserviceconnection",
name="local",
field=models.BooleanField(
default=False,
help_text="If enabled, use the local connection. Required Docker socket/Kubernetes Integration",
),
),
]
| true | true |
f7f781e0fa83526164ac63731be48eae1641c691 | 4,781 | py | Python | 2015/23_OpeningtheTuringLock/computer.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 1 | 2021-01-03T23:09:28.000Z | 2021-01-03T23:09:28.000Z | 2015/23_OpeningtheTuringLock/computer.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 6 | 2020-12-26T21:02:42.000Z | 2020-12-26T21:02:52.000Z | 2015/23_OpeningtheTuringLock/computer.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | null | null | null | # ======================================================================
# Opening the Turing Lock
# Advent of Code 2015 Day 23 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# c o m p u t e r . p y
# ======================================================================
"A solver for the Advent of Code 2015 Day 23 puzzle"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ======================================================================
# Computer
# ======================================================================
class Computer(object): # pylint: disable=R0902, R0205
"Object for Opening the Turing Lock"
def __init__(self, text=None, part2=False):
# 1. Set the initial values
self.part2 = part2
self.text = text
if text is None:
self.text = []
self.regs = {'a': 0, 'b': 0}
self.addr = 0
self.reset()
def reset(self):
"Reset the computer"
# 1. Restore the initial values
self.regs = {'a': 0, 'b': 0}
self.addr = 0
# 2. Part two has a twist
if self.part2:
self.regs['a'] = 1
def step(self):
"Execute a single instruction"
# 1. Get the instruction
if self.addr < 0 or self.addr >= len(self.text):
return False
inst = self.text[self.addr]
# 2. Break the instruction info parts
parts = inst.replace(',', '').split()
opcode = parts[0]
op1 = parts[1]
if len(parts) == 3:
op2 = parts[2]
else:
op2 = None
next_addr = self.addr + 1
# print(self.addr, inst, opcode, op1, op2, next_addr)
# 3. Execute the instruction
if opcode == 'hlf':
# 3a. hlf r sets register r to half its current value
self.regs[op1] = self.regs[op1] // 2
elif opcode == 'tpl':
# 3b. tpl r sets register r to triple its current value
self.regs[op1] = self.regs[op1] * 3
elif opcode == 'inc':
# 3c. inc r increments register r, adding 1 to it
self.regs[op1] = self.regs[op1] + 1
elif opcode == 'jmp':
# 3d. jmp offset is a relative jump
next_addr = self.addr + int(op1)
elif opcode == 'jie':
# 3e. jie r, offset is like jmp, but if reg r is even
if self.regs[op1] % 2 == 0:
next_addr = self.addr + int(op2)
elif opcode == 'jio':
# 3f. jio r, offset is like jmp, but if reg r is one
if self.regs[op1] == 1:
next_addr = self.addr + int(op2)
else:
print("Bad inst", self.addr, inst, opcode, op1, op2, next_addr)
return False
# 4. Set the program counter to its next value
self.addr = next_addr
# 5. Return success
return True
def run(self):
"Run the program"
# 1. Loop with the program counter is good
while self.step():
pass
# 2. Return the b register
return self.regs['b']
def part_one(self, verbose=False, limit=0):
"Returns the solution for part one"
# 0. Precondition axioms
assert verbose in [True, False]
assert limit >= 0
# 1. Return the solution for part one
return self.run()
def part_two(self, verbose=False, limit=0):
"Returns the solution for part two"
# 0. Precondition axioms
assert verbose in [True, False]
assert limit >= 0
# 1. Return the solution for part two
return self.run()
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end c o m p u t e r . p y end
# ======================================================================
| 33.907801 | 75 | 0.389877 |
class Computer(object):
def __init__(self, text=None, part2=False):
self.part2 = part2
self.text = text
if text is None:
self.text = []
self.regs = {'a': 0, 'b': 0}
self.addr = 0
self.reset()
def reset(self):
self.regs = {'a': 0, 'b': 0}
self.addr = 0
if self.part2:
self.regs['a'] = 1
def step(self):
if self.addr < 0 or self.addr >= len(self.text):
return False
inst = self.text[self.addr]
parts = inst.replace(',', '').split()
opcode = parts[0]
op1 = parts[1]
if len(parts) == 3:
op2 = parts[2]
else:
op2 = None
next_addr = self.addr + 1
if opcode == 'hlf':
self.regs[op1] = self.regs[op1] // 2
elif opcode == 'tpl':
self.regs[op1] = self.regs[op1] * 3
elif opcode == 'inc':
self.regs[op1] = self.regs[op1] + 1
elif opcode == 'jmp':
next_addr = self.addr + int(op1)
elif opcode == 'jie':
if self.regs[op1] % 2 == 0:
next_addr = self.addr + int(op2)
elif opcode == 'jio':
if self.regs[op1] == 1:
next_addr = self.addr + int(op2)
else:
print("Bad inst", self.addr, inst, opcode, op1, op2, next_addr)
return False
self.addr = next_addr
return True
def run(self):
while self.step():
pass
return self.regs['b']
def part_one(self, verbose=False, limit=0):
assert verbose in [True, False]
assert limit >= 0
return self.run()
def part_two(self, verbose=False, limit=0):
assert verbose in [True, False]
assert limit >= 0
return self.run()
if __name__ == '__main__':
pass
| true | true |
f7f78231b4fd20e5c68f6262aee5a33dd6c98947 | 890 | py | Python | venv/Lib/site-packages/dataframe/search_tree/__init__.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dataframe/search_tree/__init__.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dataframe/search_tree/__init__.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = 'mail@simon-dirmeier.net'
from dataframe.search_tree.search_tree import SearchTree
| 34.230769 | 70 | 0.760674 |
from dataframe.search_tree.search_tree import SearchTree
| true | true |
f7f783392fa2525b2ab11f4e1569c9831f3ac7d7 | 131,895 | py | Python | sympy/core/tests/test_args.py | dsavransky/sympy | c05c2755fec78ff19af89a4599f76edb2ee104af | [
"BSD-3-Clause"
] | null | null | null | sympy/core/tests/test_args.py | dsavransky/sympy | c05c2755fec78ff19af89a4599f76edb2ee104af | [
"BSD-3-Clause"
] | null | null | null | sympy/core/tests/test_args.py | dsavransky/sympy | c05c2755fec78ff19af89a4599f76edb2ee104af | [
"BSD-3-Clause"
] | null | null | null | """Test whether all elements of cls.args are instances of Basic. """
# NOTE: keep tests sorted by (module, class name) key. If a class can't
# be instantiated, add it here anyway with @SKIP("abstract class) (see
# e.g. Function).
import os
import re
import warnings
import io
from sympy import (Basic, S, symbols, sqrt, sin, oo, Interval, exp, Lambda, pi,
Eq, log)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, SKIP
from sympy.utilities.exceptions import SymPyDeprecationWarning
x, y, z = symbols('x,y,z')
def test_all_classes_are_tested():
    """Walk the whole sympy source tree and assert that every ``Basic``
    subclass found in an importable module has a matching
    ``test_<module>__<ClassName>`` function defined in this file."""
    # Absolute path of the sympy package (two directory levels above this
    # test directory) and the prefix stripped when converting file-system
    # paths into dotted module names.
    this = os.path.split(__file__)[0]
    path = os.path.join(this, os.pardir, os.pardir)
    sympy_path = os.path.abspath(path)
    prefix = os.path.split(sympy_path)[0] + os.sep
    # Matches ``class Name(`` at the start of any line of a source file.
    re_cls = re.compile(r"^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE)
    modules = {}
    for root, dirs, files in os.walk(sympy_path):
        module = root.replace(prefix, "").replace(os.sep, ".")
        for file in files:
            # Skip private modules, test files and benchmark files.
            if file.startswith(("_", "test_", "bench_")):
                continue
            if not file.endswith(".py"):
                continue
            with io.open(os.path.join(root, file), "r", encoding='utf-8') as f:
                text = f.read()
            submodule = module + '.' + file[:-3]
            names = re_cls.findall(text)
            if not names:
                continue
            try:
                mod = __import__(submodule, fromlist=names)
            except ImportError:
                # Some modules need optional dependencies; skip those.
                continue
            def is_Basic(name):
                # Only Basic subclasses need their .args checked; a
                # deprecated wrapper is unwrapped to the real class first.
                cls = getattr(mod, name)
                if hasattr(cls, '_sympy_deprecated_func'):
                    cls = cls._sympy_deprecated_func
                return issubclass(cls, Basic)
            names = list(filter(is_Basic, names))
            if names:
                modules[submodule] = names
    # Each collected class must have a test function in this module's
    # globals, named test_<module with dots replaced by __>__<Class>.
    ns = globals()
    failed = []
    for module, names in modules.items():
        mod = module.replace('.', '__')
        for name in names:
            test = 'test_' + mod + '__' + name
            if test not in ns:
                failed.append(module + '.' + name)
    # reset all SymPyDeprecationWarning into errors
    warnings.simplefilter("error", category=SymPyDeprecationWarning)
    assert not failed, "Missing classes: %s. Please add tests for these to sympy/core/tests/test_args.py." % ", ".join(failed)
def _test_args(obj):
    """Return True iff every element of ``obj.args`` is a ``Basic`` instance."""
    for member in obj.args:
        if not isinstance(member, Basic):
            return False
    return True
# --- sympy.assumptions.assume ---------------------------------------------
# Each stub below just constructs an instance and checks, via _test_args,
# that everything stored in its .args is a Basic instance.
def test_sympy__assumptions__assume__AppliedPredicate():
    from sympy.assumptions.assume import AppliedPredicate, Predicate
    from sympy import Q
    assert _test_args(AppliedPredicate(Predicate("test"), 2))
    assert _test_args(Q.is_true(True))
def test_sympy__assumptions__assume__Predicate():
    from sympy.assumptions.assume import Predicate
    assert _test_args(Predicate("test"))
# --- sympy.assumptions.sathandlers ----------------------------------------
# Each handler class is exercised with a bare predicate, an applied
# predicate, and a predicate applied to a compound expression.
def test_sympy__assumptions__sathandlers__UnevaluatedOnFree():
    from sympy.assumptions.sathandlers import UnevaluatedOnFree
    from sympy import Q
    assert _test_args(UnevaluatedOnFree(Q.positive))
    assert _test_args(UnevaluatedOnFree(Q.positive(x)))
    assert _test_args(UnevaluatedOnFree(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__AllArgs():
    from sympy.assumptions.sathandlers import AllArgs
    from sympy import Q
    assert _test_args(AllArgs(Q.positive))
    assert _test_args(AllArgs(Q.positive(x)))
    assert _test_args(AllArgs(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__AnyArgs():
    from sympy.assumptions.sathandlers import AnyArgs
    from sympy import Q
    assert _test_args(AnyArgs(Q.positive))
    assert _test_args(AnyArgs(Q.positive(x)))
    assert _test_args(AnyArgs(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__ExactlyOneArg():
    from sympy.assumptions.sathandlers import ExactlyOneArg
    from sympy import Q
    assert _test_args(ExactlyOneArg(Q.positive))
    assert _test_args(ExactlyOneArg(Q.positive(x)))
    assert _test_args(ExactlyOneArg(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__CheckOldAssump():
    from sympy.assumptions.sathandlers import CheckOldAssump
    from sympy import Q
    assert _test_args(CheckOldAssump(Q.positive))
    assert _test_args(CheckOldAssump(Q.positive(x)))
    assert _test_args(CheckOldAssump(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__CheckIsPrime():
    from sympy.assumptions.sathandlers import CheckIsPrime
    from sympy import Q
    # Input must be a number
    assert _test_args(CheckIsPrime(Q.positive))
    assert _test_args(CheckIsPrime(Q.positive(5)))
# --- sympy.codegen.ast -----------------------------------------------------
# Abstract node types are marked @SKIP; concrete nodes are instantiated
# with minimal arguments and checked with _test_args.
@SKIP("abstract Class")
def test_sympy__codegen__ast__AugmentedAssignment():
    from sympy.codegen.ast import AugmentedAssignment
    assert _test_args(AugmentedAssignment(x, 1))
def test_sympy__codegen__ast__AddAugmentedAssignment():
    from sympy.codegen.ast import AddAugmentedAssignment
    assert _test_args(AddAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__SubAugmentedAssignment():
    from sympy.codegen.ast import SubAugmentedAssignment
    assert _test_args(SubAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__MulAugmentedAssignment():
    from sympy.codegen.ast import MulAugmentedAssignment
    assert _test_args(MulAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__DivAugmentedAssignment():
    from sympy.codegen.ast import DivAugmentedAssignment
    assert _test_args(DivAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__ModAugmentedAssignment():
    from sympy.codegen.ast import ModAugmentedAssignment
    assert _test_args(ModAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__CodeBlock():
    from sympy.codegen.ast import CodeBlock, Assignment
    assert _test_args(CodeBlock(Assignment(x, 1), Assignment(y, 2)))
def test_sympy__codegen__ast__For():
    from sympy.codegen.ast import For, CodeBlock, AddAugmentedAssignment
    from sympy import Range
    assert _test_args(For(x, Range(10), CodeBlock(AddAugmentedAssignment(y, 1))))
def test_sympy__codegen__ast__Token():
    from sympy.codegen.ast import Token
    assert _test_args(Token())
def test_sympy__codegen__ast__Type():
    from sympy.codegen.ast import Type
    assert _test_args(Type('float128'))
def test_sympy__codegen__ast__IntBaseType():
    from sympy.codegen.ast import IntBaseType
    assert _test_args(IntBaseType('bigint'))
def test_sympy__codegen__ast___SizedIntType():
    from sympy.codegen.ast import _SizedIntType
    assert _test_args(_SizedIntType('int128', 128))
def test_sympy__codegen__ast__SignedIntType():
    from sympy.codegen.ast import SignedIntType
    assert _test_args(SignedIntType('int128_with_sign', 128))
def test_sympy__codegen__ast__UnsignedIntType():
    from sympy.codegen.ast import UnsignedIntType
    assert _test_args(UnsignedIntType('unt128', 128))
def test_sympy__codegen__ast__FloatType():
    from sympy.codegen.ast import FloatType
    assert _test_args(FloatType('float242', 242, nmant=142, nexp=99))
def test_sympy__codegen__ast__ComplexType():
    from sympy.codegen.ast import ComplexType
    assert _test_args(ComplexType('complex42', 42, nmant=15, nexp=5))
def test_sympy__codegen__ast__Attribute():
    from sympy.codegen.ast import Attribute
    assert _test_args(Attribute('noexcept'))
def test_sympy__codegen__ast__Variable():
    from sympy.codegen.ast import Variable, Type, value_const
    assert _test_args(Variable(x))
    assert _test_args(Variable(y, {value_const}, Type('float32')))
    assert _test_args(Variable(z, type_=Type('float64')))
def test_sympy__codegen__ast__Pointer():
    from sympy.codegen.ast import Pointer, Type, pointer_const
    assert _test_args(Pointer(x))
    assert _test_args(Pointer(y, type_=Type('float32')))
    assert _test_args(Pointer(z, {pointer_const}, Type('float64')))
def test_sympy__codegen__ast__Declaration():
    from sympy.codegen.ast import Declaration, Variable, Type
    vx = Variable(x, type_=Type('float'))
    assert _test_args(Declaration(vx))
    assert _test_args(Declaration(vx, 3.0))
# --- sympy.combinatorics ---------------------------------------------------
# Classes marked @XFAIL store non-Basic values (plain ints/lists) in .args,
# so _test_args is expected to fail for them.
@XFAIL
def test_sympy__combinatorics__graycode__GrayCode():
    from sympy.combinatorics.graycode import GrayCode
    # an integer is given and returned from GrayCode as the arg
    assert _test_args(GrayCode(3, start='100'))
    assert _test_args(GrayCode(3, rank=1))
def test_sympy__combinatorics__subsets__Subset():
    from sympy.combinatorics.subsets import Subset
    assert _test_args(Subset([0, 1], [0, 1, 2, 3]))
    assert _test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd']))
@XFAIL
def test_sympy__combinatorics__permutations__Permutation():
    from sympy.combinatorics.permutations import Permutation
    assert _test_args(Permutation([0, 1, 2, 3]))
def test_sympy__combinatorics__perm_groups__PermutationGroup():
    from sympy.combinatorics.permutations import Permutation
    from sympy.combinatorics.perm_groups import PermutationGroup
    assert _test_args(PermutationGroup([Permutation([0, 1])]))
def test_sympy__combinatorics__polyhedron__Polyhedron():
    from sympy.combinatorics.permutations import Permutation
    from sympy.combinatorics.polyhedron import Polyhedron
    from sympy.abc import w, x, y, z
    # The symmetry group of the tetrahedron given as vertex permutations.
    pgroup = [Permutation([[0, 1, 2], [3]]),
              Permutation([[0, 1, 3], [2]]),
              Permutation([[0, 2, 3], [1]]),
              Permutation([[1, 2, 3], [0]]),
              Permutation([[0, 1], [2, 3]]),
              Permutation([[0, 2], [1, 3]]),
              Permutation([[0, 3], [1, 2]]),
              Permutation([[0, 1, 2, 3]])]
    corners = [w, x, y, z]
    faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)]
    assert _test_args(Polyhedron(corners, faces, pgroup))
@XFAIL
def test_sympy__combinatorics__prufer__Prufer():
    from sympy.combinatorics.prufer import Prufer
    assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4))
def test_sympy__combinatorics__partitions__Partition():
    from sympy.combinatorics.partitions import Partition
    assert _test_args(Partition([1]))
@XFAIL
def test_sympy__combinatorics__partitions__IntegerPartition():
    from sympy.combinatorics.partitions import IntegerPartition
    assert _test_args(IntegerPartition([1]))
# --- sympy.concrete --------------------------------------------------------
# Products/sums are built with one and with two (nested) limit tuples.
def test_sympy__concrete__products__Product():
    from sympy.concrete.products import Product
    assert _test_args(Product(x, (x, 0, 10)))
    assert _test_args(Product(x, (x, 0, y), (y, 0, 10)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_limits__ExprWithLimits():
    from sympy.concrete.expr_with_limits import ExprWithLimits
    assert _test_args(ExprWithLimits(x, (x, 0, 10)))
    assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_limits__AddWithLimits():
    from sympy.concrete.expr_with_limits import AddWithLimits
    assert _test_args(AddWithLimits(x, (x, 0, 10)))
    assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits():
    from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
    assert _test_args(ExprWithIntLimits(x, (x, 0, 10)))
    assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3)))
def test_sympy__concrete__summations__Sum():
    from sympy.concrete.summations import Sum
    assert _test_args(Sum(x, (x, 0, 10)))
    assert _test_args(Sum(x, (x, 0, y), (y, 0, 10)))
# --- sympy.core (and sympy.algebras) ---------------------------------------
# One stub per concrete core class; abstract bases are marked @SKIP.
def test_sympy__core__add__Add():
    from sympy.core.add import Add
    assert _test_args(Add(x, y, z, 2))
def test_sympy__core__basic__Atom():
    from sympy.core.basic import Atom
    assert _test_args(Atom())
def test_sympy__core__basic__Basic():
    from sympy.core.basic import Basic
    assert _test_args(Basic())
def test_sympy__core__containers__Dict():
    from sympy.core.containers import Dict
    assert _test_args(Dict({x: y, y: z}))
def test_sympy__core__containers__Tuple():
    from sympy.core.containers import Tuple
    assert _test_args(Tuple(x, y, z, 2))
def test_sympy__core__expr__AtomicExpr():
    from sympy.core.expr import AtomicExpr
    assert _test_args(AtomicExpr())
def test_sympy__core__expr__Expr():
    from sympy.core.expr import Expr
    assert _test_args(Expr())
def test_sympy__core__expr__UnevaluatedExpr():
    from sympy.core.expr import UnevaluatedExpr
    from sympy.abc import x
    assert _test_args(UnevaluatedExpr(x))
def test_sympy__core__function__Application():
    from sympy.core.function import Application
    assert _test_args(Application(1, 2, 3))
def test_sympy__core__function__AppliedUndef():
    from sympy.core.function import AppliedUndef
    assert _test_args(AppliedUndef(1, 2, 3))
def test_sympy__core__function__Derivative():
    from sympy.core.function import Derivative
    assert _test_args(Derivative(2, x, y, 3))
@SKIP("abstract class")
def test_sympy__core__function__Function():
    pass
def test_sympy__core__function__Lambda():
    # Uses the module-level Lambda imported at the top of this file.
    assert _test_args(Lambda((x, y), x + y + z))
def test_sympy__core__function__Subs():
    from sympy.core.function import Subs
    assert _test_args(Subs(x + y, x, 2))
def test_sympy__core__function__WildFunction():
    from sympy.core.function import WildFunction
    assert _test_args(WildFunction('f'))
def test_sympy__core__mod__Mod():
    from sympy.core.mod import Mod
    assert _test_args(Mod(x, 2))
def test_sympy__core__mul__Mul():
    from sympy.core.mul import Mul
    assert _test_args(Mul(2, x, y, z))
def test_sympy__core__numbers__Catalan():
    from sympy.core.numbers import Catalan
    assert _test_args(Catalan())
def test_sympy__core__numbers__ComplexInfinity():
    from sympy.core.numbers import ComplexInfinity
    assert _test_args(ComplexInfinity())
def test_sympy__core__numbers__EulerGamma():
    from sympy.core.numbers import EulerGamma
    assert _test_args(EulerGamma())
def test_sympy__core__numbers__Exp1():
    from sympy.core.numbers import Exp1
    assert _test_args(Exp1())
def test_sympy__core__numbers__Float():
    from sympy.core.numbers import Float
    assert _test_args(Float(1.23))
def test_sympy__core__numbers__GoldenRatio():
    from sympy.core.numbers import GoldenRatio
    assert _test_args(GoldenRatio())
def test_sympy__core__numbers__Half():
    from sympy.core.numbers import Half
    assert _test_args(Half())
def test_sympy__core__numbers__ImaginaryUnit():
    from sympy.core.numbers import ImaginaryUnit
    assert _test_args(ImaginaryUnit())
def test_sympy__core__numbers__Infinity():
    from sympy.core.numbers import Infinity
    assert _test_args(Infinity())
def test_sympy__core__numbers__Integer():
    from sympy.core.numbers import Integer
    assert _test_args(Integer(7))
@SKIP("abstract class")
def test_sympy__core__numbers__IntegerConstant():
    pass
def test_sympy__core__numbers__NaN():
    from sympy.core.numbers import NaN
    assert _test_args(NaN())
def test_sympy__core__numbers__NegativeInfinity():
    from sympy.core.numbers import NegativeInfinity
    assert _test_args(NegativeInfinity())
def test_sympy__core__numbers__NegativeOne():
    from sympy.core.numbers import NegativeOne
    assert _test_args(NegativeOne())
def test_sympy__core__numbers__Number():
    from sympy.core.numbers import Number
    assert _test_args(Number(1, 7))
def test_sympy__core__numbers__NumberSymbol():
    from sympy.core.numbers import NumberSymbol
    assert _test_args(NumberSymbol())
def test_sympy__core__numbers__One():
    from sympy.core.numbers import One
    assert _test_args(One())
def test_sympy__core__numbers__Pi():
    from sympy.core.numbers import Pi
    assert _test_args(Pi())
def test_sympy__core__numbers__Rational():
    from sympy.core.numbers import Rational
    assert _test_args(Rational(1, 7))
@SKIP("abstract class")
def test_sympy__core__numbers__RationalConstant():
    pass
def test_sympy__core__numbers__Zero():
    from sympy.core.numbers import Zero
    assert _test_args(Zero())
@SKIP("abstract class")
def test_sympy__core__operations__AssocOp():
    pass
@SKIP("abstract class")
def test_sympy__core__operations__LatticeOp():
    pass
def test_sympy__core__power__Pow():
    from sympy.core.power import Pow
    assert _test_args(Pow(x, 2))
def test_sympy__algebras__quaternion__Quaternion():
    from sympy.algebras.quaternion import Quaternion
    assert _test_args(Quaternion(x, 1, 2, 3))
def test_sympy__core__relational__Equality():
    from sympy.core.relational import Equality
    assert _test_args(Equality(x, 2))
def test_sympy__core__relational__GreaterThan():
    from sympy.core.relational import GreaterThan
    assert _test_args(GreaterThan(x, 2))
def test_sympy__core__relational__LessThan():
    from sympy.core.relational import LessThan
    assert _test_args(LessThan(x, 2))
@SKIP("abstract class")
def test_sympy__core__relational__Relational():
    pass
def test_sympy__core__relational__StrictGreaterThan():
    from sympy.core.relational import StrictGreaterThan
    assert _test_args(StrictGreaterThan(x, 2))
def test_sympy__core__relational__StrictLessThan():
    from sympy.core.relational import StrictLessThan
    assert _test_args(StrictLessThan(x, 2))
def test_sympy__core__relational__Unequality():
    from sympy.core.relational import Unequality
    assert _test_args(Unequality(x, 2))
# --- sympy.sandbox / sympy.calculus ----------------------------------------
def test_sympy__sandbox__indexed_integrals__IndexedIntegral():
    from sympy.tensor import IndexedBase, Idx
    from sympy.sandbox.indexed_integrals import IndexedIntegral
    A = IndexedBase('A')
    # Integrate an indexed expression both over Idx symbols and over
    # plain integer symbols used as indices.
    i, j = symbols('i j', integer=True)
    a1, a2 = symbols('a1:3', cls=Idx)
    assert _test_args(IndexedIntegral(A[a1], A[a2]))
    assert _test_args(IndexedIntegral(A[i], A[j]))
def test_sympy__calculus__util__AccumulationBounds():
    from sympy.calculus.util import AccumulationBounds
    assert _test_args(AccumulationBounds(0, 1))
def test_sympy__sets__ordinals__OmegaPower():
from sympy.sets.ordinals import OmegaPower
assert _test_args(OmegaPower(1, 1))
def test_sympy__sets__ordinals__Ordinal():
from sympy.sets.ordinals import Ordinal, OmegaPower
assert _test_args(Ordinal(OmegaPower(2, 1)))
def test_sympy__sets__ordinals__OrdinalOmega():
from sympy.sets.ordinals import OrdinalOmega
assert _test_args(OrdinalOmega())
def test_sympy__sets__ordinals__OrdinalZero():
from sympy.sets.ordinals import OrdinalZero
assert _test_args(OrdinalZero())
def test_sympy__sets__sets__EmptySet():
from sympy.sets.sets import EmptySet
assert _test_args(EmptySet())
def test_sympy__sets__sets__UniversalSet():
from sympy.sets.sets import UniversalSet
assert _test_args(UniversalSet())
def test_sympy__sets__sets__FiniteSet():
from sympy.sets.sets import FiniteSet
assert _test_args(FiniteSet(x, y, z))
def test_sympy__sets__sets__Interval():
from sympy.sets.sets import Interval
assert _test_args(Interval(0, 1))
def test_sympy__sets__sets__ProductSet():
from sympy.sets.sets import ProductSet, Interval
assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1)))
@SKIP("does it make sense to test this?")
def test_sympy__sets__sets__Set():
from sympy.sets.sets import Set
assert _test_args(Set())
def test_sympy__sets__sets__Intersection():
    from sympy.sets.sets import Intersection, Interval
    # evaluate=False keeps the unevaluated Intersection node so its args
    # (rather than the simplified interval) are what gets checked.
    overlap = Intersection(Interval(0, 3), Interval(2, 4), evaluate=False)
    assert _test_args(overlap)
def test_sympy__sets__sets__Union():
from sympy.sets.sets import Union, Interval
assert _test_args(Union(Interval(0, 1), Interval(2, 3)))
def test_sympy__sets__sets__Complement():
from sympy.sets.sets import Complement
assert _test_args(Complement(Interval(0, 2), Interval(0, 1)))
def test_sympy__sets__sets__SymmetricDifference():
    from sympy.sets.sets import FiniteSet, SymmetricDifference
    # The expression is already inside parentheses, so the backslash line
    # continuation was redundant (and a lint warning); drop it.
    assert _test_args(SymmetricDifference(FiniteSet(1, 2, 3),
                                          FiniteSet(2, 3, 4)))
def test_sympy__core__trace__Tr():
from sympy.core.trace import Tr
a, b = symbols('a b')
assert _test_args(Tr(a + b))
def test_sympy__sets__setexpr__SetExpr():
from sympy.sets.setexpr import SetExpr
assert _test_args(SetExpr(Interval(0, 1)))
def test_sympy__sets__fancysets__Naturals():
from sympy.sets.fancysets import Naturals
assert _test_args(Naturals())
def test_sympy__sets__fancysets__Naturals0():
from sympy.sets.fancysets import Naturals0
assert _test_args(Naturals0())
def test_sympy__sets__fancysets__Integers():
from sympy.sets.fancysets import Integers
assert _test_args(Integers())
def test_sympy__sets__fancysets__Reals():
from sympy.sets.fancysets import Reals
assert _test_args(Reals())
def test_sympy__sets__fancysets__Complexes():
from sympy.sets.fancysets import Complexes
assert _test_args(Complexes())
def test_sympy__sets__fancysets__ComplexRegion():
    from sympy.sets.fancysets import ComplexRegion
    from sympy import S
    from sympy.sets import Interval
    # Cartesian form: real part in [0, 1], imaginary part in [2, 3].
    re_part = Interval(0, 1)
    im_part = Interval(2, 3)
    assert _test_args(ComplexRegion(re_part*im_part))
    # Polar form: radius in [0, 1], angle spanning a full turn.
    angle = Interval(0, 2*S.Pi)
    assert _test_args(ComplexRegion(re_part*angle, polar=True))
def test_sympy__sets__fancysets__ImageSet():
from sympy.sets.fancysets import ImageSet
from sympy import S, Symbol
x = Symbol('x')
assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals))
def test_sympy__sets__fancysets__Range():
from sympy.sets.fancysets import Range
assert _test_args(Range(1, 5, 1))
def test_sympy__sets__conditionset__ConditionSet():
from sympy.sets.conditionset import ConditionSet
from sympy import S, Symbol
x = Symbol('x')
assert _test_args(ConditionSet(x, Eq(x**2, 1), S.Reals))
def test_sympy__sets__contains__Contains():
from sympy.sets.fancysets import Range
from sympy.sets.contains import Contains
assert _test_args(Contains(x, Range(0, 10, 2)))
# STATS
# Shared fixtures reused by the sympy.stats tests below: a standard normal
# continuous distribution and a fair six-sided die discrete distribution.
from sympy.stats.crv_types import NormalDistribution
nd = NormalDistribution(0, 1)
from sympy.stats.frv_types import DieDistribution
die = DieDistribution(6)
def test_sympy__stats__crv__ContinuousDomain():
from sympy.stats.crv import ContinuousDomain
assert _test_args(ContinuousDomain({x}, Interval(-oo, oo)))
def test_sympy__stats__crv__SingleContinuousDomain():
from sympy.stats.crv import SingleContinuousDomain
assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo)))
def test_sympy__stats__crv__ProductContinuousDomain():
from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain
D = SingleContinuousDomain(x, Interval(-oo, oo))
E = SingleContinuousDomain(y, Interval(0, oo))
assert _test_args(ProductContinuousDomain(D, E))
def test_sympy__stats__crv__ConditionalContinuousDomain():
from sympy.stats.crv import (SingleContinuousDomain,
ConditionalContinuousDomain)
D = SingleContinuousDomain(x, Interval(-oo, oo))
assert _test_args(ConditionalContinuousDomain(D, x > 0))
def test_sympy__stats__crv__ContinuousPSpace():
from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain
D = SingleContinuousDomain(x, Interval(-oo, oo))
assert _test_args(ContinuousPSpace(D, nd))
def test_sympy__stats__crv__SingleContinuousPSpace():
from sympy.stats.crv import SingleContinuousPSpace
assert _test_args(SingleContinuousPSpace(x, nd))
def test_sympy__stats__crv__ProductContinuousPSpace():
    from sympy.stats.crv import ProductContinuousPSpace, SingleContinuousPSpace
    # Two independent standard-normal spaces combined into a product space.
    space_x = SingleContinuousPSpace(x, nd)
    space_y = SingleContinuousPSpace(y, nd)
    assert _test_args(ProductContinuousPSpace(space_x, space_y))
@SKIP("abstract class")
def test_sympy__stats__crv__SingleContinuousDistribution():
pass
def test_sympy__stats__drv__SingleDiscreteDomain():
from sympy.stats.drv import SingleDiscreteDomain
assert _test_args(SingleDiscreteDomain(x, S.Naturals))
def test_sympy__stats__drv__SingleDiscretePSpace():
from sympy.stats.drv import SingleDiscretePSpace
from sympy.stats.drv_types import PoissonDistribution
assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1)))
def test_sympy__stats__drv__DiscretePSpace():
from sympy.stats.drv import DiscretePSpace, SingleDiscreteDomain
density = Lambda(x, 2**(-x))
domain = SingleDiscreteDomain(x, S.Naturals)
assert _test_args(DiscretePSpace(domain, density))
def test_sympy__stats__drv__ConditionalDiscreteDomain():
from sympy.stats.drv import ConditionalDiscreteDomain, SingleDiscreteDomain
X = SingleDiscreteDomain(x, S.Naturals0)
assert _test_args(ConditionalDiscreteDomain(X, x > 2))
@SKIP("abstract class")
def test_sympy__stats__drv__SingleDiscreteDistribution():
pass
@SKIP("abstract class")
def test_sympy__stats__drv__DiscreteDomain():
pass
def test_sympy__stats__rv__RandomDomain():
from sympy.stats.rv import RandomDomain
from sympy.sets.sets import FiniteSet
assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3)))
def test_sympy__stats__rv__SingleDomain():
from sympy.stats.rv import SingleDomain
from sympy.sets.sets import FiniteSet
assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3)))
def test_sympy__stats__rv__ConditionalDomain():
from sympy.stats.rv import ConditionalDomain, RandomDomain
from sympy.sets.sets import FiniteSet
D = RandomDomain(FiniteSet(x), FiniteSet(1, 2))
assert _test_args(ConditionalDomain(D, x > 1))
def test_sympy__stats__rv__PSpace():
    from sympy.stats.rv import PSpace, RandomDomain
    from sympy import FiniteSet
    # x ranges over the six die faces; `die` is the shared DieDistribution.
    dom = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 5, 6))
    assert _test_args(PSpace(dom, die))
@SKIP("abstract Class")
def test_sympy__stats__rv__SinglePSpace():
pass
def test_sympy__stats__rv__RandomSymbol():
from sympy.stats.rv import RandomSymbol
from sympy.stats.crv import SingleContinuousPSpace
A = SingleContinuousPSpace(x, nd)
assert _test_args(RandomSymbol(x, A))
def test_sympy__stats__rv__ProductPSpace():
from sympy.stats.rv import ProductPSpace
from sympy.stats.crv import SingleContinuousPSpace
A = SingleContinuousPSpace(x, nd)
B = SingleContinuousPSpace(y, nd)
assert _test_args(ProductPSpace(A, B))
def test_sympy__stats__rv__ProductDomain():
from sympy.stats.rv import ProductDomain, SingleDomain
D = SingleDomain(x, Interval(-oo, oo))
E = SingleDomain(y, Interval(0, oo))
assert _test_args(ProductDomain(D, E))
def test_sympy__stats__symbolic_probability__Probability():
from sympy.stats.symbolic_probability import Probability
from sympy.stats import Normal
X = Normal('X', 0, 1)
assert _test_args(Probability(X > 0))
def test_sympy__stats__symbolic_probability__Expectation():
from sympy.stats.symbolic_probability import Expectation
from sympy.stats import Normal
X = Normal('X', 0, 1)
assert _test_args(Expectation(X > 0))
def test_sympy__stats__symbolic_probability__Covariance():
from sympy.stats.symbolic_probability import Covariance
from sympy.stats import Normal
X = Normal('X', 0, 1)
Y = Normal('Y', 0, 3)
assert _test_args(Covariance(X, Y))
def test_sympy__stats__symbolic_probability__Variance():
from sympy.stats.symbolic_probability import Variance
from sympy.stats import Normal
X = Normal('X', 0, 1)
assert _test_args(Variance(X))
def test_sympy__stats__frv_types__DiscreteUniformDistribution():
    from sympy.stats.frv_types import DiscreteUniformDistribution
    from sympy.core.containers import Tuple
    # Uniform over the values 0..5. `range` can be star-unpacked directly;
    # the intermediate list(...) was an unnecessary materialization.
    assert _test_args(DiscreteUniformDistribution(Tuple(*range(6))))
def test_sympy__stats__frv_types__DieDistribution():
from sympy.stats.frv_types import DieDistribution
assert _test_args(DieDistribution(6))
def test_sympy__stats__frv_types__BernoulliDistribution():
from sympy.stats.frv_types import BernoulliDistribution
assert _test_args(BernoulliDistribution(S.Half, 0, 1))
def test_sympy__stats__frv_types__BinomialDistribution():
from sympy.stats.frv_types import BinomialDistribution
assert _test_args(BinomialDistribution(5, S.Half, 1, 0))
def test_sympy__stats__frv_types__HypergeometricDistribution():
from sympy.stats.frv_types import HypergeometricDistribution
assert _test_args(HypergeometricDistribution(10, 5, 3))
def test_sympy__stats__frv_types__RademacherDistribution():
from sympy.stats.frv_types import RademacherDistribution
assert _test_args(RademacherDistribution())
def test_sympy__stats__frv__FiniteDomain():
from sympy.stats.frv import FiniteDomain
assert _test_args(FiniteDomain({(x, 1), (x, 2)})) # x can be 1 or 2
def test_sympy__stats__frv__SingleFiniteDomain():
from sympy.stats.frv import SingleFiniteDomain
assert _test_args(SingleFiniteDomain(x, {1, 2})) # x can be 1 or 2
def test_sympy__stats__frv__ProductFiniteDomain():
from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain
xd = SingleFiniteDomain(x, {1, 2})
yd = SingleFiniteDomain(y, {1, 2})
assert _test_args(ProductFiniteDomain(xd, yd))
def test_sympy__stats__frv__ConditionalFiniteDomain():
    from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain
    # Restrict a two-point domain {1, 2} by the condition x > 1.
    base = SingleFiniteDomain(x, {1, 2})
    assert _test_args(ConditionalFiniteDomain(base, x > 1))
def test_sympy__stats__frv__FinitePSpace():
    from sympy.stats.frv import FinitePSpace, SingleFiniteDomain
    # Dead code removed: the original built a six-point domain and an unused
    # probability `p = 1.0/6`, then immediately rebound `xd` to the two-point
    # domain actually used below.
    xd = SingleFiniteDomain(x, {1, 2})  # x can be 1 or 2, each with prob 1/2
    assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half}))
def test_sympy__stats__frv__SingleFinitePSpace():
from sympy.stats.frv import SingleFinitePSpace
from sympy import Symbol
assert _test_args(SingleFinitePSpace(Symbol('x'), die))
def test_sympy__stats__frv__ProductFinitePSpace():
from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace
from sympy import Symbol
xp = SingleFinitePSpace(Symbol('x'), die)
yp = SingleFinitePSpace(Symbol('y'), die)
assert _test_args(ProductFinitePSpace(xp, yp))
@SKIP("abstract class")
def test_sympy__stats__frv__SingleFiniteDistribution():
pass
@SKIP("abstract class")
def test_sympy__stats__crv__ContinuousDistribution():
pass
def test_sympy__stats__frv_types__FiniteDistributionHandmade():
from sympy.stats.frv_types import FiniteDistributionHandmade
assert _test_args(FiniteDistributionHandmade({1: 1}))
def test_sympy__stats__crv__ContinuousDistributionHandmade():
from sympy.stats.crv import ContinuousDistributionHandmade
from sympy import Symbol, Interval
assert _test_args(ContinuousDistributionHandmade(Symbol('x'),
Interval(0, 2)))
def test_sympy__stats__rv__Density():
from sympy.stats.rv import Density
from sympy.stats.crv_types import Normal
assert _test_args(Density(Normal('x', 0, 1)))
def test_sympy__stats__crv_types__ArcsinDistribution():
from sympy.stats.crv_types import ArcsinDistribution
assert _test_args(ArcsinDistribution(0, 1))
def test_sympy__stats__crv_types__BeniniDistribution():
from sympy.stats.crv_types import BeniniDistribution
assert _test_args(BeniniDistribution(1, 1, 1))
def test_sympy__stats__crv_types__BetaDistribution():
from sympy.stats.crv_types import BetaDistribution
assert _test_args(BetaDistribution(1, 1))
def test_sympy__stats__crv_types__BetaPrimeDistribution():
from sympy.stats.crv_types import BetaPrimeDistribution
assert _test_args(BetaPrimeDistribution(1, 1))
def test_sympy__stats__crv_types__CauchyDistribution():
from sympy.stats.crv_types import CauchyDistribution
assert _test_args(CauchyDistribution(0, 1))
def test_sympy__stats__crv_types__ChiDistribution():
from sympy.stats.crv_types import ChiDistribution
assert _test_args(ChiDistribution(1))
def test_sympy__stats__crv_types__ChiNoncentralDistribution():
    from sympy.stats.crv_types import ChiNoncentralDistribution
    # Two positional parameters, both set to 1 for the args round-trip.
    dist = ChiNoncentralDistribution(1, 1)
    assert _test_args(dist)
def test_sympy__stats__crv_types__ChiSquaredDistribution():
from sympy.stats.crv_types import ChiSquaredDistribution
assert _test_args(ChiSquaredDistribution(1))
def test_sympy__stats__crv_types__DagumDistribution():
from sympy.stats.crv_types import DagumDistribution
assert _test_args(DagumDistribution(1, 1, 1))
def test_sympy__stats__crv_types__ExponentialDistribution():
from sympy.stats.crv_types import ExponentialDistribution
assert _test_args(ExponentialDistribution(1))
def test_sympy__stats__crv_types__FDistributionDistribution():
from sympy.stats.crv_types import FDistributionDistribution
assert _test_args(FDistributionDistribution(1, 1))
def test_sympy__stats__crv_types__FisherZDistribution():
from sympy.stats.crv_types import FisherZDistribution
assert _test_args(FisherZDistribution(1, 1))
def test_sympy__stats__crv_types__FrechetDistribution():
from sympy.stats.crv_types import FrechetDistribution
assert _test_args(FrechetDistribution(1, 1, 1))
def test_sympy__stats__crv_types__GammaInverseDistribution():
from sympy.stats.crv_types import GammaInverseDistribution
assert _test_args(GammaInverseDistribution(1, 1))
def test_sympy__stats__crv_types__GammaDistribution():
from sympy.stats.crv_types import GammaDistribution
assert _test_args(GammaDistribution(1, 1))
def test_sympy__stats__crv_types__GumbelDistribution():
from sympy.stats.crv_types import GumbelDistribution
assert _test_args(GumbelDistribution(1, 1))
def test_sympy__stats__crv_types__GompertzDistribution():
from sympy.stats.crv_types import GompertzDistribution
assert _test_args(GompertzDistribution(1, 1))
def test_sympy__stats__crv_types__KumaraswamyDistribution():
from sympy.stats.crv_types import KumaraswamyDistribution
assert _test_args(KumaraswamyDistribution(1, 1))
def test_sympy__stats__crv_types__LaplaceDistribution():
from sympy.stats.crv_types import LaplaceDistribution
assert _test_args(LaplaceDistribution(0, 1))
def test_sympy__stats__crv_types__LogisticDistribution():
from sympy.stats.crv_types import LogisticDistribution
assert _test_args(LogisticDistribution(0, 1))
def test_sympy__stats__crv_types__LogNormalDistribution():
from sympy.stats.crv_types import LogNormalDistribution
assert _test_args(LogNormalDistribution(0, 1))
def test_sympy__stats__crv_types__MaxwellDistribution():
from sympy.stats.crv_types import MaxwellDistribution
assert _test_args(MaxwellDistribution(1))
def test_sympy__stats__crv_types__NakagamiDistribution():
from sympy.stats.crv_types import NakagamiDistribution
assert _test_args(NakagamiDistribution(1, 1))
def test_sympy__stats__crv_types__NormalDistribution():
from sympy.stats.crv_types import NormalDistribution
assert _test_args(NormalDistribution(0, 1))
def test_sympy__stats__crv_types__ParetoDistribution():
from sympy.stats.crv_types import ParetoDistribution
assert _test_args(ParetoDistribution(1, 1))
def test_sympy__stats__crv_types__QuadraticUDistribution():
from sympy.stats.crv_types import QuadraticUDistribution
assert _test_args(QuadraticUDistribution(1, 2))
def test_sympy__stats__crv_types__RaisedCosineDistribution():
from sympy.stats.crv_types import RaisedCosineDistribution
assert _test_args(RaisedCosineDistribution(1, 1))
def test_sympy__stats__crv_types__RayleighDistribution():
from sympy.stats.crv_types import RayleighDistribution
assert _test_args(RayleighDistribution(1))
def test_sympy__stats__crv_types__ShiftedGompertzDistribution():
from sympy.stats.crv_types import ShiftedGompertzDistribution
assert _test_args(ShiftedGompertzDistribution(1, 1))
def test_sympy__stats__crv_types__StudentTDistribution():
from sympy.stats.crv_types import StudentTDistribution
assert _test_args(StudentTDistribution(1))
def test_sympy__stats__crv_types__TrapezoidalDistribution():
from sympy.stats.crv_types import TrapezoidalDistribution
assert _test_args(TrapezoidalDistribution(1, 2, 3, 4))
def test_sympy__stats__crv_types__TriangularDistribution():
from sympy.stats.crv_types import TriangularDistribution
assert _test_args(TriangularDistribution(-1, 0, 1))
def test_sympy__stats__crv_types__UniformDistribution():
from sympy.stats.crv_types import UniformDistribution
assert _test_args(UniformDistribution(0, 1))
def test_sympy__stats__crv_types__UniformSumDistribution():
from sympy.stats.crv_types import UniformSumDistribution
assert _test_args(UniformSumDistribution(1))
def test_sympy__stats__crv_types__VonMisesDistribution():
from sympy.stats.crv_types import VonMisesDistribution
assert _test_args(VonMisesDistribution(1, 1))
def test_sympy__stats__crv_types__WeibullDistribution():
from sympy.stats.crv_types import WeibullDistribution
assert _test_args(WeibullDistribution(1, 1))
def test_sympy__stats__crv_types__WignerSemicircleDistribution():
from sympy.stats.crv_types import WignerSemicircleDistribution
assert _test_args(WignerSemicircleDistribution(1))
def test_sympy__stats__drv_types__PoissonDistribution():
from sympy.stats.drv_types import PoissonDistribution
assert _test_args(PoissonDistribution(1))
def test_sympy__stats__drv_types__GeometricDistribution():
    from sympy.stats.drv_types import GeometricDistribution
    # Use the exact rational S.Half rather than the float literal .5,
    # matching the convention of the sibling distribution tests
    # (e.g. BernoulliDistribution / BinomialDistribution above).
    assert _test_args(GeometricDistribution(S.Half))
def test_sympy__core__symbol__Dummy():
from sympy.core.symbol import Dummy
assert _test_args(Dummy('t'))
def test_sympy__core__symbol__Symbol():
from sympy.core.symbol import Symbol
assert _test_args(Symbol('t'))
def test_sympy__core__symbol__Wild():
from sympy.core.symbol import Wild
assert _test_args(Wild('x', exclude=[x]))
@SKIP("abstract class")
def test_sympy__functions__combinatorial__factorials__CombinatorialFunction():
pass
def test_sympy__functions__combinatorial__factorials__FallingFactorial():
from sympy.functions.combinatorial.factorials import FallingFactorial
assert _test_args(FallingFactorial(2, x))
def test_sympy__functions__combinatorial__factorials__MultiFactorial():
from sympy.functions.combinatorial.factorials import MultiFactorial
assert _test_args(MultiFactorial(x))
def test_sympy__functions__combinatorial__factorials__RisingFactorial():
from sympy.functions.combinatorial.factorials import RisingFactorial
assert _test_args(RisingFactorial(2, x))
def test_sympy__functions__combinatorial__factorials__binomial():
from sympy.functions.combinatorial.factorials import binomial
assert _test_args(binomial(2, x))
def test_sympy__functions__combinatorial__factorials__subfactorial():
from sympy.functions.combinatorial.factorials import subfactorial
assert _test_args(subfactorial(1))
def test_sympy__functions__combinatorial__factorials__factorial():
from sympy.functions.combinatorial.factorials import factorial
assert _test_args(factorial(x))
def test_sympy__functions__combinatorial__factorials__factorial2():
from sympy.functions.combinatorial.factorials import factorial2
assert _test_args(factorial2(x))
def test_sympy__functions__combinatorial__numbers__bell():
from sympy.functions.combinatorial.numbers import bell
assert _test_args(bell(x, y))
def test_sympy__functions__combinatorial__numbers__bernoulli():
from sympy.functions.combinatorial.numbers import bernoulli
assert _test_args(bernoulli(x))
def test_sympy__functions__combinatorial__numbers__catalan():
from sympy.functions.combinatorial.numbers import catalan
assert _test_args(catalan(x))
def test_sympy__functions__combinatorial__numbers__genocchi():
from sympy.functions.combinatorial.numbers import genocchi
assert _test_args(genocchi(x))
def test_sympy__functions__combinatorial__numbers__euler():
from sympy.functions.combinatorial.numbers import euler
assert _test_args(euler(x))
def test_sympy__functions__combinatorial__numbers__fibonacci():
from sympy.functions.combinatorial.numbers import fibonacci
assert _test_args(fibonacci(x))
def test_sympy__functions__combinatorial__numbers__harmonic():
from sympy.functions.combinatorial.numbers import harmonic
assert _test_args(harmonic(x, 2))
def test_sympy__functions__combinatorial__numbers__lucas():
from sympy.functions.combinatorial.numbers import lucas
assert _test_args(lucas(x))
def test_sympy__functions__combinatorial__numbers__partition():
from sympy.core.symbol import Symbol
from sympy.functions.combinatorial.numbers import partition
assert _test_args(partition(Symbol('a', integer=True)))
def test_sympy__functions__elementary__complexes__Abs():
from sympy.functions.elementary.complexes import Abs
assert _test_args(Abs(x))
def test_sympy__functions__elementary__complexes__adjoint():
from sympy.functions.elementary.complexes import adjoint
assert _test_args(adjoint(x))
def test_sympy__functions__elementary__complexes__arg():
from sympy.functions.elementary.complexes import arg
assert _test_args(arg(x))
def test_sympy__functions__elementary__complexes__conjugate():
from sympy.functions.elementary.complexes import conjugate
assert _test_args(conjugate(x))
def test_sympy__functions__elementary__complexes__im():
from sympy.functions.elementary.complexes import im
assert _test_args(im(x))
def test_sympy__functions__elementary__complexes__re():
from sympy.functions.elementary.complexes import re
assert _test_args(re(x))
def test_sympy__functions__elementary__complexes__sign():
from sympy.functions.elementary.complexes import sign
assert _test_args(sign(x))
def test_sympy__functions__elementary__complexes__polar_lift():
from sympy.functions.elementary.complexes import polar_lift
assert _test_args(polar_lift(x))
def test_sympy__functions__elementary__complexes__periodic_argument():
from sympy.functions.elementary.complexes import periodic_argument
assert _test_args(periodic_argument(x, y))
def test_sympy__functions__elementary__complexes__principal_branch():
from sympy.functions.elementary.complexes import principal_branch
assert _test_args(principal_branch(x, y))
def test_sympy__functions__elementary__complexes__transpose():
from sympy.functions.elementary.complexes import transpose
assert _test_args(transpose(x))
def test_sympy__functions__elementary__exponential__LambertW():
from sympy.functions.elementary.exponential import LambertW
assert _test_args(LambertW(2))
@SKIP("abstract class")
def test_sympy__functions__elementary__exponential__ExpBase():
pass
def test_sympy__functions__elementary__exponential__exp():
from sympy.functions.elementary.exponential import exp
assert _test_args(exp(2))
def test_sympy__functions__elementary__exponential__exp_polar():
from sympy.functions.elementary.exponential import exp_polar
assert _test_args(exp_polar(2))
def test_sympy__functions__elementary__exponential__log():
from sympy.functions.elementary.exponential import log
assert _test_args(log(2))
@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction():
pass
@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction():
pass
@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__InverseHyperbolicFunction():
pass
def test_sympy__functions__elementary__hyperbolic__acosh():
from sympy.functions.elementary.hyperbolic import acosh
assert _test_args(acosh(2))
def test_sympy__functions__elementary__hyperbolic__acoth():
from sympy.functions.elementary.hyperbolic import acoth
assert _test_args(acoth(2))
def test_sympy__functions__elementary__hyperbolic__asinh():
from sympy.functions.elementary.hyperbolic import asinh
assert _test_args(asinh(2))
def test_sympy__functions__elementary__hyperbolic__atanh():
from sympy.functions.elementary.hyperbolic import atanh
assert _test_args(atanh(2))
def test_sympy__functions__elementary__hyperbolic__asech():
from sympy.functions.elementary.hyperbolic import asech
assert _test_args(asech(2))
def test_sympy__functions__elementary__hyperbolic__acsch():
from sympy.functions.elementary.hyperbolic import acsch
assert _test_args(acsch(2))
def test_sympy__functions__elementary__hyperbolic__cosh():
from sympy.functions.elementary.hyperbolic import cosh
assert _test_args(cosh(2))
def test_sympy__functions__elementary__hyperbolic__coth():
from sympy.functions.elementary.hyperbolic import coth
assert _test_args(coth(2))
def test_sympy__functions__elementary__hyperbolic__csch():
from sympy.functions.elementary.hyperbolic import csch
assert _test_args(csch(2))
def test_sympy__functions__elementary__hyperbolic__sech():
from sympy.functions.elementary.hyperbolic import sech
assert _test_args(sech(2))
def test_sympy__functions__elementary__hyperbolic__sinh():
from sympy.functions.elementary.hyperbolic import sinh
assert _test_args(sinh(2))
def test_sympy__functions__elementary__hyperbolic__tanh():
from sympy.functions.elementary.hyperbolic import tanh
assert _test_args(tanh(2))
@SKIP("does this work at all?")
def test_sympy__functions__elementary__integers__RoundFunction():
from sympy.functions.elementary.integers import RoundFunction
assert _test_args(RoundFunction())
def test_sympy__functions__elementary__integers__ceiling():
from sympy.functions.elementary.integers import ceiling
assert _test_args(ceiling(x))
def test_sympy__functions__elementary__integers__floor():
from sympy.functions.elementary.integers import floor
assert _test_args(floor(x))
def test_sympy__functions__elementary__integers__frac():
from sympy.functions.elementary.integers import frac
assert _test_args(frac(x))
def test_sympy__functions__elementary__miscellaneous__IdentityFunction():
from sympy.functions.elementary.miscellaneous import IdentityFunction
assert _test_args(IdentityFunction())
def test_sympy__functions__elementary__miscellaneous__Max():
from sympy.functions.elementary.miscellaneous import Max
assert _test_args(Max(x, 2))
def test_sympy__functions__elementary__miscellaneous__Min():
from sympy.functions.elementary.miscellaneous import Min
assert _test_args(Min(x, 2))
@SKIP("abstract class")
def test_sympy__functions__elementary__miscellaneous__MinMaxBase():
pass
def test_sympy__functions__elementary__piecewise__ExprCondPair():
from sympy.functions.elementary.piecewise import ExprCondPair
assert _test_args(ExprCondPair(1, True))
def test_sympy__functions__elementary__piecewise__Piecewise():
    from sympy.functions.elementary.piecewise import Piecewise
    # A unit step: 1 where x >= 0, 0 otherwise.
    step = Piecewise((1, x >= 0), (0, True))
    assert _test_args(step)
@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__TrigonometricFunction():
pass
@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction():
pass
@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction():
pass
def test_sympy__functions__elementary__trigonometric__acos():
from sympy.functions.elementary.trigonometric import acos
assert _test_args(acos(2))
def test_sympy__functions__elementary__trigonometric__acot():
from sympy.functions.elementary.trigonometric import acot
assert _test_args(acot(2))
def test_sympy__functions__elementary__trigonometric__asin():
from sympy.functions.elementary.trigonometric import asin
assert _test_args(asin(2))
def test_sympy__functions__elementary__trigonometric__asec():
from sympy.functions.elementary.trigonometric import asec
assert _test_args(asec(2))
def test_sympy__functions__elementary__trigonometric__acsc():
from sympy.functions.elementary.trigonometric import acsc
assert _test_args(acsc(2))
def test_sympy__functions__elementary__trigonometric__atan():
from sympy.functions.elementary.trigonometric import atan
assert _test_args(atan(2))
def test_sympy__functions__elementary__trigonometric__atan2():
from sympy.functions.elementary.trigonometric import atan2
assert _test_args(atan2(2, 3))
def test_sympy__functions__elementary__trigonometric__cos():
from sympy.functions.elementary.trigonometric import cos
assert _test_args(cos(2))
def test_sympy__functions__elementary__trigonometric__csc():
from sympy.functions.elementary.trigonometric import csc
assert _test_args(csc(2))
def test_sympy__functions__elementary__trigonometric__cot():
from sympy.functions.elementary.trigonometric import cot
assert _test_args(cot(2))
def test_sympy__functions__elementary__trigonometric__sin():
    # Import locally for consistency with every sibling test in this section;
    # the original relied on `sin` being available at module level.
    from sympy.functions.elementary.trigonometric import sin
    assert _test_args(sin(2))
def test_sympy__functions__elementary__trigonometric__sinc():
from sympy.functions.elementary.trigonometric import sinc
assert _test_args(sinc(2))
def test_sympy__functions__elementary__trigonometric__sec():
from sympy.functions.elementary.trigonometric import sec
assert _test_args(sec(2))
def test_sympy__functions__elementary__trigonometric__tan():
from sympy.functions.elementary.trigonometric import tan
assert _test_args(tan(2))
@SKIP("abstract class")
def test_sympy__functions__special__bessel__BesselBase():
pass
@SKIP("abstract class")
def test_sympy__functions__special__bessel__SphericalBesselBase():
pass
@SKIP("abstract class")
def test_sympy__functions__special__bessel__SphericalHankelBase():
pass
def test_sympy__functions__special__bessel__besseli():
from sympy.functions.special.bessel import besseli
assert _test_args(besseli(x, 1))
def test_sympy__functions__special__bessel__besselj():
from sympy.functions.special.bessel import besselj
assert _test_args(besselj(x, 1))
def test_sympy__functions__special__bessel__besselk():
from sympy.functions.special.bessel import besselk
assert _test_args(besselk(x, 1))
def test_sympy__functions__special__bessel__bessely():
from sympy.functions.special.bessel import bessely
assert _test_args(bessely(x, 1))
def test_sympy__functions__special__bessel__hankel1():
from sympy.functions.special.bessel import hankel1
assert _test_args(hankel1(x, 1))
def test_sympy__functions__special__bessel__hankel2():
from sympy.functions.special.bessel import hankel2
assert _test_args(hankel2(x, 1))
def test_sympy__functions__special__bessel__jn():
from sympy.functions.special.bessel import jn
assert _test_args(jn(0, x))
def test_sympy__functions__special__bessel__yn():
from sympy.functions.special.bessel import yn
assert _test_args(yn(0, x))
def test_sympy__functions__special__bessel__hn1():
from sympy.functions.special.bessel import hn1
assert _test_args(hn1(0, x))
def test_sympy__functions__special__bessel__hn2():
from sympy.functions.special.bessel import hn2
assert _test_args(hn2(0, x))
# NOTE(review): every other abstract-class placeholder in this file is
# decorated with @SKIP("abstract class"); confirm whether AiryBase should
# carry the decorator too, or whether the bare pass is intentional.
def test_sympy__functions__special__bessel__AiryBase():
    pass
def test_sympy__functions__special__bessel__airyai():
from sympy.functions.special.bessel import airyai
assert _test_args(airyai(2))
def test_sympy__functions__special__bessel__airybi():
from sympy.functions.special.bessel import airybi
assert _test_args(airybi(2))
def test_sympy__functions__special__bessel__airyaiprime():
from sympy.functions.special.bessel import airyaiprime
assert _test_args(airyaiprime(2))
def test_sympy__functions__special__bessel__airybiprime():
from sympy.functions.special.bessel import airybiprime
assert _test_args(airybiprime(2))
def test_sympy__functions__special__elliptic_integrals__elliptic_k():
from sympy.functions.special.elliptic_integrals import elliptic_k as K
assert _test_args(K(x))
def test_sympy__functions__special__elliptic_integrals__elliptic_f():
from sympy.functions.special.elliptic_integrals import elliptic_f as F
assert _test_args(F(x, y))
def test_sympy__functions__special__elliptic_integrals__elliptic_e():
from sympy.functions.special.elliptic_integrals import elliptic_e as E
assert _test_args(E(x))
assert _test_args(E(x, y))
def test_sympy__functions__special__elliptic_integrals__elliptic_pi():
    from sympy.functions.special.elliptic_integrals import elliptic_pi as P
    # Exercise both the complete (2-arg) and incomplete (3-arg) forms.
    for args in ((x, y), (x, y, z)):
        assert _test_args(P(*args))
def test_sympy__functions__special__delta_functions__DiracDelta():
    from sympy.functions.special.delta_functions import DiracDelta
    # First derivative of the delta: DiracDelta(x, 1) rebuilds from .args.
    expr = DiracDelta(x, 1)
    assert _test_args(expr)
def test_sympy__functions__special__singularity_functions__SingularityFunction():
from sympy.functions.special.singularity_functions import SingularityFunction
assert _test_args(SingularityFunction(x, y, z))
def test_sympy__functions__special__delta_functions__Heaviside():
from sympy.functions.special.delta_functions import Heaviside
assert _test_args(Heaviside(x))
def test_sympy__functions__special__error_functions__erf():
from sympy.functions.special.error_functions import erf
assert _test_args(erf(2))
def test_sympy__functions__special__error_functions__erfc():
from sympy.functions.special.error_functions import erfc
assert _test_args(erfc(2))
def test_sympy__functions__special__error_functions__erfi():
from sympy.functions.special.error_functions import erfi
assert _test_args(erfi(2))
def test_sympy__functions__special__error_functions__erf2():
from sympy.functions.special.error_functions import erf2
assert _test_args(erf2(2, 3))
def test_sympy__functions__special__error_functions__erfinv():
from sympy.functions.special.error_functions import erfinv
assert _test_args(erfinv(2))
def test_sympy__functions__special__error_functions__erfcinv():
from sympy.functions.special.error_functions import erfcinv
assert _test_args(erfcinv(2))
def test_sympy__functions__special__error_functions__erf2inv():
from sympy.functions.special.error_functions import erf2inv
assert _test_args(erf2inv(2, 3))
@SKIP("abstract class")
def test_sympy__functions__special__error_functions__FresnelIntegral():
pass
def test_sympy__functions__special__error_functions__fresnels():
from sympy.functions.special.error_functions import fresnels
assert _test_args(fresnels(2))
def test_sympy__functions__special__error_functions__fresnelc():
from sympy.functions.special.error_functions import fresnelc
assert _test_args(fresnelc(2))
def test_sympy__functions__special__error_functions__erfs():
from sympy.functions.special.error_functions import _erfs
assert _test_args(_erfs(2))
def test_sympy__functions__special__error_functions__Ei():
from sympy.functions.special.error_functions import Ei
assert _test_args(Ei(2))
def test_sympy__functions__special__error_functions__li():
from sympy.functions.special.error_functions import li
assert _test_args(li(2))
def test_sympy__functions__special__error_functions__Li():
from sympy.functions.special.error_functions import Li
assert _test_args(Li(2))
@SKIP("abstract class")
def test_sympy__functions__special__error_functions__TrigonometricIntegral():
pass
def test_sympy__functions__special__error_functions__Si():
from sympy.functions.special.error_functions import Si
assert _test_args(Si(2))
def test_sympy__functions__special__error_functions__Ci():
from sympy.functions.special.error_functions import Ci
assert _test_args(Ci(2))
def test_sympy__functions__special__error_functions__Shi():
from sympy.functions.special.error_functions import Shi
assert _test_args(Shi(2))
def test_sympy__functions__special__error_functions__Chi():
from sympy.functions.special.error_functions import Chi
assert _test_args(Chi(2))
def test_sympy__functions__special__error_functions__expint():
from sympy.functions.special.error_functions import expint
assert _test_args(expint(y, x))
def test_sympy__functions__special__gamma_functions__gamma():
from sympy.functions.special.gamma_functions import gamma
assert _test_args(gamma(x))
def test_sympy__functions__special__gamma_functions__loggamma():
from sympy.functions.special.gamma_functions import loggamma
assert _test_args(loggamma(2))
def test_sympy__functions__special__gamma_functions__lowergamma():
from sympy.functions.special.gamma_functions import lowergamma
assert _test_args(lowergamma(x, 2))
def test_sympy__functions__special__gamma_functions__polygamma():
from sympy.functions.special.gamma_functions import polygamma
assert _test_args(polygamma(x, 2))
def test_sympy__functions__special__gamma_functions__uppergamma():
from sympy.functions.special.gamma_functions import uppergamma
assert _test_args(uppergamma(x, 2))
def test_sympy__functions__special__beta_functions__beta():
from sympy.functions.special.beta_functions import beta
assert _test_args(beta(x, x))
@SKIP("abstract class")
def test_sympy__functions__special__mathieu_functions__MathieuBase():
    # MathieuBase is the abstract parent of mathieus/mathieuc and their
    # primes; decorate with SKIP for consistency with the file's other
    # abstract-class placeholders rather than reporting a vacuous pass.
    pass
def test_sympy__functions__special__mathieu_functions__mathieus():
from sympy.functions.special.mathieu_functions import mathieus
assert _test_args(mathieus(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieuc():
from sympy.functions.special.mathieu_functions import mathieuc
assert _test_args(mathieuc(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieusprime():
from sympy.functions.special.mathieu_functions import mathieusprime
assert _test_args(mathieusprime(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieucprime():
from sympy.functions.special.mathieu_functions import mathieucprime
assert _test_args(mathieucprime(1, 1, 1))
@SKIP("abstract class")
def test_sympy__functions__special__hyper__TupleParametersBase():
pass
@SKIP("abstract class")
def test_sympy__functions__special__hyper__TupleArg():
pass
def test_sympy__functions__special__hyper__hyper():
from sympy.functions.special.hyper import hyper
assert _test_args(hyper([1, 2, 3], [4, 5], x))
def test_sympy__functions__special__hyper__meijerg():
from sympy.functions.special.hyper import meijerg
assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x))
@SKIP("abstract class")
def test_sympy__functions__special__hyper__HyperRep():
pass
def test_sympy__functions__special__hyper__HyperRep_power1():
from sympy.functions.special.hyper import HyperRep_power1
assert _test_args(HyperRep_power1(x, y))
def test_sympy__functions__special__hyper__HyperRep_power2():
from sympy.functions.special.hyper import HyperRep_power2
assert _test_args(HyperRep_power2(x, y))
def test_sympy__functions__special__hyper__HyperRep_log1():
from sympy.functions.special.hyper import HyperRep_log1
assert _test_args(HyperRep_log1(x))
def test_sympy__functions__special__hyper__HyperRep_atanh():
from sympy.functions.special.hyper import HyperRep_atanh
assert _test_args(HyperRep_atanh(x))
def test_sympy__functions__special__hyper__HyperRep_asin1():
from sympy.functions.special.hyper import HyperRep_asin1
assert _test_args(HyperRep_asin1(x))
def test_sympy__functions__special__hyper__HyperRep_asin2():
from sympy.functions.special.hyper import HyperRep_asin2
assert _test_args(HyperRep_asin2(x))
def test_sympy__functions__special__hyper__HyperRep_sqrts1():
from sympy.functions.special.hyper import HyperRep_sqrts1
assert _test_args(HyperRep_sqrts1(x, y))
def test_sympy__functions__special__hyper__HyperRep_sqrts2():
from sympy.functions.special.hyper import HyperRep_sqrts2
assert _test_args(HyperRep_sqrts2(x, y))
def test_sympy__functions__special__hyper__HyperRep_log2():
from sympy.functions.special.hyper import HyperRep_log2
assert _test_args(HyperRep_log2(x))
def test_sympy__functions__special__hyper__HyperRep_cosasin():
from sympy.functions.special.hyper import HyperRep_cosasin
assert _test_args(HyperRep_cosasin(x, y))
def test_sympy__functions__special__hyper__HyperRep_sinasin():
from sympy.functions.special.hyper import HyperRep_sinasin
assert _test_args(HyperRep_sinasin(x, y))
@SKIP("abstract class")
def test_sympy__functions__special__polynomials__OrthogonalPolynomial():
pass
def test_sympy__functions__special__polynomials__jacobi():
from sympy.functions.special.polynomials import jacobi
assert _test_args(jacobi(x, 2, 2, 2))
def test_sympy__functions__special__polynomials__gegenbauer():
from sympy.functions.special.polynomials import gegenbauer
assert _test_args(gegenbauer(x, 2, 2))
def test_sympy__functions__special__polynomials__chebyshevt():
from sympy.functions.special.polynomials import chebyshevt
assert _test_args(chebyshevt(x, 2))
def test_sympy__functions__special__polynomials__chebyshevt_root():
from sympy.functions.special.polynomials import chebyshevt_root
assert _test_args(chebyshevt_root(3, 2))
def test_sympy__functions__special__polynomials__chebyshevu():
from sympy.functions.special.polynomials import chebyshevu
assert _test_args(chebyshevu(x, 2))
def test_sympy__functions__special__polynomials__chebyshevu_root():
from sympy.functions.special.polynomials import chebyshevu_root
assert _test_args(chebyshevu_root(3, 2))
def test_sympy__functions__special__polynomials__hermite():
from sympy.functions.special.polynomials import hermite
assert _test_args(hermite(x, 2))
def test_sympy__functions__special__polynomials__legendre():
from sympy.functions.special.polynomials import legendre
assert _test_args(legendre(x, 2))
def test_sympy__functions__special__polynomials__assoc_legendre():
from sympy.functions.special.polynomials import assoc_legendre
assert _test_args(assoc_legendre(x, 0, y))
def test_sympy__functions__special__polynomials__laguerre():
from sympy.functions.special.polynomials import laguerre
assert _test_args(laguerre(x, 2))
def test_sympy__functions__special__polynomials__assoc_laguerre():
from sympy.functions.special.polynomials import assoc_laguerre
assert _test_args(assoc_laguerre(x, 0, y))
def test_sympy__functions__special__spherical_harmonics__Ynm():
from sympy.functions.special.spherical_harmonics import Ynm
assert _test_args(Ynm(1, 1, x, y))
def test_sympy__functions__special__spherical_harmonics__Znm():
from sympy.functions.special.spherical_harmonics import Znm
assert _test_args(Znm(1, 1, x, y))
def test_sympy__functions__special__tensor_functions__LeviCivita():
from sympy.functions.special.tensor_functions import LeviCivita
assert _test_args(LeviCivita(x, y, 2))
def test_sympy__functions__special__tensor_functions__KroneckerDelta():
from sympy.functions.special.tensor_functions import KroneckerDelta
assert _test_args(KroneckerDelta(x, y))
def test_sympy__functions__special__zeta_functions__dirichlet_eta():
from sympy.functions.special.zeta_functions import dirichlet_eta
assert _test_args(dirichlet_eta(x))
def test_sympy__functions__special__zeta_functions__zeta():
from sympy.functions.special.zeta_functions import zeta
assert _test_args(zeta(101))
def test_sympy__functions__special__zeta_functions__lerchphi():
from sympy.functions.special.zeta_functions import lerchphi
assert _test_args(lerchphi(x, y, z))
def test_sympy__functions__special__zeta_functions__polylog():
from sympy.functions.special.zeta_functions import polylog
assert _test_args(polylog(x, y))
def test_sympy__functions__special__zeta_functions__stieltjes():
from sympy.functions.special.zeta_functions import stieltjes
assert _test_args(stieltjes(x, y))
def test_sympy__integrals__integrals__Integral():
from sympy.integrals.integrals import Integral
assert _test_args(Integral(2, (x, 0, 1)))
def test_sympy__integrals__risch__NonElementaryIntegral():
from sympy.integrals.risch import NonElementaryIntegral
assert _test_args(NonElementaryIntegral(exp(-x**2), x))
@SKIP("abstract class")
def test_sympy__integrals__transforms__IntegralTransform():
pass
def test_sympy__integrals__transforms__MellinTransform():
from sympy.integrals.transforms import MellinTransform
assert _test_args(MellinTransform(2, x, y))
def test_sympy__integrals__transforms__InverseMellinTransform():
from sympy.integrals.transforms import InverseMellinTransform
assert _test_args(InverseMellinTransform(2, x, y, 0, 1))
def test_sympy__integrals__transforms__LaplaceTransform():
from sympy.integrals.transforms import LaplaceTransform
assert _test_args(LaplaceTransform(2, x, y))
def test_sympy__integrals__transforms__InverseLaplaceTransform():
from sympy.integrals.transforms import InverseLaplaceTransform
assert _test_args(InverseLaplaceTransform(2, x, y, 0))
@SKIP("abstract class")
def test_sympy__integrals__transforms__FourierTypeTransform():
pass
def test_sympy__integrals__transforms__InverseFourierTransform():
from sympy.integrals.transforms import InverseFourierTransform
assert _test_args(InverseFourierTransform(2, x, y))
def test_sympy__integrals__transforms__FourierTransform():
from sympy.integrals.transforms import FourierTransform
assert _test_args(FourierTransform(2, x, y))
@SKIP("abstract class")
def test_sympy__integrals__transforms__SineCosineTypeTransform():
pass
def test_sympy__integrals__transforms__InverseSineTransform():
from sympy.integrals.transforms import InverseSineTransform
assert _test_args(InverseSineTransform(2, x, y))
def test_sympy__integrals__transforms__SineTransform():
from sympy.integrals.transforms import SineTransform
assert _test_args(SineTransform(2, x, y))
def test_sympy__integrals__transforms__InverseCosineTransform():
from sympy.integrals.transforms import InverseCosineTransform
assert _test_args(InverseCosineTransform(2, x, y))
def test_sympy__integrals__transforms__CosineTransform():
from sympy.integrals.transforms import CosineTransform
assert _test_args(CosineTransform(2, x, y))
@SKIP("abstract class")
def test_sympy__integrals__transforms__HankelTypeTransform():
pass
def test_sympy__integrals__transforms__InverseHankelTransform():
from sympy.integrals.transforms import InverseHankelTransform
assert _test_args(InverseHankelTransform(2, x, y, 0))
def test_sympy__integrals__transforms__HankelTransform():
from sympy.integrals.transforms import HankelTransform
assert _test_args(HankelTransform(2, x, y, 0))
@XFAIL
def test_sympy__liealgebras__cartan_type__CartanType_generator():
from sympy.liealgebras.cartan_type import CartanType_generator
assert _test_args(CartanType_generator("A2"))
@XFAIL
def test_sympy__liealgebras__cartan_type__Standard_Cartan():
from sympy.liealgebras.cartan_type import Standard_Cartan
assert _test_args(Standard_Cartan("A", 2))
@XFAIL
def test_sympy__liealgebras__weyl_group__WeylGroup():
from sympy.liealgebras.weyl_group import WeylGroup
assert _test_args(WeylGroup("B4"))
@XFAIL
def test_sympy__liealgebras__root_system__RootSystem():
from sympy.liealgebras.root_system import RootSystem
assert _test_args(RootSystem("A2"))
@XFAIL
def test_sympy__liealgebras__type_a__TypeA():
from sympy.liealgebras.type_a import TypeA
assert _test_args(TypeA(2))
@XFAIL
def test_sympy__liealgebras__type_b__TypeB():
from sympy.liealgebras.type_b import TypeB
assert _test_args(TypeB(4))
@XFAIL
def test_sympy__liealgebras__type_c__TypeC():
from sympy.liealgebras.type_c import TypeC
assert _test_args(TypeC(4))
@XFAIL
def test_sympy__liealgebras__type_d__TypeD():
from sympy.liealgebras.type_d import TypeD
assert _test_args(TypeD(4))
@XFAIL
def test_sympy__liealgebras__type_e__TypeE():
from sympy.liealgebras.type_e import TypeE
assert _test_args(TypeE(6))
@XFAIL
def test_sympy__liealgebras__type_f__TypeF():
from sympy.liealgebras.type_f import TypeF
assert _test_args(TypeF(4))
@XFAIL
def test_sympy__liealgebras__type_g__TypeG():
from sympy.liealgebras.type_g import TypeG
assert _test_args(TypeG(2))
def test_sympy__logic__boolalg__And():
from sympy.logic.boolalg import And
assert _test_args(And(x, y, 1))
@SKIP("abstract class")
def test_sympy__logic__boolalg__Boolean():
pass
def test_sympy__logic__boolalg__BooleanFunction():
from sympy.logic.boolalg import BooleanFunction
assert _test_args(BooleanFunction(1, 2, 3))
@SKIP("abstract class")
def test_sympy__logic__boolalg__BooleanAtom():
pass
def test_sympy__logic__boolalg__BooleanTrue():
from sympy.logic.boolalg import true
assert _test_args(true)
def test_sympy__logic__boolalg__BooleanFalse():
from sympy.logic.boolalg import false
assert _test_args(false)
def test_sympy__logic__boolalg__Equivalent():
from sympy.logic.boolalg import Equivalent
assert _test_args(Equivalent(x, 2))
def test_sympy__logic__boolalg__ITE():
from sympy.logic.boolalg import ITE
assert _test_args(ITE(x, y, 1))
def test_sympy__logic__boolalg__Implies():
from sympy.logic.boolalg import Implies
assert _test_args(Implies(x, y))
def test_sympy__logic__boolalg__Nand():
from sympy.logic.boolalg import Nand
assert _test_args(Nand(x, y, 1))
def test_sympy__logic__boolalg__Nor():
from sympy.logic.boolalg import Nor
assert _test_args(Nor(x, y))
def test_sympy__logic__boolalg__Not():
from sympy.logic.boolalg import Not
assert _test_args(Not(x))
def test_sympy__logic__boolalg__Or():
from sympy.logic.boolalg import Or
assert _test_args(Or(x, y))
def test_sympy__logic__boolalg__Xor():
from sympy.logic.boolalg import Xor
assert _test_args(Xor(x, y, 2))
def test_sympy__logic__boolalg__Xnor():
from sympy.logic.boolalg import Xnor
assert _test_args(Xnor(x, y, 2))
def test_sympy__matrices__matrices__DeferredVector():
from sympy.matrices.matrices import DeferredVector
assert _test_args(DeferredVector("X"))
@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixBase():
pass
def test_sympy__matrices__immutable__ImmutableDenseMatrix():
    """Check ImmutableDenseMatrix rebuilds from .args for several forms."""
    from sympy.matrices.immutable import ImmutableDenseMatrix
    # 2x2 from nested lists, then 1x1 from an explicit flat list.
    for mat in (ImmutableDenseMatrix([[1, 2], [3, 4]]),
                ImmutableDenseMatrix(1, 1, [1])):
        assert _test_args(mat)
        assert _test_args(Basic(*list(mat)))
    # Callable entries: the indices handed to the lambda must be sympified,
    # otherwise 1/(1 + i) would use Python true division and yield a float.
    mat = ImmutableDenseMatrix(2, 2, lambda i, j: 1)
    assert mat[0, 0] is S.One
    mat = ImmutableDenseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert mat[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(mat)
    assert _test_args(Basic(*list(mat)))
def test_sympy__matrices__immutable__ImmutableSparseMatrix():
    """Check ImmutableSparseMatrix rebuilds from .args for several forms."""
    from sympy.matrices.immutable import ImmutableSparseMatrix
    # Nested lists, an explicit {(row, col): value} dict, and a flat list.
    for mat in (ImmutableSparseMatrix([[1, 2], [3, 4]]),
                ImmutableSparseMatrix(1, 1, {(0, 0): 1}),
                ImmutableSparseMatrix(1, 1, [1])):
        assert _test_args(mat)
        assert _test_args(Basic(*list(mat)))
    # Callable entries: the indices handed to the lambda must be sympified,
    # otherwise 1/(1 + i) would use Python true division and yield a float.
    mat = ImmutableSparseMatrix(2, 2, lambda i, j: 1)
    assert mat[0, 0] is S.One
    mat = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert mat[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(mat)
    assert _test_args(Basic(*list(mat)))
def test_sympy__matrices__expressions__slice__MatrixSlice():
    from sympy.matrices.expressions.slice import MatrixSlice
    from sympy.matrices.expressions import MatrixSymbol
    # Top-left 2x2 sub-block of a 4x4 symbolic matrix.
    mat = MatrixSymbol('X', 4, 4)
    assert _test_args(MatrixSlice(mat, (0, 2), (0, 2)))
def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix():
from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, x)
Y = MatrixSymbol('Y', y, y)
assert _test_args(BlockDiagMatrix(X, Y))
def test_sympy__matrices__expressions__blockmatrix__BlockMatrix():
    # Assemble a 2x2 block matrix with a zero block as lower-left filler
    # so that the block shapes are mutually consistent.
    from sympy.matrices.expressions.blockmatrix import BlockMatrix
    from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
    upper_left = MatrixSymbol('X', x, x)
    lower_right = MatrixSymbol('Y', y, y)
    upper_right = MatrixSymbol('Z', x, y)
    lower_left = ZeroMatrix(y, x)
    assert _test_args(BlockMatrix([[upper_left, upper_right],
                                   [lower_left, lower_right]]))
def test_sympy__matrices__expressions__inverse__Inverse():
from sympy.matrices.expressions.inverse import Inverse
from sympy.matrices.expressions import MatrixSymbol
assert _test_args(Inverse(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__matadd__MatAdd():
from sympy.matrices.expressions.matadd import MatAdd
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, y)
Y = MatrixSymbol('Y', x, y)
assert _test_args(MatAdd(X, Y))
def test_sympy__matrices__expressions__matexpr__Identity():
from sympy.matrices.expressions.matexpr import Identity
assert _test_args(Identity(3))
@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixExpr():
pass
def test_sympy__matrices__expressions__matexpr__MatrixElement():
from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement
from sympy import S
assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3)))
@XFAIL
def test_sympy__matrices__expressions__matexpr__MatrixSymbol():
from sympy.matrices.expressions.matexpr import MatrixSymbol
assert _test_args(MatrixSymbol('A', 3, 5))
def test_sympy__matrices__expressions__matexpr__ZeroMatrix():
from sympy.matrices.expressions.matexpr import ZeroMatrix
assert _test_args(ZeroMatrix(3, 5))
def test_sympy__matrices__expressions__matmul__MatMul():
from sympy.matrices.expressions.matmul import MatMul
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, y)
Y = MatrixSymbol('Y', y, x)
assert _test_args(MatMul(X, Y))
def test_sympy__matrices__expressions__dotproduct__DotProduct():
from sympy.matrices.expressions.dotproduct import DotProduct
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, 1)
Y = MatrixSymbol('Y', x, 1)
assert _test_args(DotProduct(X, Y))
def test_sympy__matrices__expressions__diagonal__DiagonalMatrix():
    from sympy.matrices.expressions.diagonal import DiagonalMatrix
    from sympy.matrices.expressions import MatrixSymbol
    # Use a distinct local name so the module-level symbol ``x`` is not
    # shadowed; the MatrixSymbol itself keeps the original name 'x'.
    vec = MatrixSymbol('x', 10, 1)
    assert _test_args(DiagonalMatrix(vec))
def test_sympy__matrices__expressions__diagonal__DiagonalOf():
from sympy.matrices.expressions.diagonal import DiagonalOf
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('x', 10, 10)
assert _test_args(DiagonalOf(X))
def test_sympy__matrices__expressions__hadamard__HadamardProduct():
from sympy.matrices.expressions.hadamard import HadamardProduct
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, y)
Y = MatrixSymbol('Y', x, y)
assert _test_args(HadamardProduct(X, Y))
def test_sympy__matrices__expressions__kronecker__KroneckerProduct():
from sympy.matrices.expressions.kronecker import KroneckerProduct
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, y)
Y = MatrixSymbol('Y', x, y)
assert _test_args(KroneckerProduct(X, Y))
def test_sympy__matrices__expressions__matpow__MatPow():
from sympy.matrices.expressions.matpow import MatPow
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', x, x)
assert _test_args(MatPow(X, 2))
def test_sympy__matrices__expressions__transpose__Transpose():
from sympy.matrices.expressions.transpose import Transpose
from sympy.matrices.expressions import MatrixSymbol
assert _test_args(Transpose(MatrixSymbol('A', 3, 5)))
def test_sympy__matrices__expressions__adjoint__Adjoint():
from sympy.matrices.expressions.adjoint import Adjoint
from sympy.matrices.expressions import MatrixSymbol
assert _test_args(Adjoint(MatrixSymbol('A', 3, 5)))
def test_sympy__matrices__expressions__trace__Trace():
from sympy.matrices.expressions.trace import Trace
from sympy.matrices.expressions import MatrixSymbol
assert _test_args(Trace(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__determinant__Determinant():
from sympy.matrices.expressions.determinant import Determinant
from sympy.matrices.expressions import MatrixSymbol
assert _test_args(Determinant(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix():
from sympy.matrices.expressions.funcmatrix import FunctionMatrix
from sympy import symbols
i, j = symbols('i,j')
assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) ))
def test_sympy__matrices__expressions__fourier__DFT():
from sympy.matrices.expressions.fourier import DFT
from sympy import S
assert _test_args(DFT(S(2)))
def test_sympy__matrices__expressions__fourier__IDFT():
from sympy.matrices.expressions.fourier import IDFT
from sympy import S
assert _test_args(IDFT(S(2)))
# Shared 10x10 matrix symbol used by the factorization tests below
# (LofLU, UofLU, QofQR, ..., SofSVD).
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', 10, 10)
def test_sympy__matrices__expressions__factorizations__LofLU():
from sympy.matrices.expressions.factorizations import LofLU
assert _test_args(LofLU(X))
def test_sympy__matrices__expressions__factorizations__UofLU():
from sympy.matrices.expressions.factorizations import UofLU
assert _test_args(UofLU(X))
def test_sympy__matrices__expressions__factorizations__QofQR():
from sympy.matrices.expressions.factorizations import QofQR
assert _test_args(QofQR(X))
def test_sympy__matrices__expressions__factorizations__RofQR():
from sympy.matrices.expressions.factorizations import RofQR
assert _test_args(RofQR(X))
def test_sympy__matrices__expressions__factorizations__LofCholesky():
from sympy.matrices.expressions.factorizations import LofCholesky
assert _test_args(LofCholesky(X))
def test_sympy__matrices__expressions__factorizations__UofCholesky():
from sympy.matrices.expressions.factorizations import UofCholesky
assert _test_args(UofCholesky(X))
def test_sympy__matrices__expressions__factorizations__EigenVectors():
from sympy.matrices.expressions.factorizations import EigenVectors
assert _test_args(EigenVectors(X))
def test_sympy__matrices__expressions__factorizations__EigenValues():
from sympy.matrices.expressions.factorizations import EigenValues
assert _test_args(EigenValues(X))
def test_sympy__matrices__expressions__factorizations__UofSVD():
from sympy.matrices.expressions.factorizations import UofSVD
assert _test_args(UofSVD(X))
def test_sympy__matrices__expressions__factorizations__VofSVD():
from sympy.matrices.expressions.factorizations import VofSVD
assert _test_args(VofSVD(X))
def test_sympy__matrices__expressions__factorizations__SofSVD():
from sympy.matrices.expressions.factorizations import SofSVD
assert _test_args(SofSVD(X))
@SKIP("abstract class")
def test_sympy__matrices__expressions__factorizations__Factorization():
pass
def test_sympy__physics__vector__frame__CoordinateSym():
from sympy.physics.vector import CoordinateSym
from sympy.physics.vector import ReferenceFrame
assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0))
def test_sympy__physics__paulialgebra__Pauli():
from sympy.physics.paulialgebra import Pauli
assert _test_args(Pauli(1))
def test_sympy__physics__quantum__anticommutator__AntiCommutator():
from sympy.physics.quantum.anticommutator import AntiCommutator
assert _test_args(AntiCommutator(x, y))
def test_sympy__physics__quantum__cartesian__PositionBra3D():
from sympy.physics.quantum.cartesian import PositionBra3D
assert _test_args(PositionBra3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PositionKet3D():
from sympy.physics.quantum.cartesian import PositionKet3D
assert _test_args(PositionKet3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PositionState3D():
from sympy.physics.quantum.cartesian import PositionState3D
assert _test_args(PositionState3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PxBra():
from sympy.physics.quantum.cartesian import PxBra
assert _test_args(PxBra(x, y, z))
def test_sympy__physics__quantum__cartesian__PxKet():
from sympy.physics.quantum.cartesian import PxKet
assert _test_args(PxKet(x, y, z))
def test_sympy__physics__quantum__cartesian__PxOp():
from sympy.physics.quantum.cartesian import PxOp
assert _test_args(PxOp(x, y, z))
def test_sympy__physics__quantum__cartesian__XBra():
from sympy.physics.quantum.cartesian import XBra
assert _test_args(XBra(x))
def test_sympy__physics__quantum__cartesian__XKet():
from sympy.physics.quantum.cartesian import XKet
assert _test_args(XKet(x))
def test_sympy__physics__quantum__cartesian__XOp():
from sympy.physics.quantum.cartesian import XOp
assert _test_args(XOp(x))
def test_sympy__physics__quantum__cartesian__YOp():
from sympy.physics.quantum.cartesian import YOp
assert _test_args(YOp(x))
def test_sympy__physics__quantum__cartesian__ZOp():
from sympy.physics.quantum.cartesian import ZOp
assert _test_args(ZOp(x))
def test_sympy__physics__quantum__cg__CG():
    # Clebsch-Gordan coefficient built from exact rational quantum numbers.
    from sympy.physics.quantum.cg import CG
    from sympy import S
    half = S(1)/2
    assert _test_args(CG(S(3)/2, S(3)/2, half, -half, 1, 1))
def test_sympy__physics__quantum__cg__Wigner3j():
from sympy.physics.quantum.cg import Wigner3j
assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0))
def test_sympy__physics__quantum__cg__Wigner6j():
from sympy.physics.quantum.cg import Wigner6j
assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2))
def test_sympy__physics__quantum__cg__Wigner9j():
from sympy.physics.quantum.cg import Wigner9j
assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0))
def test_sympy__physics__quantum__circuitplot__Mz():
from sympy.physics.quantum.circuitplot import Mz
assert _test_args(Mz(0))
def test_sympy__physics__quantum__circuitplot__Mx():
from sympy.physics.quantum.circuitplot import Mx
assert _test_args(Mx(0))
def test_sympy__physics__quantum__commutator__Commutator():
    from sympy.physics.quantum.commutator import Commutator
    # Operands are declared non-commutative so the commutator does not
    # evaluate away to zero.
    lhs, rhs = symbols('A,B', commutative=False)
    assert _test_args(Commutator(lhs, rhs))
def test_sympy__physics__quantum__constants__HBar():
from sympy.physics.quantum.constants import HBar
assert _test_args(HBar())
def test_sympy__physics__quantum__dagger__Dagger():
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.state import Ket
assert _test_args(Dagger(Dagger(Ket('psi'))))
def test_sympy__physics__quantum__gate__CGate():
from sympy.physics.quantum.gate import CGate, Gate
assert _test_args(CGate((0, 1), Gate(2)))
def test_sympy__physics__quantum__gate__CGateS():
from sympy.physics.quantum.gate import CGateS, Gate
assert _test_args(CGateS((0, 1), Gate(2)))
def test_sympy__physics__quantum__gate__CNotGate():
from sympy.physics.quantum.gate import CNotGate
assert _test_args(CNotGate(0, 1))
def test_sympy__physics__quantum__gate__Gate():
from sympy.physics.quantum.gate import Gate
assert _test_args(Gate(0))
def test_sympy__physics__quantum__gate__HadamardGate():
from sympy.physics.quantum.gate import HadamardGate
assert _test_args(HadamardGate(0))
def test_sympy__physics__quantum__gate__IdentityGate():
from sympy.physics.quantum.gate import IdentityGate
assert _test_args(IdentityGate(0))
def test_sympy__physics__quantum__gate__OneQubitGate():
from sympy.physics.quantum.gate import OneQubitGate
assert _test_args(OneQubitGate(0))
def test_sympy__physics__quantum__gate__PhaseGate():
from sympy.physics.quantum.gate import PhaseGate
assert _test_args(PhaseGate(0))
def test_sympy__physics__quantum__gate__SwapGate():
from sympy.physics.quantum.gate import SwapGate
assert _test_args(SwapGate(0, 1))
def test_sympy__physics__quantum__gate__TGate():
from sympy.physics.quantum.gate import TGate
assert _test_args(TGate(0))
def test_sympy__physics__quantum__gate__TwoQubitGate():
from sympy.physics.quantum.gate import TwoQubitGate
assert _test_args(TwoQubitGate(0))
def test_sympy__physics__quantum__gate__UGate():
    from sympy.physics.quantum.gate import UGate
    from sympy.matrices.immutable import ImmutableDenseMatrix
    from sympy import Integer, Tuple
    # A one-qubit gate: target tuple plus an explicit 2x2 matrix.
    targets = Tuple(Integer(1))
    unitary = ImmutableDenseMatrix([[1, 0], [0, 2]])
    assert _test_args(UGate(targets, unitary))
def test_sympy__physics__quantum__gate__XGate():
from sympy.physics.quantum.gate import XGate
assert _test_args(XGate(0))
def test_sympy__physics__quantum__gate__YGate():
from sympy.physics.quantum.gate import YGate
assert _test_args(YGate(0))
def test_sympy__physics__quantum__gate__ZGate():
from sympy.physics.quantum.gate import ZGate
assert _test_args(ZGate(0))
@SKIP("TODO: sympy.physics")
def test_sympy__physics__quantum__grover__OracleGate():
from sympy.physics.quantum.grover import OracleGate
assert _test_args(OracleGate())
def test_sympy__physics__quantum__grover__WGate():
from sympy.physics.quantum.grover import WGate
assert _test_args(WGate(1))
def test_sympy__physics__quantum__hilbert__ComplexSpace():
from sympy.physics.quantum.hilbert import ComplexSpace
assert _test_args(ComplexSpace(x))
def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace():
    from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace
    # Direct sum of a finite-dimensional space and a Fock space.
    assert _test_args(DirectSumHilbertSpace(ComplexSpace(2), FockSpace()))
def test_sympy__physics__quantum__hilbert__FockSpace():
from sympy.physics.quantum.hilbert import FockSpace
assert _test_args(FockSpace())
def test_sympy__physics__quantum__hilbert__HilbertSpace():
from sympy.physics.quantum.hilbert import HilbertSpace
assert _test_args(HilbertSpace())
def test_sympy__physics__quantum__hilbert__L2():
from sympy.physics.quantum.hilbert import L2
from sympy import oo, Interval
assert _test_args(L2(Interval(0, oo)))
def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace():
from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace
f = FockSpace()
assert _test_args(TensorPowerHilbertSpace(f, 2))
def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace():
    from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace
    # Tensor product of a Fock space with a 2-dimensional complex space.
    assert _test_args(TensorProductHilbertSpace(FockSpace(), ComplexSpace(2)))
def test_sympy__physics__quantum__innerproduct__InnerProduct():
    from sympy.physics.quantum import Bra, Ket, InnerProduct
    # The inner product <b|k>.
    assert _test_args(InnerProduct(Bra('b'), Ket('k')))
def test_sympy__physics__quantum__operator__DifferentialOperator():
from sympy.physics.quantum.operator import DifferentialOperator
from sympy import Derivative, Function
f = Function('f')
assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x)))
def test_sympy__physics__quantum__operator__HermitianOperator():
from sympy.physics.quantum.operator import HermitianOperator
assert _test_args(HermitianOperator('H'))
def test_sympy__physics__quantum__operator__IdentityOperator():
from sympy.physics.quantum.operator import IdentityOperator
assert _test_args(IdentityOperator(5))
def test_sympy__physics__quantum__operator__Operator():
from sympy.physics.quantum.operator import Operator
assert _test_args(Operator('A'))
def test_sympy__physics__quantum__operator__OuterProduct():
from sympy.physics.quantum.operator import OuterProduct
from sympy.physics.quantum import Ket, Bra
b = Bra('b')
k = Ket('k')
assert _test_args(OuterProduct(k, b))
def test_sympy__physics__quantum__operator__UnitaryOperator():
from sympy.physics.quantum.operator import UnitaryOperator
assert _test_args(UnitaryOperator('U'))
def test_sympy__physics__quantum__piab__PIABBra():
from sympy.physics.quantum.piab import PIABBra
assert _test_args(PIABBra('B'))
def test_sympy__physics__quantum__boson__BosonOp():
from sympy.physics.quantum.boson import BosonOp
assert _test_args(BosonOp('a'))
assert _test_args(BosonOp('a', False))
def test_sympy__physics__quantum__boson__BosonFockKet():
from sympy.physics.quantum.boson import BosonFockKet
assert _test_args(BosonFockKet(1))
def test_sympy__physics__quantum__boson__BosonFockBra():
from sympy.physics.quantum.boson import BosonFockBra
assert _test_args(BosonFockBra(1))
def test_sympy__physics__quantum__boson__BosonCoherentKet():
from sympy.physics.quantum.boson import BosonCoherentKet
assert _test_args(BosonCoherentKet(1))
def test_sympy__physics__quantum__boson__BosonCoherentBra():
from sympy.physics.quantum.boson import BosonCoherentBra
assert _test_args(BosonCoherentBra(1))
def test_sympy__physics__quantum__fermion__FermionOp():
from sympy.physics.quantum.fermion import FermionOp
assert _test_args(FermionOp('c'))
assert _test_args(FermionOp('c', False))
def test_sympy__physics__quantum__fermion__FermionFockKet():
from sympy.physics.quantum.fermion import FermionFockKet
assert _test_args(FermionFockKet(1))
def test_sympy__physics__quantum__fermion__FermionFockBra():
from sympy.physics.quantum.fermion import FermionFockBra
assert _test_args(FermionFockBra(1))
def test_sympy__physics__quantum__pauli__SigmaOpBase():
from sympy.physics.quantum.pauli import SigmaOpBase
assert _test_args(SigmaOpBase())
def test_sympy__physics__quantum__pauli__SigmaX():
from sympy.physics.quantum.pauli import SigmaX
assert _test_args(SigmaX())
def test_sympy__physics__quantum__pauli__SigmaY():
from sympy.physics.quantum.pauli import SigmaY
assert _test_args(SigmaY())
def test_sympy__physics__quantum__pauli__SigmaZ():
from sympy.physics.quantum.pauli import SigmaZ
assert _test_args(SigmaZ())
def test_sympy__physics__quantum__pauli__SigmaMinus():
from sympy.physics.quantum.pauli import SigmaMinus
assert _test_args(SigmaMinus())
def test_sympy__physics__quantum__pauli__SigmaPlus():
from sympy.physics.quantum.pauli import SigmaPlus
assert _test_args(SigmaPlus())
def test_sympy__physics__quantum__pauli__SigmaZKet():
from sympy.physics.quantum.pauli import SigmaZKet
assert _test_args(SigmaZKet(0))
def test_sympy__physics__quantum__pauli__SigmaZBra():
from sympy.physics.quantum.pauli import SigmaZBra
assert _test_args(SigmaZBra(0))
def test_sympy__physics__quantum__piab__PIABHamiltonian():
from sympy.physics.quantum.piab import PIABHamiltonian
assert _test_args(PIABHamiltonian('P'))
def test_sympy__physics__quantum__piab__PIABKet():
from sympy.physics.quantum.piab import PIABKet
assert _test_args(PIABKet('K'))
def test_sympy__physics__quantum__qexpr__QExpr():
from sympy.physics.quantum.qexpr import QExpr
assert _test_args(QExpr(0))
def test_sympy__physics__quantum__qft__Fourier():
from sympy.physics.quantum.qft import Fourier
assert _test_args(Fourier(0, 1))
def test_sympy__physics__quantum__qft__IQFT():
from sympy.physics.quantum.qft import IQFT
assert _test_args(IQFT(0, 1))
def test_sympy__physics__quantum__qft__QFT():
from sympy.physics.quantum.qft import QFT
assert _test_args(QFT(0, 1))
def test_sympy__physics__quantum__qft__RkGate():
from sympy.physics.quantum.qft import RkGate
assert _test_args(RkGate(0, 1))
def test_sympy__physics__quantum__qubit__IntQubit():
from sympy.physics.quantum.qubit import IntQubit
assert _test_args(IntQubit(0))
def test_sympy__physics__quantum__qubit__IntQubitBra():
from sympy.physics.quantum.qubit import IntQubitBra
assert _test_args(IntQubitBra(0))
def test_sympy__physics__quantum__qubit__IntQubitState():
from sympy.physics.quantum.qubit import IntQubitState, QubitState
assert _test_args(IntQubitState(QubitState(0, 1)))
def test_sympy__physics__quantum__qubit__Qubit():
from sympy.physics.quantum.qubit import Qubit
assert _test_args(Qubit(0, 0, 0))
def test_sympy__physics__quantum__qubit__QubitBra():
from sympy.physics.quantum.qubit import QubitBra
assert _test_args(QubitBra('1', 0))
def test_sympy__physics__quantum__qubit__QubitState():
from sympy.physics.quantum.qubit import QubitState
assert _test_args(QubitState(0, 1))
def test_sympy__physics__quantum__density__Density():
    from sympy.physics.quantum.density import Density
    from sympy.physics.quantum.state import Ket
    # A 50/50 mixture of the |0> and |1> states.
    mixture = [[Ket(0), 0.5], [Ket(1), 0.5]]
    assert _test_args(Density(*mixture))
@SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented")
def test_sympy__physics__quantum__shor__CMod():
from sympy.physics.quantum.shor import CMod
assert _test_args(CMod())
def test_sympy__physics__quantum__spin__CoupledSpinState():
    from sympy.physics.quantum.spin import CoupledSpinState
    # Accepted argument forms: (j, m, jn) with two or three coupled spins.
    assert _test_args(CoupledSpinState(1, 0, (1, 1)))
    assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2)))
    # An explicit coupling scheme may be given as a fourth argument.
    assert _test_args(CoupledSpinState(
        1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) ))
    j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x')
    # subs() must reach into the jn tuple of individual spins...
    assert CoupledSpinState(
        j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3))
    # ...and into the nested coupling-scheme tuples as well.
    assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \
        CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 2, j)) )
def test_sympy__physics__quantum__spin__J2Op():
from sympy.physics.quantum.spin import J2Op
assert _test_args(J2Op('J'))
def test_sympy__physics__quantum__spin__JminusOp():
from sympy.physics.quantum.spin import JminusOp
assert _test_args(JminusOp('J'))
def test_sympy__physics__quantum__spin__JplusOp():
from sympy.physics.quantum.spin import JplusOp
assert _test_args(JplusOp('J'))
def test_sympy__physics__quantum__spin__JxBra():
from sympy.physics.quantum.spin import JxBra
assert _test_args(JxBra(1, 0))
def test_sympy__physics__quantum__spin__JxBraCoupled():
from sympy.physics.quantum.spin import JxBraCoupled
assert _test_args(JxBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JxKet():
from sympy.physics.quantum.spin import JxKet
assert _test_args(JxKet(1, 0))
def test_sympy__physics__quantum__spin__JxKetCoupled():
from sympy.physics.quantum.spin import JxKetCoupled
assert _test_args(JxKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JxOp():
from sympy.physics.quantum.spin import JxOp
assert _test_args(JxOp('J'))
def test_sympy__physics__quantum__spin__JyBra():
from sympy.physics.quantum.spin import JyBra
assert _test_args(JyBra(1, 0))
def test_sympy__physics__quantum__spin__JyBraCoupled():
from sympy.physics.quantum.spin import JyBraCoupled
assert _test_args(JyBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JyKet():
from sympy.physics.quantum.spin import JyKet
assert _test_args(JyKet(1, 0))
def test_sympy__physics__quantum__spin__JyKetCoupled():
from sympy.physics.quantum.spin import JyKetCoupled
assert _test_args(JyKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JyOp():
from sympy.physics.quantum.spin import JyOp
assert _test_args(JyOp('J'))
def test_sympy__physics__quantum__spin__JzBra():
from sympy.physics.quantum.spin import JzBra
assert _test_args(JzBra(1, 0))
def test_sympy__physics__quantum__spin__JzBraCoupled():
from sympy.physics.quantum.spin import JzBraCoupled
assert _test_args(JzBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JzKet():
from sympy.physics.quantum.spin import JzKet
assert _test_args(JzKet(1, 0))
def test_sympy__physics__quantum__spin__JzKetCoupled():
from sympy.physics.quantum.spin import JzKetCoupled
assert _test_args(JzKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JzOp():
from sympy.physics.quantum.spin import JzOp
assert _test_args(JzOp('J'))
def test_sympy__physics__quantum__spin__Rotation():
from sympy.physics.quantum.spin import Rotation
assert _test_args(Rotation(pi, 0, pi/2))
def test_sympy__physics__quantum__spin__SpinState():
from sympy.physics.quantum.spin import SpinState
assert _test_args(SpinState(1, 0))
def test_sympy__physics__quantum__spin__WignerD():
from sympy.physics.quantum.spin import WignerD
assert _test_args(WignerD(0, 1, 2, 3, 4, 5))
def test_sympy__physics__quantum__state__Bra():
from sympy.physics.quantum.state import Bra
assert _test_args(Bra(0))
def test_sympy__physics__quantum__state__BraBase():
from sympy.physics.quantum.state import BraBase
assert _test_args(BraBase(0))
def test_sympy__physics__quantum__state__Ket():
from sympy.physics.quantum.state import Ket
assert _test_args(Ket(0))
def test_sympy__physics__quantum__state__KetBase():
from sympy.physics.quantum.state import KetBase
assert _test_args(KetBase(0))
def test_sympy__physics__quantum__state__State():
from sympy.physics.quantum.state import State
assert _test_args(State(0))
def test_sympy__physics__quantum__state__StateBase():
from sympy.physics.quantum.state import StateBase
assert _test_args(StateBase(0))
def test_sympy__physics__quantum__state__TimeDepBra():
from sympy.physics.quantum.state import TimeDepBra
assert _test_args(TimeDepBra('psi', 't'))
def test_sympy__physics__quantum__state__TimeDepKet():
from sympy.physics.quantum.state import TimeDepKet
assert _test_args(TimeDepKet('psi', 't'))
def test_sympy__physics__quantum__state__TimeDepState():
from sympy.physics.quantum.state import TimeDepState
assert _test_args(TimeDepState('psi', 't'))
def test_sympy__physics__quantum__state__Wavefunction():
    from sympy.physics.quantum.state import Wavefunction
    from sympy.functions import sin
    from sympy import Piecewise, Rational
    n = 1
    L = 1
    # Particle-in-a-box eigenstate.  The normalization uses an exact
    # Rational: the former ``sqrt(2//L)`` was integer *floor* division,
    # which only gave the right value because L happens to be 1 here.
    g = Piecewise((0, x < 0), (0, x > L), (sqrt(Rational(2, L))*sin(n*pi*x/L), True))
    assert _test_args(Wavefunction(g, x))
def test_sympy__physics__quantum__tensorproduct__TensorProduct():
from sympy.physics.quantum.tensorproduct import TensorProduct
assert _test_args(TensorProduct(x, y))
def test_sympy__physics__quantum__identitysearch__GateIdentity():
from sympy.physics.quantum.gate import X
from sympy.physics.quantum.identitysearch import GateIdentity
assert _test_args(GateIdentity(X(0), X(0)))
def test_sympy__physics__quantum__sho1d__SHOOp():
from sympy.physics.quantum.sho1d import SHOOp
assert _test_args(SHOOp('a'))
def test_sympy__physics__quantum__sho1d__RaisingOp():
from sympy.physics.quantum.sho1d import RaisingOp
assert _test_args(RaisingOp('a'))
def test_sympy__physics__quantum__sho1d__LoweringOp():
from sympy.physics.quantum.sho1d import LoweringOp
assert _test_args(LoweringOp('a'))
def test_sympy__physics__quantum__sho1d__NumberOp():
from sympy.physics.quantum.sho1d import NumberOp
assert _test_args(NumberOp('N'))
def test_sympy__physics__quantum__sho1d__Hamiltonian():
from sympy.physics.quantum.sho1d import Hamiltonian
assert _test_args(Hamiltonian('H'))
def test_sympy__physics__quantum__sho1d__SHOState():
from sympy.physics.quantum.sho1d import SHOState
assert _test_args(SHOState(0))
def test_sympy__physics__quantum__sho1d__SHOKet():
from sympy.physics.quantum.sho1d import SHOKet
assert _test_args(SHOKet(0))
def test_sympy__physics__quantum__sho1d__SHOBra():
from sympy.physics.quantum.sho1d import SHOBra
assert _test_args(SHOBra(0))
def test_sympy__physics__secondquant__AnnihilateBoson():
from sympy.physics.secondquant import AnnihilateBoson
assert _test_args(AnnihilateBoson(0))
def test_sympy__physics__secondquant__AnnihilateFermion():
from sympy.physics.secondquant import AnnihilateFermion
assert _test_args(AnnihilateFermion(0))
@SKIP("abstract class")
def test_sympy__physics__secondquant__Annihilator():
pass
def test_sympy__physics__secondquant__AntiSymmetricTensor():
from sympy.physics.secondquant import AntiSymmetricTensor
i, j = symbols('i j', below_fermi=True)
a, b = symbols('a b', above_fermi=True)
assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j)))
def test_sympy__physics__secondquant__BosonState():
from sympy.physics.secondquant import BosonState
assert _test_args(BosonState((0, 1)))
@SKIP("abstract class")
def test_sympy__physics__secondquant__BosonicOperator():
pass
def test_sympy__physics__secondquant__Commutator():
from sympy.physics.secondquant import Commutator
assert _test_args(Commutator(x, y))
def test_sympy__physics__secondquant__CreateBoson():
from sympy.physics.secondquant import CreateBoson
assert _test_args(CreateBoson(0))
def test_sympy__physics__secondquant__CreateFermion():
from sympy.physics.secondquant import CreateFermion
assert _test_args(CreateFermion(0))
@SKIP("abstract class")
def test_sympy__physics__secondquant__Creator():
pass
def test_sympy__physics__secondquant__Dagger():
from sympy.physics.secondquant import Dagger
from sympy import I
assert _test_args(Dagger(2*I))
def test_sympy__physics__secondquant__FermionState():
from sympy.physics.secondquant import FermionState
assert _test_args(FermionState((0, 1)))
def test_sympy__physics__secondquant__FermionicOperator():
from sympy.physics.secondquant import FermionicOperator
assert _test_args(FermionicOperator(0))
def test_sympy__physics__secondquant__FockState():
from sympy.physics.secondquant import FockState
assert _test_args(FockState((0, 1)))
def test_sympy__physics__secondquant__FockStateBosonBra():
from sympy.physics.secondquant import FockStateBosonBra
assert _test_args(FockStateBosonBra((0, 1)))
def test_sympy__physics__secondquant__FockStateBosonKet():
from sympy.physics.secondquant import FockStateBosonKet
assert _test_args(FockStateBosonKet((0, 1)))
def test_sympy__physics__secondquant__FockStateBra():
from sympy.physics.secondquant import FockStateBra
assert _test_args(FockStateBra((0, 1)))
def test_sympy__physics__secondquant__FockStateFermionBra():
from sympy.physics.secondquant import FockStateFermionBra
assert _test_args(FockStateFermionBra((0, 1)))
def test_sympy__physics__secondquant__FockStateFermionKet():
from sympy.physics.secondquant import FockStateFermionKet
assert _test_args(FockStateFermionKet((0, 1)))
def test_sympy__physics__secondquant__FockStateKet():
from sympy.physics.secondquant import FockStateKet
assert _test_args(FockStateKet((0, 1)))
def test_sympy__physics__secondquant__InnerProduct():
from sympy.physics.secondquant import InnerProduct
from sympy.physics.secondquant import FockStateKet, FockStateBra
assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1))))
def test_sympy__physics__secondquant__NO():
from sympy.physics.secondquant import NO, F, Fd
assert _test_args(NO(Fd(x)*F(y)))
def test_sympy__physics__secondquant__PermutationOperator():
from sympy.physics.secondquant import PermutationOperator
assert _test_args(PermutationOperator(0, 1))
def test_sympy__physics__secondquant__SqOperator():
from sympy.physics.secondquant import SqOperator
assert _test_args(SqOperator(0))
def test_sympy__physics__secondquant__TensorSymbol():
from sympy.physics.secondquant import TensorSymbol
assert _test_args(TensorSymbol(x))
def test_sympy__physics__units__dimensions__Dimension():
from sympy.physics.units.dimensions import Dimension
assert _test_args(Dimension("length", "L"))
def test_sympy__physics__units__dimensions__DimensionSystem():
from sympy.physics.units.dimensions import DimensionSystem
from sympy.physics.units.dimensions import length, time, velocity
assert _test_args(DimensionSystem((length, time), (velocity,)))
def test_sympy__physics__units__quantities__Quantity():
    from sympy.physics.units.quantities import Quantity
    # Quantity only needs a name here; the previously imported ``length``
    # dimension was an unused leftover from an older constructor signature.
    assert _test_args(Quantity("dam"))
def test_sympy__physics__units__prefixes__Prefix():
from sympy.physics.units.prefixes import Prefix
assert _test_args(Prefix('kilo', 'k', 3))
def test_sympy__core__numbers__AlgebraicNumber():
from sympy.core.numbers import AlgebraicNumber
assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3]))
def test_sympy__polys__polytools__GroebnerBasis():
from sympy.polys.polytools import GroebnerBasis
assert _test_args(GroebnerBasis([x, y, z], x, y, z))
def test_sympy__polys__polytools__Poly():
from sympy.polys.polytools import Poly
assert _test_args(Poly(2, x, y))
def test_sympy__polys__polytools__PurePoly():
from sympy.polys.polytools import PurePoly
assert _test_args(PurePoly(2, x, y))
@SKIP('abstract class')
def test_sympy__polys__rootoftools__RootOf():
pass
def test_sympy__polys__rootoftools__ComplexRootOf():
from sympy.polys.rootoftools import ComplexRootOf
assert _test_args(ComplexRootOf(x**3 + x + 1, 0))
def test_sympy__polys__rootoftools__RootSum():
from sympy.polys.rootoftools import RootSum
assert _test_args(RootSum(x**3 + x + 1, sin))
def test_sympy__series__limits__Limit():
from sympy.series.limits import Limit
assert _test_args(Limit(x, x, 0, dir='-'))
def test_sympy__series__order__Order():
from sympy.series.order import Order
assert _test_args(Order(1, x, y))
@SKIP('Abstract Class')
def test_sympy__series__sequences__SeqBase():
pass
def test_sympy__series__sequences__EmptySequence():
from sympy.series.sequences import EmptySequence
assert _test_args(EmptySequence())
@SKIP('Abstract Class')
def test_sympy__series__sequences__SeqExpr():
pass
def test_sympy__series__sequences__SeqPer():
from sympy.series.sequences import SeqPer
assert _test_args(SeqPer((1, 2, 3), (0, 10)))
def test_sympy__series__sequences__SeqFormula():
from sympy.series.sequences import SeqFormula
assert _test_args(SeqFormula(x**2, (0, 10)))
def test_sympy__series__sequences__SeqExprOp():
    from sympy.series.sequences import SeqExprOp, sequence
    # One periodic sequence and one formula-based sequence.
    periodic = sequence((1, 2, 3))
    formula = sequence(x**2)
    assert _test_args(SeqExprOp(periodic, formula))
def test_sympy__series__sequences__SeqAdd():
from sympy.series.sequences import SeqAdd, sequence
s1 = sequence((1, 2, 3))
s2 = sequence(x**2)
assert _test_args(SeqAdd(s1, s2))
def test_sympy__series__sequences__SeqMul():
from sympy.series.sequences import SeqMul, sequence
s1 = sequence((1, 2, 3))
s2 = sequence(x**2)
assert _test_args(SeqMul(s1, s2))
@SKIP('Abstract Class')
def test_sympy__series__series_class__SeriesBase():
pass
def test_sympy__series__fourier__FourierSeries():
from sympy.series.fourier import fourier_series
assert _test_args(fourier_series(x, (x, -pi, pi)))
def test_sympy__series__formal__FormalPowerSeries():
from sympy.series.formal import fps
assert _test_args(fps(log(1 + x), x))
def test_sympy__simplify__hyperexpand__Hyper_Function():
from sympy.simplify.hyperexpand import Hyper_Function
assert _test_args(Hyper_Function([2], [1]))
def test_sympy__simplify__hyperexpand__G_Function():
from sympy.simplify.hyperexpand import G_Function
assert _test_args(G_Function([2], [1], [], []))
@SKIP("abstract class")
def test_sympy__tensor__array__ndim_array__ImmutableNDimArray():
pass
def test_sympy__tensor__array__dense_ndim_array__ImmutableDenseNDimArray():
from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray
densarr = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert _test_args(densarr)
def test_sympy__tensor__array__sparse_ndim_array__ImmutableSparseNDimArray():
from sympy.tensor.array.sparse_ndim_array import ImmutableSparseNDimArray
sparr = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4))
assert _test_args(sparr)
def test_sympy__tensor__functions__TensorProduct():
from sympy.tensor.functions import TensorProduct
tp = TensorProduct(3, 4, evaluate=False)
assert _test_args(tp)
def test_sympy__tensor__indexed__Idx():
from sympy.tensor.indexed import Idx
assert _test_args(Idx('test'))
assert _test_args(Idx(1, (0, 10)))
def test_sympy__tensor__indexed__Indexed():
from sympy.tensor.indexed import Indexed, Idx
assert _test_args(Indexed('A', Idx('i'), Idx('j')))
def test_sympy__tensor__indexed__IndexedBase():
from sympy.tensor.indexed import IndexedBase
assert _test_args(IndexedBase('A', shape=(x, y)))
assert _test_args(IndexedBase('A', 1))
assert _test_args(IndexedBase('A')[0, 1])
def test_sympy__tensor__tensor__TensorIndexType():
from sympy.tensor.tensor import TensorIndexType
assert _test_args(TensorIndexType('Lorentz', metric=False))
def test_sympy__tensor__tensor__TensorSymmetry():
from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2)))
def test_sympy__tensor__tensor__TensorType():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
sym = TensorSymmetry(get_symmetric_group_sgs(1))
assert _test_args(TensorType([Lorentz], sym))
def test_sympy__tensor__tensor__TensorHead():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
sym = TensorSymmetry(get_symmetric_group_sgs(1))
S1 = TensorType([Lorentz], sym)
assert _test_args(TensorHead('p', S1, 0))
def test_sympy__tensor__tensor__TensorIndex():
from sympy.tensor.tensor import TensorIndexType, TensorIndex
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
assert _test_args(TensorIndex('i', Lorentz))
@SKIP("abstract class")
def test_sympy__tensor__tensor__TensExpr():
pass
def test_sympy__tensor__tensor__TensAdd():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
a, b = tensor_indices('a,b', Lorentz)
sym = TensorSymmetry(get_symmetric_group_sgs(1))
S1 = TensorType([Lorentz], sym)
p, q = S1('p,q')
t1 = p(a)
t2 = q(a)
assert _test_args(TensAdd(t1, t2))
def test_sympy__tensor__tensor__Tensor():
    # Only the names actually used are imported; ``S`` (sympy.core),
    # ``TensMul``/``TIDS`` and the second index ``b`` were unused leftovers.
    from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    # A single name makes tensor_indices return one TensorIndex.
    a = tensor_indices('a', Lorentz)
    sym = TensorSymmetry(get_symmetric_group_sgs(1))
    S1 = TensorType([Lorentz], sym)
    p = S1('p')
    assert _test_args(p(a))
def test_sympy__tensor__tensor__TensMul():
    # The unused imports ``S`` (sympy.core) and ``TIDS`` have been dropped.
    from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b = tensor_indices('a,b', Lorentz)
    sym = TensorSymmetry(get_symmetric_group_sgs(1))
    S1 = TensorType([Lorentz], sym)
    p = S1('p')
    q = S1('q')
    # ``3*p(a)*q(b)`` builds a TensMul via the operator overloads.
    assert _test_args(3*p(a)*q(b))
def test_as_coeff_add():
    # The constant term is split off; the remaining Add terms are returned
    # as a tuple.
    expr = 7 + 3*x + 4*x**2
    assert expr.as_coeff_add() == (7, (3*x, 4*x**2))
def test_sympy__geometry__curve__Curve():
from sympy.geometry.curve import Curve
assert _test_args(Curve((x, 1), (x, 0, 1)))
def test_sympy__geometry__point__Point():
from sympy.geometry.point import Point
assert _test_args(Point(0, 1))
def test_sympy__geometry__point__Point2D():
from sympy.geometry.point import Point2D
assert _test_args(Point2D(0, 1))
def test_sympy__geometry__point__Point3D():
from sympy.geometry.point import Point3D
assert _test_args(Point3D(0, 1, 2))
def test_sympy__geometry__ellipse__Ellipse():
from sympy.geometry.ellipse import Ellipse
assert _test_args(Ellipse((0, 1), 2, 3))
def test_sympy__geometry__ellipse__Circle():
from sympy.geometry.ellipse import Circle
assert _test_args(Circle((0, 1), 2))
def test_sympy__geometry__parabola__Parabola():
from sympy.geometry.parabola import Parabola
from sympy.geometry.line import Line
assert _test_args(Parabola((0, 0), Line((2, 3), (4, 3))))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity():
pass
def test_sympy__geometry__line__Line():
from sympy.geometry.line import Line
assert _test_args(Line((0, 1), (2, 3)))
def test_sympy__geometry__line__Ray():
from sympy.geometry.line import Ray
assert _test_args(Ray((0, 1), (2, 3)))
def test_sympy__geometry__line__Segment():
from sympy.geometry.line import Segment
assert _test_args(Segment((0, 1), (2, 3)))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity2D():
pass
def test_sympy__geometry__line__Line2D():
from sympy.geometry.line import Line2D
assert _test_args(Line2D((0, 1), (2, 3)))
def test_sympy__geometry__line__Ray2D():
from sympy.geometry.line import Ray2D
assert _test_args(Ray2D((0, 1), (2, 3)))
def test_sympy__geometry__line__Segment2D():
from sympy.geometry.line import Segment2D
assert _test_args(Segment2D((0, 1), (2, 3)))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity3D():
pass
def test_sympy__geometry__line__Line3D():
from sympy.geometry.line import Line3D
assert _test_args(Line3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__line__Segment3D():
from sympy.geometry.line import Segment3D
assert _test_args(Segment3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__line__Ray3D():
from sympy.geometry.line import Ray3D
assert _test_args(Ray3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__plane__Plane():
from sympy.geometry.plane import Plane
assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3)))
def test_sympy__geometry__polygon__Polygon():
from sympy.geometry.polygon import Polygon
assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7)))
def test_sympy__geometry__polygon__RegularPolygon():
from sympy.geometry.polygon import RegularPolygon
assert _test_args(RegularPolygon((0, 1), 2, 3, 4))
def test_sympy__geometry__polygon__Triangle():
from sympy.geometry.polygon import Triangle
assert _test_args(Triangle((0, 1), (2, 3), (4, 5)))
def test_sympy__geometry__entity__GeometryEntity():
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point
assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2]))
@SKIP("abstract class")
def test_sympy__geometry__entity__GeometrySet():
pass
def test_sympy__diffgeom__diffgeom__Manifold():
from sympy.diffgeom import Manifold
assert _test_args(Manifold('name', 3))
def test_sympy__diffgeom__diffgeom__Patch():
from sympy.diffgeom import Manifold, Patch
assert _test_args(Patch('name', Manifold('name', 3)))
def test_sympy__diffgeom__diffgeom__CoordSystem():
from sympy.diffgeom import Manifold, Patch, CoordSystem
assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3))))
@XFAIL
def test_sympy__diffgeom__diffgeom__Point():
from sympy.diffgeom import Manifold, Patch, CoordSystem, Point
assert _test_args(Point(
CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y]))
def test_sympy__diffgeom__diffgeom__BaseScalarField():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseScalarField(cs, 0))
def test_sympy__diffgeom__diffgeom__BaseVectorField():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseVectorField(cs, 0))
def test_sympy__diffgeom__diffgeom__Differential():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(Differential(BaseScalarField(cs, 0)))
def test_sympy__diffgeom__diffgeom__Commutator():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3)))
v = BaseVectorField(cs, 0)
v1 = BaseVectorField(cs1, 0)
assert _test_args(Commutator(v, v1))
def test_sympy__diffgeom__diffgeom__TensorProduct():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
assert _test_args(TensorProduct(d, d))
def test_sympy__diffgeom__diffgeom__WedgeProduct():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
d1 = Differential(BaseScalarField(cs, 1))
assert _test_args(WedgeProduct(d, d1))
def test_sympy__diffgeom__diffgeom__LieDerivative():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, LieDerivative
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
v = BaseVectorField(cs, 0)
assert _test_args(LieDerivative(v, d))
@XFAIL
def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3))
def test_sympy__diffgeom__diffgeom__CovarDerivativeOp():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
v = BaseVectorField(cs, 0)
_test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3))
def test_sympy__categories__baseclasses__Class():
from sympy.categories.baseclasses import Class
assert _test_args(Class())
def test_sympy__categories__baseclasses__Object():
from sympy.categories import Object
assert _test_args(Object("A"))
@XFAIL
def test_sympy__categories__baseclasses__Morphism():
from sympy.categories import Object, Morphism
assert _test_args(Morphism(Object("A"), Object("B")))
def test_sympy__categories__baseclasses__IdentityMorphism():
from sympy.categories import Object, IdentityMorphism
assert _test_args(IdentityMorphism(Object("A")))
def test_sympy__categories__baseclasses__NamedMorphism():
from sympy.categories import Object, NamedMorphism
assert _test_args(NamedMorphism(Object("A"), Object("B"), "f"))
def test_sympy__categories__baseclasses__CompositeMorphism():
    from sympy.categories import Object, NamedMorphism, CompositeMorphism
    # Compose f: A -> B with g: B -> C.
    A, B, C = Object("A"), Object("B"), Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")
    assert _test_args(CompositeMorphism(f, g))
def test_sympy__categories__baseclasses__Diagram():
    from sympy.categories import Object, NamedMorphism, Diagram
    # A one-morphism diagram f: A -> B.  The former ``C = Object("C")``
    # local was never used and has been removed.
    A = Object("A")
    B = Object("B")
    f = NamedMorphism(A, B, "f")
    d = Diagram([f])
    assert _test_args(d)
def test_sympy__categories__baseclasses__Category():
    """Smoke-test that Category stores only Basic args."""
    from sympy.categories import Object, NamedMorphism, Diagram, Category
    # Two composable morphisms A -> B -> C give one two-step diagram and
    # one single-step diagram to populate commutative_diagrams with.
    A, B, C = Object("A"), Object("B"), Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")
    diagrams = [Diagram([f, g]), Diagram([f])]
    K = Category("K", commutative_diagrams=diagrams)
    assert _test_args(K)
def test_sympy__ntheory__factor___totient():
from sympy.ntheory.factor_ import totient
k = symbols('k', integer=True)
t = totient(k)
assert _test_args(t)
def test_sympy__ntheory__factor___reduced_totient():
from sympy.ntheory.factor_ import reduced_totient
k = symbols('k', integer=True)
t = reduced_totient(k)
assert _test_args(t)
def test_sympy__ntheory__factor___divisor_sigma():
from sympy.ntheory.factor_ import divisor_sigma
k = symbols('k', integer=True)
n = symbols('n', integer=True)
t = divisor_sigma(n, k)
assert _test_args(t)
def test_sympy__ntheory__factor___udivisor_sigma():
from sympy.ntheory.factor_ import udivisor_sigma
k = symbols('k', integer=True)
n = symbols('n', integer=True)
t = udivisor_sigma(n, k)
assert _test_args(t)
def test_sympy__ntheory__factor___primenu():
from sympy.ntheory.factor_ import primenu
n = symbols('n', integer=True)
t = primenu(n)
assert _test_args(t)
def test_sympy__ntheory__factor___primeomega():
from sympy.ntheory.factor_ import primeomega
n = symbols('n', integer=True)
t = primeomega(n)
assert _test_args(t)
def test_sympy__ntheory__residue_ntheory__mobius():
from sympy.ntheory import mobius
assert _test_args(mobius(2))
def test_sympy__physics__optics__waves__TWave():
from sympy.physics.optics import TWave
A, f, phi = symbols('A, f, phi')
assert _test_args(TWave(A, f, phi))
def test_sympy__physics__optics__gaussopt__BeamParameter():
from sympy.physics.optics import BeamParameter
assert _test_args(BeamParameter(530e-9, 1, w=1e-3))
def test_sympy__physics__optics__medium__Medium():
from sympy.physics.optics import Medium
assert _test_args(Medium('m'))
def test_sympy__codegen__ast__Assignment():
from sympy.codegen.ast import Assignment
assert _test_args(Assignment(x, y))
def test_sympy__codegen__cfunctions__expm1():
from sympy.codegen.cfunctions import expm1
assert _test_args(expm1(x))
def test_sympy__codegen__cfunctions__log1p():
from sympy.codegen.cfunctions import log1p
assert _test_args(log1p(x))
def test_sympy__codegen__cfunctions__exp2():
from sympy.codegen.cfunctions import exp2
assert _test_args(exp2(x))
def test_sympy__codegen__cfunctions__log2():
from sympy.codegen.cfunctions import log2
assert _test_args(log2(x))
def test_sympy__codegen__cfunctions__fma():
from sympy.codegen.cfunctions import fma
assert _test_args(fma(x, y, z))
def test_sympy__codegen__cfunctions__log10():
from sympy.codegen.cfunctions import log10
assert _test_args(log10(x))
def test_sympy__codegen__cfunctions__Sqrt():
from sympy.codegen.cfunctions import Sqrt
assert _test_args(Sqrt(x))
def test_sympy__codegen__cfunctions__Cbrt():
from sympy.codegen.cfunctions import Cbrt
assert _test_args(Cbrt(x))
def test_sympy__codegen__cfunctions__hypot():
from sympy.codegen.cfunctions import hypot
assert _test_args(hypot(x, y))
def test_sympy__codegen__ffunctions__FFunction():
from sympy.codegen.ffunctions import FFunction
assert _test_args(FFunction('f'))
def test_sympy__codegen__ffunctions__F95Function():
from sympy.codegen.ffunctions import F95Function
assert _test_args(F95Function('f'))
def test_sympy__codegen__ffunctions__isign():
from sympy.codegen.ffunctions import isign
assert _test_args(isign(1, x))
def test_sympy__codegen__ffunctions__dsign():
from sympy.codegen.ffunctions import dsign
assert _test_args(dsign(1, x))
def test_sympy__codegen__ffunctions__cmplx():
from sympy.codegen.ffunctions import cmplx
assert _test_args(cmplx(x, y))
def test_sympy__codegen__ffunctions__kind():
from sympy.codegen.ffunctions import kind
assert _test_args(kind(x))
def test_sympy__codegen__ffunctions__merge():
from sympy.codegen.ffunctions import merge
assert _test_args(merge(1, 2, Eq(x, 0)))
def test_sympy__codegen__ffunctions___literal():
from sympy.codegen.ffunctions import _literal
assert _test_args(_literal(1))
def test_sympy__codegen__ffunctions__literal_sp():
from sympy.codegen.ffunctions import literal_sp
assert _test_args(literal_sp(1))
def test_sympy__codegen__ffunctions__literal_dp():
from sympy.codegen.ffunctions import literal_dp
assert _test_args(literal_dp(1))
def test_sympy__vector__coordsysrect__CoordSys3D():
from sympy.vector.coordsysrect import CoordSys3D
assert _test_args(CoordSys3D('C'))
def test_sympy__vector__point__Point():
from sympy.vector.point import Point
assert _test_args(Point('P'))
def test_sympy__vector__basisdependent__BasisDependent():
from sympy.vector.basisdependent import BasisDependent
#These classes have been created to maintain an OOP hierarchy
#for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentMul():
from sympy.vector.basisdependent import BasisDependentMul
#These classes have been created to maintain an OOP hierarchy
#for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentAdd():
from sympy.vector.basisdependent import BasisDependentAdd
#These classes have been created to maintain an OOP hierarchy
#for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentZero():
from sympy.vector.basisdependent import BasisDependentZero
#These classes have been created to maintain an OOP hierarchy
#for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__vector__BaseVector():
from sympy.vector.vector import BaseVector
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(BaseVector(0, C, ' ', ' '))
def test_sympy__vector__vector__VectorAdd():
    """Smoke-test that VectorAdd (and, incidentally, VectorMul) store only Basic args."""
    from sympy.vector.vector import VectorAdd, VectorMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    # NOTE: this shadows the module-level x, y, z symbols inside this function.
    from sympy.abc import a, b, c, x, y, z
    v1 = a*C.i + b*C.j + c*C.k
    v2 = x*C.i + y*C.j + z*C.k
    assert _test_args(VectorAdd(v1, v2))
    # Extra coverage: VectorMul built from a full 3-component vector,
    # complementing the single-basis-vector case tested separately below.
    assert _test_args(VectorMul(x, v1))
def test_sympy__vector__vector__VectorMul():
from sympy.vector.vector import VectorMul
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
from sympy.abc import a
assert _test_args(VectorMul(a, C.i))
def test_sympy__vector__vector__VectorZero():
from sympy.vector.vector import VectorZero
assert _test_args(VectorZero())
def test_sympy__vector__vector__Vector():
from sympy.vector.vector import Vector
#Vector is never to be initialized using args
pass
def test_sympy__vector__vector__Cross():
from sympy.vector.vector import Cross
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
_test_args(Cross(C.i, C.j))
def test_sympy__vector__vector__Dot():
from sympy.vector.vector import Dot
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
_test_args(Dot(C.i, C.j))
def test_sympy__vector__dyadic__Dyadic():
from sympy.vector.dyadic import Dyadic
#Dyadic is never to be initialized using args
pass
def test_sympy__vector__dyadic__BaseDyadic():
from sympy.vector.dyadic import BaseDyadic
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(BaseDyadic(C.i, C.j))
def test_sympy__vector__dyadic__DyadicMul():
from sympy.vector.dyadic import BaseDyadic, DyadicMul
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j)))
def test_sympy__vector__dyadic__DyadicAdd():
from sympy.vector.dyadic import BaseDyadic, DyadicAdd
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i),
BaseDyadic(C.i, C.j)))
def test_sympy__vector__dyadic__DyadicZero():
from sympy.vector.dyadic import DyadicZero
assert _test_args(DyadicZero())
def test_sympy__vector__deloperator__Del():
from sympy.vector.deloperator import Del
assert _test_args(Del())
def test_sympy__vector__operators__Curl():
from sympy.vector.operators import Curl
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(Curl(C.i))
def test_sympy__vector__operators__Divergence():
from sympy.vector.operators import Divergence
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(Divergence(C.i))
def test_sympy__vector__operators__Gradient():
from sympy.vector.operators import Gradient
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(Gradient(C.x))
def test_sympy__vector__orienters__Orienter():
from sympy.vector.orienters import Orienter
#Not to be initialized
def test_sympy__vector__orienters__ThreeAngleOrienter():
from sympy.vector.orienters import ThreeAngleOrienter
#Not to be initialized
def test_sympy__vector__orienters__AxisOrienter():
from sympy.vector.orienters import AxisOrienter
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(AxisOrienter(x, C.i))
def test_sympy__vector__orienters__BodyOrienter():
from sympy.vector.orienters import BodyOrienter
assert _test_args(BodyOrienter(x, y, z, '123'))
def test_sympy__vector__orienters__SpaceOrienter():
from sympy.vector.orienters import SpaceOrienter
assert _test_args(SpaceOrienter(x, y, z, '123'))
def test_sympy__vector__orienters__QuaternionOrienter():
from sympy.vector.orienters import QuaternionOrienter
a, b, c, d = symbols('a b c d')
assert _test_args(QuaternionOrienter(a, b, c, d))
def test_sympy__vector__scalar__BaseScalar():
from sympy.vector.scalar import BaseScalar
from sympy.vector.coordsysrect import CoordSys3D
C = CoordSys3D('C')
assert _test_args(BaseScalar(0, C, ' ', ' '))
def test_sympy__physics__wigner__Wigner3j():
from sympy.physics.wigner import Wigner3j
assert _test_args(Wigner3j(0, 0, 0, 0, 0, 0))
def test_sympy__integrals__rubi__symbol__matchpyWC():
from sympy.integrals.rubi.symbol import matchpyWC
assert _test_args(matchpyWC(1, True, 'a'))
| 31.652268 | 135 | 0.777854 |
# be instantiated, add it here anyway with @SKIP("abstract class) (see
# e.g. Function).
import os
import re
import warnings
import io
from sympy import (Basic, S, symbols, sqrt, sin, oo, Interval, exp, Lambda, pi,
Eq, log)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, SKIP
from sympy.utilities.exceptions import SymPyDeprecationWarning
x, y, z = symbols('x,y,z')
def test_all_classes_are_tested():
    """Walk the sympy source tree and assert every Basic subclass has a
    ``test_<module>__<Class>`` function defined in this file.
    """
    # Locate the sympy package root: this file lives in sympy/core/tests,
    # so two levels up is the package directory.
    this = os.path.split(__file__)[0]
    path = os.path.join(this, os.pardir, os.pardir)
    sympy_path = os.path.abspath(path)
    # Everything up to (and including) the directory that contains the
    # ``sympy`` package; stripped from walked paths to get dotted module names.
    prefix = os.path.split(sympy_path)[0] + os.sep
    # Matches top-of-line ``class Name(`` declarations in source text.
    re_cls = re.compile(r"^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE)
    # submodule dotted name -> list of Basic-subclass names found in it
    modules = {}
    for root, dirs, files in os.walk(sympy_path):
        module = root.replace(prefix, "").replace(os.sep, ".")
        for file in files:
            # Skip private modules, tests, and benchmarks.
            if file.startswith(("_", "test_", "bench_")):
                continue
            if not file.endswith(".py"):
                continue
            with io.open(os.path.join(root, file), "r", encoding='utf-8') as f:
                text = f.read()
            submodule = module + '.' + file[:-3]
            # Class names are found textually first, then verified by import.
            names = re_cls.findall(text)
            if not names:
                continue
            try:
                mod = __import__(submodule, fromlist=names)
            except ImportError:
                # Optional dependencies may be missing; skip such modules.
                continue
            def is_Basic(name):
                # Keep only classes that are (possibly deprecated wrappers
                # around) Basic subclasses.
                cls = getattr(mod, name)
                if hasattr(cls, '_sympy_deprecated_func'):
                    cls = cls._sympy_deprecated_func
                return issubclass(cls, Basic)
            names = list(filter(is_Basic, names))
            if names:
                modules[submodule] = names
    # Compare discovered classes against the test functions defined in this
    # module's global namespace.
    ns = globals()
    failed = []
    for module, names in modules.items():
        # Dots become double underscores in the test-name convention.
        mod = module.replace('.', '__')
        for name in names:
            test = 'test_' + mod + '__' + name
            if test not in ns:
                failed.append(module + '.' + name)
    # reset all SymPyDeprecationWarning into errors
    # NOTE: this mutates global warning state for the rest of the test run.
    warnings.simplefilter("error", category=SymPyDeprecationWarning)
    assert not failed, "Missing classes: %s.  Please add tests for these to sympy/core/tests/test_args.py." % ", ".join(failed)
def _test_args(obj):
return all(isinstance(arg, Basic) for arg in obj.args)
def test_sympy__assumptions__assume__AppliedPredicate():
    """Smoke-test AppliedPredicate built directly and via the Q namespace."""
    from sympy.assumptions.assume import AppliedPredicate, Predicate
    from sympy import Q
    pred = Predicate("test")
    assert _test_args(AppliedPredicate(pred, 2))
    assert _test_args(Q.is_true(True))
def test_sympy__assumptions__assume__Predicate():
from sympy.assumptions.assume import Predicate
assert _test_args(Predicate("test"))
def test_sympy__assumptions__sathandlers__UnevaluatedOnFree():
from sympy.assumptions.sathandlers import UnevaluatedOnFree
from sympy import Q
assert _test_args(UnevaluatedOnFree(Q.positive))
assert _test_args(UnevaluatedOnFree(Q.positive(x)))
assert _test_args(UnevaluatedOnFree(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__AllArgs():
from sympy.assumptions.sathandlers import AllArgs
from sympy import Q
assert _test_args(AllArgs(Q.positive))
assert _test_args(AllArgs(Q.positive(x)))
assert _test_args(AllArgs(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__AnyArgs():
from sympy.assumptions.sathandlers import AnyArgs
from sympy import Q
assert _test_args(AnyArgs(Q.positive))
assert _test_args(AnyArgs(Q.positive(x)))
assert _test_args(AnyArgs(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__ExactlyOneArg():
from sympy.assumptions.sathandlers import ExactlyOneArg
from sympy import Q
assert _test_args(ExactlyOneArg(Q.positive))
assert _test_args(ExactlyOneArg(Q.positive(x)))
assert _test_args(ExactlyOneArg(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__CheckOldAssump():
from sympy.assumptions.sathandlers import CheckOldAssump
from sympy import Q
assert _test_args(CheckOldAssump(Q.positive))
assert _test_args(CheckOldAssump(Q.positive(x)))
assert _test_args(CheckOldAssump(Q.positive(x*y)))
def test_sympy__assumptions__sathandlers__CheckIsPrime():
from sympy.assumptions.sathandlers import CheckIsPrime
from sympy import Q
# Input must be a number
assert _test_args(CheckIsPrime(Q.positive))
assert _test_args(CheckIsPrime(Q.positive(5)))
@SKIP("abstract Class")
def test_sympy__codegen__ast__AugmentedAssignment():
from sympy.codegen.ast import AugmentedAssignment
assert _test_args(AugmentedAssignment(x, 1))
def test_sympy__codegen__ast__AddAugmentedAssignment():
from sympy.codegen.ast import AddAugmentedAssignment
assert _test_args(AddAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__SubAugmentedAssignment():
from sympy.codegen.ast import SubAugmentedAssignment
assert _test_args(SubAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__MulAugmentedAssignment():
from sympy.codegen.ast import MulAugmentedAssignment
assert _test_args(MulAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__DivAugmentedAssignment():
from sympy.codegen.ast import DivAugmentedAssignment
assert _test_args(DivAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__ModAugmentedAssignment():
from sympy.codegen.ast import ModAugmentedAssignment
assert _test_args(ModAugmentedAssignment(x, 1))
def test_sympy__codegen__ast__CodeBlock():
from sympy.codegen.ast import CodeBlock, Assignment
assert _test_args(CodeBlock(Assignment(x, 1), Assignment(y, 2)))
def test_sympy__codegen__ast__For():
from sympy.codegen.ast import For, CodeBlock, AddAugmentedAssignment
from sympy import Range
assert _test_args(For(x, Range(10), CodeBlock(AddAugmentedAssignment(y, 1))))
def test_sympy__codegen__ast__Token():
from sympy.codegen.ast import Token
assert _test_args(Token())
def test_sympy__codegen__ast__Type():
from sympy.codegen.ast import Type
assert _test_args(Type('float128'))
def test_sympy__codegen__ast__IntBaseType():
from sympy.codegen.ast import IntBaseType
assert _test_args(IntBaseType('bigint'))
def test_sympy__codegen__ast___SizedIntType():
from sympy.codegen.ast import _SizedIntType
assert _test_args(_SizedIntType('int128', 128))
def test_sympy__codegen__ast__SignedIntType():
from sympy.codegen.ast import SignedIntType
assert _test_args(SignedIntType('int128_with_sign', 128))
def test_sympy__codegen__ast__UnsignedIntType():
from sympy.codegen.ast import UnsignedIntType
assert _test_args(UnsignedIntType('unt128', 128))
def test_sympy__codegen__ast__FloatType():
from sympy.codegen.ast import FloatType
assert _test_args(FloatType('float242', 242, nmant=142, nexp=99))
def test_sympy__codegen__ast__ComplexType():
from sympy.codegen.ast import ComplexType
assert _test_args(ComplexType('complex42', 42, nmant=15, nexp=5))
def test_sympy__codegen__ast__Attribute():
from sympy.codegen.ast import Attribute
assert _test_args(Attribute('noexcept'))
def test_sympy__codegen__ast__Variable():
from sympy.codegen.ast import Variable, Type, value_const
assert _test_args(Variable(x))
assert _test_args(Variable(y, {value_const}, Type('float32')))
assert _test_args(Variable(z, type_=Type('float64')))
def test_sympy__codegen__ast__Pointer():
from sympy.codegen.ast import Pointer, Type, pointer_const
assert _test_args(Pointer(x))
assert _test_args(Pointer(y, type_=Type('float32')))
assert _test_args(Pointer(z, {pointer_const}, Type('float64')))
def test_sympy__codegen__ast__Declaration():
from sympy.codegen.ast import Declaration, Variable, Type
vx = Variable(x, type_=Type('float'))
assert _test_args(Declaration(vx))
assert _test_args(Declaration(vx, 3.0))
@XFAIL
def test_sympy__combinatorics__graycode__GrayCode():
from sympy.combinatorics.graycode import GrayCode
# an integer is given and returned from GrayCode as the arg
assert _test_args(GrayCode(3, start='100'))
assert _test_args(GrayCode(3, rank=1))
def test_sympy__combinatorics__subsets__Subset():
from sympy.combinatorics.subsets import Subset
assert _test_args(Subset([0, 1], [0, 1, 2, 3]))
assert _test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd']))
@XFAIL
def test_sympy__combinatorics__permutations__Permutation():
from sympy.combinatorics.permutations import Permutation
assert _test_args(Permutation([0, 1, 2, 3]))
def test_sympy__combinatorics__perm_groups__PermutationGroup():
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.perm_groups import PermutationGroup
assert _test_args(PermutationGroup([Permutation([0, 1])]))
def test_sympy__combinatorics__polyhedron__Polyhedron():
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.polyhedron import Polyhedron
from sympy.abc import w, x, y, z
pgroup = [Permutation([[0, 1, 2], [3]]),
Permutation([[0, 1, 3], [2]]),
Permutation([[0, 2, 3], [1]]),
Permutation([[1, 2, 3], [0]]),
Permutation([[0, 1], [2, 3]]),
Permutation([[0, 2], [1, 3]]),
Permutation([[0, 3], [1, 2]]),
Permutation([[0, 1, 2, 3]])]
corners = [w, x, y, z]
faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)]
assert _test_args(Polyhedron(corners, faces, pgroup))
@XFAIL
def test_sympy__combinatorics__prufer__Prufer():
from sympy.combinatorics.prufer import Prufer
assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4))
def test_sympy__combinatorics__partitions__Partition():
from sympy.combinatorics.partitions import Partition
assert _test_args(Partition([1]))
@XFAIL
def test_sympy__combinatorics__partitions__IntegerPartition():
from sympy.combinatorics.partitions import IntegerPartition
assert _test_args(IntegerPartition([1]))
def test_sympy__concrete__products__Product():
from sympy.concrete.products import Product
assert _test_args(Product(x, (x, 0, 10)))
assert _test_args(Product(x, (x, 0, y), (y, 0, 10)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_limits__ExprWithLimits():
from sympy.concrete.expr_with_limits import ExprWithLimits
assert _test_args(ExprWithLimits(x, (x, 0, 10)))
assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_limits__AddWithLimits():
from sympy.concrete.expr_with_limits import AddWithLimits
assert _test_args(AddWithLimits(x, (x, 0, 10)))
assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3)))
@SKIP("abstract Class")
def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits():
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
assert _test_args(ExprWithIntLimits(x, (x, 0, 10)))
assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3)))
def test_sympy__concrete__summations__Sum():
from sympy.concrete.summations import Sum
assert _test_args(Sum(x, (x, 0, 10)))
assert _test_args(Sum(x, (x, 0, y), (y, 0, 10)))
def test_sympy__core__add__Add():
from sympy.core.add import Add
assert _test_args(Add(x, y, z, 2))
def test_sympy__core__basic__Atom():
from sympy.core.basic import Atom
assert _test_args(Atom())
def test_sympy__core__basic__Basic():
from sympy.core.basic import Basic
assert _test_args(Basic())
def test_sympy__core__containers__Dict():
from sympy.core.containers import Dict
assert _test_args(Dict({x: y, y: z}))
def test_sympy__core__containers__Tuple():
from sympy.core.containers import Tuple
assert _test_args(Tuple(x, y, z, 2))
def test_sympy__core__expr__AtomicExpr():
from sympy.core.expr import AtomicExpr
assert _test_args(AtomicExpr())
def test_sympy__core__expr__Expr():
from sympy.core.expr import Expr
assert _test_args(Expr())
def test_sympy__core__expr__UnevaluatedExpr():
from sympy.core.expr import UnevaluatedExpr
from sympy.abc import x
assert _test_args(UnevaluatedExpr(x))
def test_sympy__core__function__Application():
from sympy.core.function import Application
assert _test_args(Application(1, 2, 3))
def test_sympy__core__function__AppliedUndef():
from sympy.core.function import AppliedUndef
assert _test_args(AppliedUndef(1, 2, 3))
def test_sympy__core__function__Derivative():
from sympy.core.function import Derivative
assert _test_args(Derivative(2, x, y, 3))
@SKIP("abstract class")
def test_sympy__core__function__Function():
pass
def test_sympy__core__function__Lambda():
assert _test_args(Lambda((x, y), x + y + z))
def test_sympy__core__function__Subs():
from sympy.core.function import Subs
assert _test_args(Subs(x + y, x, 2))
def test_sympy__core__function__WildFunction():
from sympy.core.function import WildFunction
assert _test_args(WildFunction('f'))
def test_sympy__core__mod__Mod():
from sympy.core.mod import Mod
assert _test_args(Mod(x, 2))
def test_sympy__core__mul__Mul():
from sympy.core.mul import Mul
assert _test_args(Mul(2, x, y, z))
def test_sympy__core__numbers__Catalan():
from sympy.core.numbers import Catalan
assert _test_args(Catalan())
def test_sympy__core__numbers__ComplexInfinity():
from sympy.core.numbers import ComplexInfinity
assert _test_args(ComplexInfinity())
def test_sympy__core__numbers__EulerGamma():
from sympy.core.numbers import EulerGamma
assert _test_args(EulerGamma())
def test_sympy__core__numbers__Exp1():
from sympy.core.numbers import Exp1
assert _test_args(Exp1())
def test_sympy__core__numbers__Float():
from sympy.core.numbers import Float
assert _test_args(Float(1.23))
def test_sympy__core__numbers__GoldenRatio():
from sympy.core.numbers import GoldenRatio
assert _test_args(GoldenRatio())
def test_sympy__core__numbers__Half():
from sympy.core.numbers import Half
assert _test_args(Half())
def test_sympy__core__numbers__ImaginaryUnit():
from sympy.core.numbers import ImaginaryUnit
assert _test_args(ImaginaryUnit())
def test_sympy__core__numbers__Infinity():
from sympy.core.numbers import Infinity
assert _test_args(Infinity())
def test_sympy__core__numbers__Integer():
from sympy.core.numbers import Integer
assert _test_args(Integer(7))
@SKIP("abstract class")
def test_sympy__core__numbers__IntegerConstant():
pass
def test_sympy__core__numbers__NaN():
from sympy.core.numbers import NaN
assert _test_args(NaN())
def test_sympy__core__numbers__NegativeInfinity():
from sympy.core.numbers import NegativeInfinity
assert _test_args(NegativeInfinity())
def test_sympy__core__numbers__NegativeOne():
from sympy.core.numbers import NegativeOne
assert _test_args(NegativeOne())
def test_sympy__core__numbers__Number():
from sympy.core.numbers import Number
assert _test_args(Number(1, 7))
def test_sympy__core__numbers__NumberSymbol():
from sympy.core.numbers import NumberSymbol
assert _test_args(NumberSymbol())
def test_sympy__core__numbers__One():
from sympy.core.numbers import One
assert _test_args(One())
def test_sympy__core__numbers__Pi():
from sympy.core.numbers import Pi
assert _test_args(Pi())
def test_sympy__core__numbers__Rational():
from sympy.core.numbers import Rational
assert _test_args(Rational(1, 7))
@SKIP("abstract class")
def test_sympy__core__numbers__RationalConstant():
pass
def test_sympy__core__numbers__Zero():
from sympy.core.numbers import Zero
assert _test_args(Zero())
@SKIP("abstract class")
def test_sympy__core__operations__AssocOp():
pass
@SKIP("abstract class")
def test_sympy__core__operations__LatticeOp():
pass
def test_sympy__core__power__Pow():
from sympy.core.power import Pow
assert _test_args(Pow(x, 2))
def test_sympy__algebras__quaternion__Quaternion():
from sympy.algebras.quaternion import Quaternion
assert _test_args(Quaternion(x, 1, 2, 3))
def test_sympy__core__relational__Equality():
from sympy.core.relational import Equality
assert _test_args(Equality(x, 2))
def test_sympy__core__relational__GreaterThan():
from sympy.core.relational import GreaterThan
assert _test_args(GreaterThan(x, 2))
def test_sympy__core__relational__LessThan():
from sympy.core.relational import LessThan
assert _test_args(LessThan(x, 2))
@SKIP("abstract class")
def test_sympy__core__relational__Relational():
pass
def test_sympy__core__relational__StrictGreaterThan():
from sympy.core.relational import StrictGreaterThan
assert _test_args(StrictGreaterThan(x, 2))
def test_sympy__core__relational__StrictLessThan():
from sympy.core.relational import StrictLessThan
assert _test_args(StrictLessThan(x, 2))
def test_sympy__core__relational__Unequality():
from sympy.core.relational import Unequality
assert _test_args(Unequality(x, 2))
def test_sympy__sandbox__indexed_integrals__IndexedIntegral():
from sympy.tensor import IndexedBase, Idx
from sympy.sandbox.indexed_integrals import IndexedIntegral
A = IndexedBase('A')
i, j = symbols('i j', integer=True)
a1, a2 = symbols('a1:3', cls=Idx)
assert _test_args(IndexedIntegral(A[a1], A[a2]))
assert _test_args(IndexedIntegral(A[i], A[j]))
def test_sympy__calculus__util__AccumulationBounds():
from sympy.calculus.util import AccumulationBounds
assert _test_args(AccumulationBounds(0, 1))
def test_sympy__sets__ordinals__OmegaPower():
from sympy.sets.ordinals import OmegaPower
assert _test_args(OmegaPower(1, 1))
def test_sympy__sets__ordinals__Ordinal():
from sympy.sets.ordinals import Ordinal, OmegaPower
assert _test_args(Ordinal(OmegaPower(2, 1)))
def test_sympy__sets__ordinals__OrdinalOmega():
from sympy.sets.ordinals import OrdinalOmega
assert _test_args(OrdinalOmega())
def test_sympy__sets__ordinals__OrdinalZero():
from sympy.sets.ordinals import OrdinalZero
assert _test_args(OrdinalZero())
def test_sympy__sets__sets__EmptySet():
from sympy.sets.sets import EmptySet
assert _test_args(EmptySet())
def test_sympy__sets__sets__UniversalSet():
from sympy.sets.sets import UniversalSet
assert _test_args(UniversalSet())
def test_sympy__sets__sets__FiniteSet():
from sympy.sets.sets import FiniteSet
assert _test_args(FiniteSet(x, y, z))
def test_sympy__sets__sets__Interval():
from sympy.sets.sets import Interval
assert _test_args(Interval(0, 1))
def test_sympy__sets__sets__ProductSet():
from sympy.sets.sets import ProductSet, Interval
assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1)))
@SKIP("does it make sense to test this?")
def test_sympy__sets__sets__Set():
from sympy.sets.sets import Set
assert _test_args(Set())
def test_sympy__sets__sets__Intersection():
from sympy.sets.sets import Intersection, Interval
assert _test_args(Intersection(Interval(0, 3), Interval(2, 4),
evaluate=False))
def test_sympy__sets__sets__Union():
from sympy.sets.sets import Union, Interval
assert _test_args(Union(Interval(0, 1), Interval(2, 3)))
def test_sympy__sets__sets__Complement():
from sympy.sets.sets import Complement
assert _test_args(Complement(Interval(0, 2), Interval(0, 1)))
def test_sympy__sets__sets__SymmetricDifference():
    """Smoke-test SymmetricDifference of two overlapping finite sets."""
    from sympy.sets.sets import FiniteSet, SymmetricDifference
    left = FiniteSet(1, 2, 3)
    right = FiniteSet(2, 3, 4)
    assert _test_args(SymmetricDifference(left, right))
def test_sympy__core__trace__Tr():
from sympy.core.trace import Tr
a, b = symbols('a b')
assert _test_args(Tr(a + b))
def test_sympy__sets__setexpr__SetExpr():
from sympy.sets.setexpr import SetExpr
assert _test_args(SetExpr(Interval(0, 1)))
def test_sympy__sets__fancysets__Naturals():
from sympy.sets.fancysets import Naturals
assert _test_args(Naturals())
def test_sympy__sets__fancysets__Naturals0():
from sympy.sets.fancysets import Naturals0
assert _test_args(Naturals0())
def test_sympy__sets__fancysets__Integers():
from sympy.sets.fancysets import Integers
assert _test_args(Integers())
def test_sympy__sets__fancysets__Reals():
from sympy.sets.fancysets import Reals
assert _test_args(Reals())
def test_sympy__sets__fancysets__Complexes():
from sympy.sets.fancysets import Complexes
assert _test_args(Complexes())
def test_sympy__sets__fancysets__ComplexRegion():
from sympy.sets.fancysets import ComplexRegion
from sympy import S
from sympy.sets import Interval
a = Interval(0, 1)
b = Interval(2, 3)
theta = Interval(0, 2*S.Pi)
assert _test_args(ComplexRegion(a*b))
assert _test_args(ComplexRegion(a*theta, polar=True))
def test_sympy__sets__fancysets__ImageSet():
from sympy.sets.fancysets import ImageSet
from sympy import S, Symbol
x = Symbol('x')
assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals))
def test_sympy__sets__fancysets__Range():
from sympy.sets.fancysets import Range
assert _test_args(Range(1, 5, 1))
def test_sympy__sets__conditionset__ConditionSet():
from sympy.sets.conditionset import ConditionSet
from sympy import S, Symbol
x = Symbol('x')
assert _test_args(ConditionSet(x, Eq(x**2, 1), S.Reals))
def test_sympy__sets__contains__Contains():
from sympy.sets.fancysets import Range
from sympy.sets.contains import Contains
assert _test_args(Contains(x, Range(0, 10, 2)))
# STATS
# Module-level fixtures shared by the sympy.stats tests below: a standard
# normal distribution (continuous) and a six-sided die (finite discrete).
from sympy.stats.crv_types import NormalDistribution
nd = NormalDistribution(0, 1)
from sympy.stats.frv_types import DieDistribution
die = DieDistribution(6)
# Argument-invariance checks for the sympy.stats domain / probability-space
# hierarchy (continuous, discrete and generic random-variable classes).
# Each stub constructs one instance and runs the file-wide ``_test_args``
# check; abstract classes are marked with the ``SKIP`` decorator.
def test_sympy__stats__crv__ContinuousDomain():
    from sympy.stats.crv import ContinuousDomain
    assert _test_args(ContinuousDomain({x}, Interval(-oo, oo)))

def test_sympy__stats__crv__SingleContinuousDomain():
    from sympy.stats.crv import SingleContinuousDomain
    assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo)))

def test_sympy__stats__crv__ProductContinuousDomain():
    from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain
    D = SingleContinuousDomain(x, Interval(-oo, oo))
    E = SingleContinuousDomain(y, Interval(0, oo))
    assert _test_args(ProductContinuousDomain(D, E))

def test_sympy__stats__crv__ConditionalContinuousDomain():
    from sympy.stats.crv import (SingleContinuousDomain,
                                 ConditionalContinuousDomain)
    D = SingleContinuousDomain(x, Interval(-oo, oo))
    assert _test_args(ConditionalContinuousDomain(D, x > 0))

def test_sympy__stats__crv__ContinuousPSpace():
    from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain
    D = SingleContinuousDomain(x, Interval(-oo, oo))
    assert _test_args(ContinuousPSpace(D, nd))

def test_sympy__stats__crv__SingleContinuousPSpace():
    from sympy.stats.crv import SingleContinuousPSpace
    assert _test_args(SingleContinuousPSpace(x, nd))

def test_sympy__stats__crv__ProductContinuousPSpace():
    from sympy.stats.crv import ProductContinuousPSpace, SingleContinuousPSpace
    A = SingleContinuousPSpace(x, nd)
    B = SingleContinuousPSpace(y, nd)
    assert _test_args(ProductContinuousPSpace(A, B))

@SKIP("abstract class")
def test_sympy__stats__crv__SingleContinuousDistribution():
    pass

def test_sympy__stats__drv__SingleDiscreteDomain():
    from sympy.stats.drv import SingleDiscreteDomain
    assert _test_args(SingleDiscreteDomain(x, S.Naturals))

def test_sympy__stats__drv__SingleDiscretePSpace():
    from sympy.stats.drv import SingleDiscretePSpace
    from sympy.stats.drv_types import PoissonDistribution
    assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1)))

def test_sympy__stats__drv__DiscretePSpace():
    from sympy.stats.drv import DiscretePSpace, SingleDiscreteDomain
    density = Lambda(x, 2**(-x))
    domain = SingleDiscreteDomain(x, S.Naturals)
    assert _test_args(DiscretePSpace(domain, density))

def test_sympy__stats__drv__ConditionalDiscreteDomain():
    from sympy.stats.drv import ConditionalDiscreteDomain, SingleDiscreteDomain
    X = SingleDiscreteDomain(x, S.Naturals0)
    assert _test_args(ConditionalDiscreteDomain(X, x > 2))

@SKIP("abstract class")
def test_sympy__stats__drv__SingleDiscreteDistribution():
    pass

@SKIP("abstract class")
def test_sympy__stats__drv__DiscreteDomain():
    pass

def test_sympy__stats__rv__RandomDomain():
    from sympy.stats.rv import RandomDomain
    from sympy.sets.sets import FiniteSet
    assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3)))

def test_sympy__stats__rv__SingleDomain():
    from sympy.stats.rv import SingleDomain
    from sympy.sets.sets import FiniteSet
    assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3)))

def test_sympy__stats__rv__ConditionalDomain():
    from sympy.stats.rv import ConditionalDomain, RandomDomain
    from sympy.sets.sets import FiniteSet
    D = RandomDomain(FiniteSet(x), FiniteSet(1, 2))
    assert _test_args(ConditionalDomain(D, x > 1))

def test_sympy__stats__rv__PSpace():
    from sympy.stats.rv import PSpace, RandomDomain
    from sympy import FiniteSet
    D = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 5, 6))
    assert _test_args(PSpace(D, die))

@SKIP("abstract Class")
def test_sympy__stats__rv__SinglePSpace():
    pass

def test_sympy__stats__rv__RandomSymbol():
    from sympy.stats.rv import RandomSymbol
    from sympy.stats.crv import SingleContinuousPSpace
    A = SingleContinuousPSpace(x, nd)
    assert _test_args(RandomSymbol(x, A))

def test_sympy__stats__rv__ProductPSpace():
    from sympy.stats.rv import ProductPSpace
    from sympy.stats.crv import SingleContinuousPSpace
    A = SingleContinuousPSpace(x, nd)
    B = SingleContinuousPSpace(y, nd)
    assert _test_args(ProductPSpace(A, B))

def test_sympy__stats__rv__ProductDomain():
    from sympy.stats.rv import ProductDomain, SingleDomain
    D = SingleDomain(x, Interval(-oo, oo))
    E = SingleDomain(y, Interval(0, oo))
    assert _test_args(ProductDomain(D, E))
# Argument-invariance checks for the symbolic probability wrapper classes
# (Probability, Expectation, Covariance, Variance).
def test_sympy__stats__symbolic_probability__Probability():
    from sympy.stats.symbolic_probability import Probability
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    assert _test_args(Probability(X > 0))

def test_sympy__stats__symbolic_probability__Expectation():
    from sympy.stats.symbolic_probability import Expectation
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    assert _test_args(Expectation(X > 0))

def test_sympy__stats__symbolic_probability__Covariance():
    from sympy.stats.symbolic_probability import Covariance
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    Y = Normal('Y', 0, 3)
    assert _test_args(Covariance(X, Y))

def test_sympy__stats__symbolic_probability__Variance():
    from sympy.stats.symbolic_probability import Variance
    from sympy.stats import Normal
    X = Normal('X', 0, 1)
    assert _test_args(Variance(X))
# Argument-invariance checks for finite random-variable distribution types
# and the finite-domain classes.
def test_sympy__stats__frv_types__DiscreteUniformDistribution():
    from sympy.stats.frv_types import DiscreteUniformDistribution
    from sympy.core.containers import Tuple
    assert _test_args(DiscreteUniformDistribution(Tuple(*list(range(6)))))

def test_sympy__stats__frv_types__DieDistribution():
    from sympy.stats.frv_types import DieDistribution
    assert _test_args(DieDistribution(6))

def test_sympy__stats__frv_types__BernoulliDistribution():
    from sympy.stats.frv_types import BernoulliDistribution
    assert _test_args(BernoulliDistribution(S.Half, 0, 1))

def test_sympy__stats__frv_types__BinomialDistribution():
    from sympy.stats.frv_types import BinomialDistribution
    assert _test_args(BinomialDistribution(5, S.Half, 1, 0))

def test_sympy__stats__frv_types__HypergeometricDistribution():
    from sympy.stats.frv_types import HypergeometricDistribution
    assert _test_args(HypergeometricDistribution(10, 5, 3))

def test_sympy__stats__frv_types__RademacherDistribution():
    from sympy.stats.frv_types import RademacherDistribution
    assert _test_args(RademacherDistribution())

def test_sympy__stats__frv__FiniteDomain():
    from sympy.stats.frv import FiniteDomain
    assert _test_args(FiniteDomain({(x, 1), (x, 2)}))  # x can be 1 or 2

def test_sympy__stats__frv__SingleFiniteDomain():
    from sympy.stats.frv import SingleFiniteDomain
    assert _test_args(SingleFiniteDomain(x, {1, 2}))  # x can be 1 or 2

def test_sympy__stats__frv__ProductFiniteDomain():
    from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain
    xd = SingleFiniteDomain(x, {1, 2})
    yd = SingleFiniteDomain(y, {1, 2})
    assert _test_args(ProductFiniteDomain(xd, yd))

def test_sympy__stats__frv__ConditionalFiniteDomain():
    from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain
    xd = SingleFiniteDomain(x, {1, 2})
    assert _test_args(ConditionalFiniteDomain(xd, x > 1))
def test_sympy__stats__frv__FinitePSpace():
    # Argument-invariance check for FinitePSpace. Dead code removed: the
    # original built a six-element domain and a float ``p = 1.0/6`` that were
    # immediately overwritten / never read before the assertion.
    from sympy.stats.frv import FinitePSpace, SingleFiniteDomain
    xd = SingleFiniteDomain(x, {1, 2})
    assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half}))
# Argument-invariance checks for finite probability spaces, hand-made
# distributions and the Density wrapper.
def test_sympy__stats__frv__SingleFinitePSpace():
    from sympy.stats.frv import SingleFinitePSpace
    from sympy import Symbol
    assert _test_args(SingleFinitePSpace(Symbol('x'), die))

def test_sympy__stats__frv__ProductFinitePSpace():
    from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace
    from sympy import Symbol
    xp = SingleFinitePSpace(Symbol('x'), die)
    yp = SingleFinitePSpace(Symbol('y'), die)
    assert _test_args(ProductFinitePSpace(xp, yp))

@SKIP("abstract class")
def test_sympy__stats__frv__SingleFiniteDistribution():
    pass

@SKIP("abstract class")
def test_sympy__stats__crv__ContinuousDistribution():
    pass

def test_sympy__stats__frv_types__FiniteDistributionHandmade():
    from sympy.stats.frv_types import FiniteDistributionHandmade
    assert _test_args(FiniteDistributionHandmade({1: 1}))

def test_sympy__stats__crv__ContinuousDistributionHandmade():
    from sympy.stats.crv import ContinuousDistributionHandmade
    from sympy import Symbol, Interval
    assert _test_args(ContinuousDistributionHandmade(Symbol('x'),
                                                     Interval(0, 2)))

def test_sympy__stats__rv__Density():
    from sympy.stats.rv import Density
    from sympy.stats.crv_types import Normal
    assert _test_args(Density(Normal('x', 0, 1)))
# Argument-invariance checks for every concrete continuous (crv_types) and
# discrete (drv_types) distribution class, instantiated with small sample
# parameters.
def test_sympy__stats__crv_types__ArcsinDistribution():
    from sympy.stats.crv_types import ArcsinDistribution
    assert _test_args(ArcsinDistribution(0, 1))

def test_sympy__stats__crv_types__BeniniDistribution():
    from sympy.stats.crv_types import BeniniDistribution
    assert _test_args(BeniniDistribution(1, 1, 1))

def test_sympy__stats__crv_types__BetaDistribution():
    from sympy.stats.crv_types import BetaDistribution
    assert _test_args(BetaDistribution(1, 1))

def test_sympy__stats__crv_types__BetaPrimeDistribution():
    from sympy.stats.crv_types import BetaPrimeDistribution
    assert _test_args(BetaPrimeDistribution(1, 1))

def test_sympy__stats__crv_types__CauchyDistribution():
    from sympy.stats.crv_types import CauchyDistribution
    assert _test_args(CauchyDistribution(0, 1))

def test_sympy__stats__crv_types__ChiDistribution():
    from sympy.stats.crv_types import ChiDistribution
    assert _test_args(ChiDistribution(1))

def test_sympy__stats__crv_types__ChiNoncentralDistribution():
    from sympy.stats.crv_types import ChiNoncentralDistribution
    assert _test_args(ChiNoncentralDistribution(1,1))

def test_sympy__stats__crv_types__ChiSquaredDistribution():
    from sympy.stats.crv_types import ChiSquaredDistribution
    assert _test_args(ChiSquaredDistribution(1))

def test_sympy__stats__crv_types__DagumDistribution():
    from sympy.stats.crv_types import DagumDistribution
    assert _test_args(DagumDistribution(1, 1, 1))

def test_sympy__stats__crv_types__ExponentialDistribution():
    from sympy.stats.crv_types import ExponentialDistribution
    assert _test_args(ExponentialDistribution(1))

def test_sympy__stats__crv_types__FDistributionDistribution():
    from sympy.stats.crv_types import FDistributionDistribution
    assert _test_args(FDistributionDistribution(1, 1))

def test_sympy__stats__crv_types__FisherZDistribution():
    from sympy.stats.crv_types import FisherZDistribution
    assert _test_args(FisherZDistribution(1, 1))

def test_sympy__stats__crv_types__FrechetDistribution():
    from sympy.stats.crv_types import FrechetDistribution
    assert _test_args(FrechetDistribution(1, 1, 1))

def test_sympy__stats__crv_types__GammaInverseDistribution():
    from sympy.stats.crv_types import GammaInverseDistribution
    assert _test_args(GammaInverseDistribution(1, 1))

def test_sympy__stats__crv_types__GammaDistribution():
    from sympy.stats.crv_types import GammaDistribution
    assert _test_args(GammaDistribution(1, 1))

def test_sympy__stats__crv_types__GumbelDistribution():
    from sympy.stats.crv_types import GumbelDistribution
    assert _test_args(GumbelDistribution(1, 1))

def test_sympy__stats__crv_types__GompertzDistribution():
    from sympy.stats.crv_types import GompertzDistribution
    assert _test_args(GompertzDistribution(1, 1))

def test_sympy__stats__crv_types__KumaraswamyDistribution():
    from sympy.stats.crv_types import KumaraswamyDistribution
    assert _test_args(KumaraswamyDistribution(1, 1))

def test_sympy__stats__crv_types__LaplaceDistribution():
    from sympy.stats.crv_types import LaplaceDistribution
    assert _test_args(LaplaceDistribution(0, 1))

def test_sympy__stats__crv_types__LogisticDistribution():
    from sympy.stats.crv_types import LogisticDistribution
    assert _test_args(LogisticDistribution(0, 1))

def test_sympy__stats__crv_types__LogNormalDistribution():
    from sympy.stats.crv_types import LogNormalDistribution
    assert _test_args(LogNormalDistribution(0, 1))

def test_sympy__stats__crv_types__MaxwellDistribution():
    from sympy.stats.crv_types import MaxwellDistribution
    assert _test_args(MaxwellDistribution(1))

def test_sympy__stats__crv_types__NakagamiDistribution():
    from sympy.stats.crv_types import NakagamiDistribution
    assert _test_args(NakagamiDistribution(1, 1))

def test_sympy__stats__crv_types__NormalDistribution():
    from sympy.stats.crv_types import NormalDistribution
    assert _test_args(NormalDistribution(0, 1))

def test_sympy__stats__crv_types__ParetoDistribution():
    from sympy.stats.crv_types import ParetoDistribution
    assert _test_args(ParetoDistribution(1, 1))

def test_sympy__stats__crv_types__QuadraticUDistribution():
    from sympy.stats.crv_types import QuadraticUDistribution
    assert _test_args(QuadraticUDistribution(1, 2))

def test_sympy__stats__crv_types__RaisedCosineDistribution():
    from sympy.stats.crv_types import RaisedCosineDistribution
    assert _test_args(RaisedCosineDistribution(1, 1))

def test_sympy__stats__crv_types__RayleighDistribution():
    from sympy.stats.crv_types import RayleighDistribution
    assert _test_args(RayleighDistribution(1))

def test_sympy__stats__crv_types__ShiftedGompertzDistribution():
    from sympy.stats.crv_types import ShiftedGompertzDistribution
    assert _test_args(ShiftedGompertzDistribution(1, 1))

def test_sympy__stats__crv_types__StudentTDistribution():
    from sympy.stats.crv_types import StudentTDistribution
    assert _test_args(StudentTDistribution(1))

def test_sympy__stats__crv_types__TrapezoidalDistribution():
    from sympy.stats.crv_types import TrapezoidalDistribution
    assert _test_args(TrapezoidalDistribution(1, 2, 3, 4))

def test_sympy__stats__crv_types__TriangularDistribution():
    from sympy.stats.crv_types import TriangularDistribution
    assert _test_args(TriangularDistribution(-1, 0, 1))

def test_sympy__stats__crv_types__UniformDistribution():
    from sympy.stats.crv_types import UniformDistribution
    assert _test_args(UniformDistribution(0, 1))

def test_sympy__stats__crv_types__UniformSumDistribution():
    from sympy.stats.crv_types import UniformSumDistribution
    assert _test_args(UniformSumDistribution(1))

def test_sympy__stats__crv_types__VonMisesDistribution():
    from sympy.stats.crv_types import VonMisesDistribution
    assert _test_args(VonMisesDistribution(1, 1))

def test_sympy__stats__crv_types__WeibullDistribution():
    from sympy.stats.crv_types import WeibullDistribution
    assert _test_args(WeibullDistribution(1, 1))

def test_sympy__stats__crv_types__WignerSemicircleDistribution():
    from sympy.stats.crv_types import WignerSemicircleDistribution
    assert _test_args(WignerSemicircleDistribution(1))

def test_sympy__stats__drv_types__PoissonDistribution():
    from sympy.stats.drv_types import PoissonDistribution
    assert _test_args(PoissonDistribution(1))

def test_sympy__stats__drv_types__GeometricDistribution():
    from sympy.stats.drv_types import GeometricDistribution
    assert _test_args(GeometricDistribution(.5))
# Argument-invariance checks for the core symbol classes.
def test_sympy__core__symbol__Dummy():
    from sympy.core.symbol import Dummy
    assert _test_args(Dummy('t'))

def test_sympy__core__symbol__Symbol():
    from sympy.core.symbol import Symbol
    assert _test_args(Symbol('t'))

def test_sympy__core__symbol__Wild():
    from sympy.core.symbol import Wild
    assert _test_args(Wild('x', exclude=[x]))
# Argument-invariance checks for the combinatorial factorial and number
# classes.
@SKIP("abstract class")
def test_sympy__functions__combinatorial__factorials__CombinatorialFunction():
    pass

def test_sympy__functions__combinatorial__factorials__FallingFactorial():
    from sympy.functions.combinatorial.factorials import FallingFactorial
    assert _test_args(FallingFactorial(2, x))

def test_sympy__functions__combinatorial__factorials__MultiFactorial():
    from sympy.functions.combinatorial.factorials import MultiFactorial
    assert _test_args(MultiFactorial(x))

def test_sympy__functions__combinatorial__factorials__RisingFactorial():
    from sympy.functions.combinatorial.factorials import RisingFactorial
    assert _test_args(RisingFactorial(2, x))

def test_sympy__functions__combinatorial__factorials__binomial():
    from sympy.functions.combinatorial.factorials import binomial
    assert _test_args(binomial(2, x))

def test_sympy__functions__combinatorial__factorials__subfactorial():
    from sympy.functions.combinatorial.factorials import subfactorial
    assert _test_args(subfactorial(1))

def test_sympy__functions__combinatorial__factorials__factorial():
    from sympy.functions.combinatorial.factorials import factorial
    assert _test_args(factorial(x))

def test_sympy__functions__combinatorial__factorials__factorial2():
    from sympy.functions.combinatorial.factorials import factorial2
    assert _test_args(factorial2(x))

def test_sympy__functions__combinatorial__numbers__bell():
    from sympy.functions.combinatorial.numbers import bell
    assert _test_args(bell(x, y))

def test_sympy__functions__combinatorial__numbers__bernoulli():
    from sympy.functions.combinatorial.numbers import bernoulli
    assert _test_args(bernoulli(x))

def test_sympy__functions__combinatorial__numbers__catalan():
    from sympy.functions.combinatorial.numbers import catalan
    assert _test_args(catalan(x))

def test_sympy__functions__combinatorial__numbers__genocchi():
    from sympy.functions.combinatorial.numbers import genocchi
    assert _test_args(genocchi(x))

def test_sympy__functions__combinatorial__numbers__euler():
    from sympy.functions.combinatorial.numbers import euler
    assert _test_args(euler(x))

def test_sympy__functions__combinatorial__numbers__fibonacci():
    from sympy.functions.combinatorial.numbers import fibonacci
    assert _test_args(fibonacci(x))

def test_sympy__functions__combinatorial__numbers__harmonic():
    from sympy.functions.combinatorial.numbers import harmonic
    assert _test_args(harmonic(x, 2))

def test_sympy__functions__combinatorial__numbers__lucas():
    from sympy.functions.combinatorial.numbers import lucas
    assert _test_args(lucas(x))

def test_sympy__functions__combinatorial__numbers__partition():
    from sympy.core.symbol import Symbol
    from sympy.functions.combinatorial.numbers import partition
    assert _test_args(partition(Symbol('a', integer=True)))
# Argument-invariance checks for the elementary complex-number functions.
def test_sympy__functions__elementary__complexes__Abs():
    from sympy.functions.elementary.complexes import Abs
    assert _test_args(Abs(x))

def test_sympy__functions__elementary__complexes__adjoint():
    from sympy.functions.elementary.complexes import adjoint
    assert _test_args(adjoint(x))

def test_sympy__functions__elementary__complexes__arg():
    from sympy.functions.elementary.complexes import arg
    assert _test_args(arg(x))

def test_sympy__functions__elementary__complexes__conjugate():
    from sympy.functions.elementary.complexes import conjugate
    assert _test_args(conjugate(x))

def test_sympy__functions__elementary__complexes__im():
    from sympy.functions.elementary.complexes import im
    assert _test_args(im(x))

def test_sympy__functions__elementary__complexes__re():
    from sympy.functions.elementary.complexes import re
    assert _test_args(re(x))

def test_sympy__functions__elementary__complexes__sign():
    from sympy.functions.elementary.complexes import sign
    assert _test_args(sign(x))

def test_sympy__functions__elementary__complexes__polar_lift():
    from sympy.functions.elementary.complexes import polar_lift
    assert _test_args(polar_lift(x))

def test_sympy__functions__elementary__complexes__periodic_argument():
    from sympy.functions.elementary.complexes import periodic_argument
    assert _test_args(periodic_argument(x, y))

def test_sympy__functions__elementary__complexes__principal_branch():
    from sympy.functions.elementary.complexes import principal_branch
    assert _test_args(principal_branch(x, y))

def test_sympy__functions__elementary__complexes__transpose():
    from sympy.functions.elementary.complexes import transpose
    assert _test_args(transpose(x))
# Argument-invariance checks for the elementary exponential and hyperbolic
# functions; abstract bases are SKIPped.
def test_sympy__functions__elementary__exponential__LambertW():
    from sympy.functions.elementary.exponential import LambertW
    assert _test_args(LambertW(2))

@SKIP("abstract class")
def test_sympy__functions__elementary__exponential__ExpBase():
    pass

def test_sympy__functions__elementary__exponential__exp():
    from sympy.functions.elementary.exponential import exp
    assert _test_args(exp(2))

def test_sympy__functions__elementary__exponential__exp_polar():
    from sympy.functions.elementary.exponential import exp_polar
    assert _test_args(exp_polar(2))

def test_sympy__functions__elementary__exponential__log():
    from sympy.functions.elementary.exponential import log
    assert _test_args(log(2))

@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction():
    pass

@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction():
    pass

@SKIP("abstract class")
def test_sympy__functions__elementary__hyperbolic__InverseHyperbolicFunction():
    pass

def test_sympy__functions__elementary__hyperbolic__acosh():
    from sympy.functions.elementary.hyperbolic import acosh
    assert _test_args(acosh(2))

def test_sympy__functions__elementary__hyperbolic__acoth():
    from sympy.functions.elementary.hyperbolic import acoth
    assert _test_args(acoth(2))

def test_sympy__functions__elementary__hyperbolic__asinh():
    from sympy.functions.elementary.hyperbolic import asinh
    assert _test_args(asinh(2))

def test_sympy__functions__elementary__hyperbolic__atanh():
    from sympy.functions.elementary.hyperbolic import atanh
    assert _test_args(atanh(2))

def test_sympy__functions__elementary__hyperbolic__asech():
    from sympy.functions.elementary.hyperbolic import asech
    assert _test_args(asech(2))

def test_sympy__functions__elementary__hyperbolic__acsch():
    from sympy.functions.elementary.hyperbolic import acsch
    assert _test_args(acsch(2))

def test_sympy__functions__elementary__hyperbolic__cosh():
    from sympy.functions.elementary.hyperbolic import cosh
    assert _test_args(cosh(2))

def test_sympy__functions__elementary__hyperbolic__coth():
    from sympy.functions.elementary.hyperbolic import coth
    assert _test_args(coth(2))

def test_sympy__functions__elementary__hyperbolic__csch():
    from sympy.functions.elementary.hyperbolic import csch
    assert _test_args(csch(2))

def test_sympy__functions__elementary__hyperbolic__sech():
    from sympy.functions.elementary.hyperbolic import sech
    assert _test_args(sech(2))

def test_sympy__functions__elementary__hyperbolic__sinh():
    from sympy.functions.elementary.hyperbolic import sinh
    assert _test_args(sinh(2))

def test_sympy__functions__elementary__hyperbolic__tanh():
    from sympy.functions.elementary.hyperbolic import tanh
    assert _test_args(tanh(2))
# Argument-invariance checks for integer-rounding and miscellaneous
# elementary functions.
@SKIP("does this work at all?")
def test_sympy__functions__elementary__integers__RoundFunction():
    from sympy.functions.elementary.integers import RoundFunction
    assert _test_args(RoundFunction())

def test_sympy__functions__elementary__integers__ceiling():
    from sympy.functions.elementary.integers import ceiling
    assert _test_args(ceiling(x))

def test_sympy__functions__elementary__integers__floor():
    from sympy.functions.elementary.integers import floor
    assert _test_args(floor(x))

def test_sympy__functions__elementary__integers__frac():
    from sympy.functions.elementary.integers import frac
    assert _test_args(frac(x))

def test_sympy__functions__elementary__miscellaneous__IdentityFunction():
    from sympy.functions.elementary.miscellaneous import IdentityFunction
    assert _test_args(IdentityFunction())

def test_sympy__functions__elementary__miscellaneous__Max():
    from sympy.functions.elementary.miscellaneous import Max
    assert _test_args(Max(x, 2))

def test_sympy__functions__elementary__miscellaneous__Min():
    from sympy.functions.elementary.miscellaneous import Min
    assert _test_args(Min(x, 2))

@SKIP("abstract class")
def test_sympy__functions__elementary__miscellaneous__MinMaxBase():
    pass
# Argument-invariance checks for Piecewise and the abstract trigonometric
# base classes.
def test_sympy__functions__elementary__piecewise__ExprCondPair():
    from sympy.functions.elementary.piecewise import ExprCondPair
    assert _test_args(ExprCondPair(1, True))

def test_sympy__functions__elementary__piecewise__Piecewise():
    from sympy.functions.elementary.piecewise import Piecewise
    assert _test_args(Piecewise((1, x >= 0), (0, True)))

@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__TrigonometricFunction():
    pass

@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction():
    pass

@SKIP("abstract class")
def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction():
    pass
# Argument-invariance checks for the trigonometric functions.
def test_sympy__functions__elementary__trigonometric__acos():
    from sympy.functions.elementary.trigonometric import acos
    assert _test_args(acos(2))

def test_sympy__functions__elementary__trigonometric__acot():
    from sympy.functions.elementary.trigonometric import acot
    assert _test_args(acot(2))

def test_sympy__functions__elementary__trigonometric__asin():
    from sympy.functions.elementary.trigonometric import asin
    assert _test_args(asin(2))

def test_sympy__functions__elementary__trigonometric__asec():
    from sympy.functions.elementary.trigonometric import asec
    assert _test_args(asec(2))

def test_sympy__functions__elementary__trigonometric__acsc():
    from sympy.functions.elementary.trigonometric import acsc
    assert _test_args(acsc(2))

def test_sympy__functions__elementary__trigonometric__atan():
    from sympy.functions.elementary.trigonometric import atan
    assert _test_args(atan(2))

def test_sympy__functions__elementary__trigonometric__atan2():
    from sympy.functions.elementary.trigonometric import atan2
    assert _test_args(atan2(2, 3))

def test_sympy__functions__elementary__trigonometric__cos():
    from sympy.functions.elementary.trigonometric import cos
    assert _test_args(cos(2))

def test_sympy__functions__elementary__trigonometric__csc():
    from sympy.functions.elementary.trigonometric import csc
    assert _test_args(csc(2))

def test_sympy__functions__elementary__trigonometric__cot():
    from sympy.functions.elementary.trigonometric import cot
    assert _test_args(cot(2))
def test_sympy__functions__elementary__trigonometric__sin():
    # Import locally for consistency with every sibling trig test; the
    # original relied on ``sin`` already being bound at module level.
    from sympy.functions.elementary.trigonometric import sin
    assert _test_args(sin(2))
# Argument-invariance checks for sinc/sec/tan and the Bessel-family
# functions; abstract bases are SKIPped.
def test_sympy__functions__elementary__trigonometric__sinc():
    from sympy.functions.elementary.trigonometric import sinc
    assert _test_args(sinc(2))

def test_sympy__functions__elementary__trigonometric__sec():
    from sympy.functions.elementary.trigonometric import sec
    assert _test_args(sec(2))

def test_sympy__functions__elementary__trigonometric__tan():
    from sympy.functions.elementary.trigonometric import tan
    assert _test_args(tan(2))

@SKIP("abstract class")
def test_sympy__functions__special__bessel__BesselBase():
    pass

@SKIP("abstract class")
def test_sympy__functions__special__bessel__SphericalBesselBase():
    pass

@SKIP("abstract class")
def test_sympy__functions__special__bessel__SphericalHankelBase():
    pass

def test_sympy__functions__special__bessel__besseli():
    from sympy.functions.special.bessel import besseli
    assert _test_args(besseli(x, 1))

def test_sympy__functions__special__bessel__besselj():
    from sympy.functions.special.bessel import besselj
    assert _test_args(besselj(x, 1))

def test_sympy__functions__special__bessel__besselk():
    from sympy.functions.special.bessel import besselk
    assert _test_args(besselk(x, 1))

def test_sympy__functions__special__bessel__bessely():
    from sympy.functions.special.bessel import bessely
    assert _test_args(bessely(x, 1))

def test_sympy__functions__special__bessel__hankel1():
    from sympy.functions.special.bessel import hankel1
    assert _test_args(hankel1(x, 1))

def test_sympy__functions__special__bessel__hankel2():
    from sympy.functions.special.bessel import hankel2
    assert _test_args(hankel2(x, 1))

def test_sympy__functions__special__bessel__jn():
    from sympy.functions.special.bessel import jn
    assert _test_args(jn(0, x))

def test_sympy__functions__special__bessel__yn():
    from sympy.functions.special.bessel import yn
    assert _test_args(yn(0, x))

def test_sympy__functions__special__bessel__hn1():
    from sympy.functions.special.bessel import hn1
    assert _test_args(hn1(0, x))

def test_sympy__functions__special__bessel__hn2():
    from sympy.functions.special.bessel import hn2
    assert _test_args(hn2(0, x))
@SKIP("abstract class")
def test_sympy__functions__special__bessel__AiryBase():
    # AiryBase is a base class and is not instantiated directly; marked SKIP
    # for consistency with the other abstract-class stubs in this file.
    pass
# Argument-invariance checks for the Airy functions and elliptic integrals.
def test_sympy__functions__special__bessel__airyai():
    from sympy.functions.special.bessel import airyai
    assert _test_args(airyai(2))

def test_sympy__functions__special__bessel__airybi():
    from sympy.functions.special.bessel import airybi
    assert _test_args(airybi(2))

def test_sympy__functions__special__bessel__airyaiprime():
    from sympy.functions.special.bessel import airyaiprime
    assert _test_args(airyaiprime(2))

def test_sympy__functions__special__bessel__airybiprime():
    from sympy.functions.special.bessel import airybiprime
    assert _test_args(airybiprime(2))

def test_sympy__functions__special__elliptic_integrals__elliptic_k():
    from sympy.functions.special.elliptic_integrals import elliptic_k as K
    assert _test_args(K(x))

def test_sympy__functions__special__elliptic_integrals__elliptic_f():
    from sympy.functions.special.elliptic_integrals import elliptic_f as F
    assert _test_args(F(x, y))

def test_sympy__functions__special__elliptic_integrals__elliptic_e():
    from sympy.functions.special.elliptic_integrals import elliptic_e as E
    # elliptic_e supports both the one-argument and two-argument forms.
    assert _test_args(E(x))
    assert _test_args(E(x, y))

def test_sympy__functions__special__elliptic_integrals__elliptic_pi():
    from sympy.functions.special.elliptic_integrals import elliptic_pi as P
    # elliptic_pi supports both the two-argument and three-argument forms.
    assert _test_args(P(x, y))
    assert _test_args(P(x, y, z))
# Argument-invariance checks for delta/singularity functions, the error
# function family, exponential/trigonometric integrals, and the gamma/beta
# functions; abstract bases are SKIPped.
def test_sympy__functions__special__delta_functions__DiracDelta():
    from sympy.functions.special.delta_functions import DiracDelta
    assert _test_args(DiracDelta(x, 1))

def test_sympy__functions__special__singularity_functions__SingularityFunction():
    from sympy.functions.special.singularity_functions import SingularityFunction
    assert _test_args(SingularityFunction(x, y, z))

def test_sympy__functions__special__delta_functions__Heaviside():
    from sympy.functions.special.delta_functions import Heaviside
    assert _test_args(Heaviside(x))

def test_sympy__functions__special__error_functions__erf():
    from sympy.functions.special.error_functions import erf
    assert _test_args(erf(2))

def test_sympy__functions__special__error_functions__erfc():
    from sympy.functions.special.error_functions import erfc
    assert _test_args(erfc(2))

def test_sympy__functions__special__error_functions__erfi():
    from sympy.functions.special.error_functions import erfi
    assert _test_args(erfi(2))

def test_sympy__functions__special__error_functions__erf2():
    from sympy.functions.special.error_functions import erf2
    assert _test_args(erf2(2, 3))

def test_sympy__functions__special__error_functions__erfinv():
    from sympy.functions.special.error_functions import erfinv
    assert _test_args(erfinv(2))

def test_sympy__functions__special__error_functions__erfcinv():
    from sympy.functions.special.error_functions import erfcinv
    assert _test_args(erfcinv(2))

def test_sympy__functions__special__error_functions__erf2inv():
    from sympy.functions.special.error_functions import erf2inv
    assert _test_args(erf2inv(2, 3))

@SKIP("abstract class")
def test_sympy__functions__special__error_functions__FresnelIntegral():
    pass

def test_sympy__functions__special__error_functions__fresnels():
    from sympy.functions.special.error_functions import fresnels
    assert _test_args(fresnels(2))

def test_sympy__functions__special__error_functions__fresnelc():
    from sympy.functions.special.error_functions import fresnelc
    assert _test_args(fresnelc(2))

def test_sympy__functions__special__error_functions__erfs():
    # _erfs is a private helper class used in erf's asymptotic expansion.
    from sympy.functions.special.error_functions import _erfs
    assert _test_args(_erfs(2))

def test_sympy__functions__special__error_functions__Ei():
    from sympy.functions.special.error_functions import Ei
    assert _test_args(Ei(2))

def test_sympy__functions__special__error_functions__li():
    from sympy.functions.special.error_functions import li
    assert _test_args(li(2))

def test_sympy__functions__special__error_functions__Li():
    from sympy.functions.special.error_functions import Li
    assert _test_args(Li(2))

@SKIP("abstract class")
def test_sympy__functions__special__error_functions__TrigonometricIntegral():
    pass

def test_sympy__functions__special__error_functions__Si():
    from sympy.functions.special.error_functions import Si
    assert _test_args(Si(2))

def test_sympy__functions__special__error_functions__Ci():
    from sympy.functions.special.error_functions import Ci
    assert _test_args(Ci(2))

def test_sympy__functions__special__error_functions__Shi():
    from sympy.functions.special.error_functions import Shi
    assert _test_args(Shi(2))

def test_sympy__functions__special__error_functions__Chi():
    from sympy.functions.special.error_functions import Chi
    assert _test_args(Chi(2))

def test_sympy__functions__special__error_functions__expint():
    from sympy.functions.special.error_functions import expint
    assert _test_args(expint(y, x))

def test_sympy__functions__special__gamma_functions__gamma():
    from sympy.functions.special.gamma_functions import gamma
    assert _test_args(gamma(x))

def test_sympy__functions__special__gamma_functions__loggamma():
    from sympy.functions.special.gamma_functions import loggamma
    assert _test_args(loggamma(2))

def test_sympy__functions__special__gamma_functions__lowergamma():
    from sympy.functions.special.gamma_functions import lowergamma
    assert _test_args(lowergamma(x, 2))

def test_sympy__functions__special__gamma_functions__polygamma():
    from sympy.functions.special.gamma_functions import polygamma
    assert _test_args(polygamma(x, 2))

def test_sympy__functions__special__gamma_functions__uppergamma():
    from sympy.functions.special.gamma_functions import uppergamma
    assert _test_args(uppergamma(x, 2))

def test_sympy__functions__special__beta_functions__beta():
    from sympy.functions.special.beta_functions import beta
    assert _test_args(beta(x, x))
@SKIP("abstract class")
def test_sympy__functions__special__mathieu_functions__MathieuBase():
    # MathieuBase is a base class and is not instantiated directly; marked
    # SKIP for consistency with the other abstract-class stubs in this file.
    pass
def test_sympy__functions__special__mathieu_functions__mathieus():
from sympy.functions.special.mathieu_functions import mathieus
assert _test_args(mathieus(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieuc():
from sympy.functions.special.mathieu_functions import mathieuc
assert _test_args(mathieuc(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieusprime():
from sympy.functions.special.mathieu_functions import mathieusprime
assert _test_args(mathieusprime(1, 1, 1))
def test_sympy__functions__special__mathieu_functions__mathieucprime():
from sympy.functions.special.mathieu_functions import mathieucprime
assert _test_args(mathieucprime(1, 1, 1))
# --- Arg-invariance tests: hypergeometric functions and HyperRep helpers ----
# Abstract bases are skipped; concrete classes get one representative
# construction each.
@SKIP("abstract class")
def test_sympy__functions__special__hyper__TupleParametersBase():
    pass
@SKIP("abstract class")
def test_sympy__functions__special__hyper__TupleArg():
    pass
def test_sympy__functions__special__hyper__hyper():
    from sympy.functions.special.hyper import hyper
    assert _test_args(hyper([1, 2, 3], [4, 5], x))
def test_sympy__functions__special__hyper__meijerg():
    from sympy.functions.special.hyper import meijerg
    assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x))
@SKIP("abstract class")
def test_sympy__functions__special__hyper__HyperRep():
    pass
def test_sympy__functions__special__hyper__HyperRep_power1():
    from sympy.functions.special.hyper import HyperRep_power1
    assert _test_args(HyperRep_power1(x, y))
def test_sympy__functions__special__hyper__HyperRep_power2():
    from sympy.functions.special.hyper import HyperRep_power2
    assert _test_args(HyperRep_power2(x, y))
def test_sympy__functions__special__hyper__HyperRep_log1():
    from sympy.functions.special.hyper import HyperRep_log1
    assert _test_args(HyperRep_log1(x))
def test_sympy__functions__special__hyper__HyperRep_atanh():
    from sympy.functions.special.hyper import HyperRep_atanh
    assert _test_args(HyperRep_atanh(x))
def test_sympy__functions__special__hyper__HyperRep_asin1():
    from sympy.functions.special.hyper import HyperRep_asin1
    assert _test_args(HyperRep_asin1(x))
def test_sympy__functions__special__hyper__HyperRep_asin2():
    from sympy.functions.special.hyper import HyperRep_asin2
    assert _test_args(HyperRep_asin2(x))
def test_sympy__functions__special__hyper__HyperRep_sqrts1():
    from sympy.functions.special.hyper import HyperRep_sqrts1
    assert _test_args(HyperRep_sqrts1(x, y))
def test_sympy__functions__special__hyper__HyperRep_sqrts2():
    from sympy.functions.special.hyper import HyperRep_sqrts2
    assert _test_args(HyperRep_sqrts2(x, y))
def test_sympy__functions__special__hyper__HyperRep_log2():
    from sympy.functions.special.hyper import HyperRep_log2
    assert _test_args(HyperRep_log2(x))
def test_sympy__functions__special__hyper__HyperRep_cosasin():
    from sympy.functions.special.hyper import HyperRep_cosasin
    assert _test_args(HyperRep_cosasin(x, y))
def test_sympy__functions__special__hyper__HyperRep_sinasin():
    from sympy.functions.special.hyper import HyperRep_sinasin
    assert _test_args(HyperRep_sinasin(x, y))
# --- Arg-invariance tests: orthogonal polynomials ---------------------------
@SKIP("abstract class")
def test_sympy__functions__special__polynomials__OrthogonalPolynomial():
    pass
def test_sympy__functions__special__polynomials__jacobi():
    from sympy.functions.special.polynomials import jacobi
    assert _test_args(jacobi(x, 2, 2, 2))
def test_sympy__functions__special__polynomials__gegenbauer():
    from sympy.functions.special.polynomials import gegenbauer
    assert _test_args(gegenbauer(x, 2, 2))
def test_sympy__functions__special__polynomials__chebyshevt():
    from sympy.functions.special.polynomials import chebyshevt
    assert _test_args(chebyshevt(x, 2))
def test_sympy__functions__special__polynomials__chebyshevt_root():
    from sympy.functions.special.polynomials import chebyshevt_root
    assert _test_args(chebyshevt_root(3, 2))
def test_sympy__functions__special__polynomials__chebyshevu():
    from sympy.functions.special.polynomials import chebyshevu
    assert _test_args(chebyshevu(x, 2))
def test_sympy__functions__special__polynomials__chebyshevu_root():
    from sympy.functions.special.polynomials import chebyshevu_root
    assert _test_args(chebyshevu_root(3, 2))
def test_sympy__functions__special__polynomials__hermite():
    from sympy.functions.special.polynomials import hermite
    assert _test_args(hermite(x, 2))
def test_sympy__functions__special__polynomials__legendre():
    from sympy.functions.special.polynomials import legendre
    assert _test_args(legendre(x, 2))
def test_sympy__functions__special__polynomials__assoc_legendre():
    from sympy.functions.special.polynomials import assoc_legendre
    assert _test_args(assoc_legendre(x, 0, y))
def test_sympy__functions__special__polynomials__laguerre():
    from sympy.functions.special.polynomials import laguerre
    assert _test_args(laguerre(x, 2))
def test_sympy__functions__special__polynomials__assoc_laguerre():
    from sympy.functions.special.polynomials import assoc_laguerre
    assert _test_args(assoc_laguerre(x, 0, y))
# --- Arg-invariance tests: spherical harmonics, tensor functions, zeta ------
def test_sympy__functions__special__spherical_harmonics__Ynm():
    from sympy.functions.special.spherical_harmonics import Ynm
    assert _test_args(Ynm(1, 1, x, y))
def test_sympy__functions__special__spherical_harmonics__Znm():
    from sympy.functions.special.spherical_harmonics import Znm
    assert _test_args(Znm(1, 1, x, y))
def test_sympy__functions__special__tensor_functions__LeviCivita():
    from sympy.functions.special.tensor_functions import LeviCivita
    assert _test_args(LeviCivita(x, y, 2))
def test_sympy__functions__special__tensor_functions__KroneckerDelta():
    from sympy.functions.special.tensor_functions import KroneckerDelta
    assert _test_args(KroneckerDelta(x, y))
def test_sympy__functions__special__zeta_functions__dirichlet_eta():
    from sympy.functions.special.zeta_functions import dirichlet_eta
    assert _test_args(dirichlet_eta(x))
def test_sympy__functions__special__zeta_functions__zeta():
    from sympy.functions.special.zeta_functions import zeta
    # 101 is an argument zeta does not evaluate away, so an instance survives.
    assert _test_args(zeta(101))
def test_sympy__functions__special__zeta_functions__lerchphi():
    from sympy.functions.special.zeta_functions import lerchphi
    assert _test_args(lerchphi(x, y, z))
def test_sympy__functions__special__zeta_functions__polylog():
    from sympy.functions.special.zeta_functions import polylog
    assert _test_args(polylog(x, y))
def test_sympy__functions__special__zeta_functions__stieltjes():
    from sympy.functions.special.zeta_functions import stieltjes
    assert _test_args(stieltjes(x, y))
# --- Arg-invariance tests: integrals and integral transforms ----------------
# Abstract transform bases are skipped; concrete transforms are built with
# placeholder (function, variable, transform-variable, ...) arguments.
def test_sympy__integrals__integrals__Integral():
    from sympy.integrals.integrals import Integral
    assert _test_args(Integral(2, (x, 0, 1)))
def test_sympy__integrals__risch__NonElementaryIntegral():
    from sympy.integrals.risch import NonElementaryIntegral
    assert _test_args(NonElementaryIntegral(exp(-x**2), x))
@SKIP("abstract class")
def test_sympy__integrals__transforms__IntegralTransform():
    pass
def test_sympy__integrals__transforms__MellinTransform():
    from sympy.integrals.transforms import MellinTransform
    assert _test_args(MellinTransform(2, x, y))
def test_sympy__integrals__transforms__InverseMellinTransform():
    from sympy.integrals.transforms import InverseMellinTransform
    assert _test_args(InverseMellinTransform(2, x, y, 0, 1))
def test_sympy__integrals__transforms__LaplaceTransform():
    from sympy.integrals.transforms import LaplaceTransform
    assert _test_args(LaplaceTransform(2, x, y))
def test_sympy__integrals__transforms__InverseLaplaceTransform():
    from sympy.integrals.transforms import InverseLaplaceTransform
    assert _test_args(InverseLaplaceTransform(2, x, y, 0))
@SKIP("abstract class")
def test_sympy__integrals__transforms__FourierTypeTransform():
    pass
def test_sympy__integrals__transforms__InverseFourierTransform():
    from sympy.integrals.transforms import InverseFourierTransform
    assert _test_args(InverseFourierTransform(2, x, y))
def test_sympy__integrals__transforms__FourierTransform():
    from sympy.integrals.transforms import FourierTransform
    assert _test_args(FourierTransform(2, x, y))
@SKIP("abstract class")
def test_sympy__integrals__transforms__SineCosineTypeTransform():
    pass
def test_sympy__integrals__transforms__InverseSineTransform():
    from sympy.integrals.transforms import InverseSineTransform
    assert _test_args(InverseSineTransform(2, x, y))
def test_sympy__integrals__transforms__SineTransform():
    from sympy.integrals.transforms import SineTransform
    assert _test_args(SineTransform(2, x, y))
def test_sympy__integrals__transforms__InverseCosineTransform():
    from sympy.integrals.transforms import InverseCosineTransform
    assert _test_args(InverseCosineTransform(2, x, y))
def test_sympy__integrals__transforms__CosineTransform():
    from sympy.integrals.transforms import CosineTransform
    assert _test_args(CosineTransform(2, x, y))
@SKIP("abstract class")
def test_sympy__integrals__transforms__HankelTypeTransform():
    pass
def test_sympy__integrals__transforms__InverseHankelTransform():
    from sympy.integrals.transforms import InverseHankelTransform
    assert _test_args(InverseHankelTransform(2, x, y, 0))
def test_sympy__integrals__transforms__HankelTransform():
    from sympy.integrals.transforms import HankelTransform
    assert _test_args(HankelTransform(2, x, y, 0))
# --- Arg-invariance tests: Lie algebras -------------------------------------
# All marked XFAIL: these classes are currently expected to fail the
# args-rebuild invariant (string/int constructor arguments are presumably not
# stored sympified -- confirm against the liealgebras implementations).
@XFAIL
def test_sympy__liealgebras__cartan_type__CartanType_generator():
    from sympy.liealgebras.cartan_type import CartanType_generator
    assert _test_args(CartanType_generator("A2"))
@XFAIL
def test_sympy__liealgebras__cartan_type__Standard_Cartan():
    from sympy.liealgebras.cartan_type import Standard_Cartan
    assert _test_args(Standard_Cartan("A", 2))
@XFAIL
def test_sympy__liealgebras__weyl_group__WeylGroup():
    from sympy.liealgebras.weyl_group import WeylGroup
    assert _test_args(WeylGroup("B4"))
@XFAIL
def test_sympy__liealgebras__root_system__RootSystem():
    from sympy.liealgebras.root_system import RootSystem
    assert _test_args(RootSystem("A2"))
@XFAIL
def test_sympy__liealgebras__type_a__TypeA():
    from sympy.liealgebras.type_a import TypeA
    assert _test_args(TypeA(2))
@XFAIL
def test_sympy__liealgebras__type_b__TypeB():
    from sympy.liealgebras.type_b import TypeB
    assert _test_args(TypeB(4))
@XFAIL
def test_sympy__liealgebras__type_c__TypeC():
    from sympy.liealgebras.type_c import TypeC
    assert _test_args(TypeC(4))
@XFAIL
def test_sympy__liealgebras__type_d__TypeD():
    from sympy.liealgebras.type_d import TypeD
    assert _test_args(TypeD(4))
@XFAIL
def test_sympy__liealgebras__type_e__TypeE():
    from sympy.liealgebras.type_e import TypeE
    assert _test_args(TypeE(6))
@XFAIL
def test_sympy__liealgebras__type_f__TypeF():
    from sympy.liealgebras.type_f import TypeF
    assert _test_args(TypeF(4))
@XFAIL
def test_sympy__liealgebras__type_g__TypeG():
    from sympy.liealgebras.type_g import TypeG
    assert _test_args(TypeG(2))
# --- Arg-invariance tests: boolean algebra ----------------------------------
def test_sympy__logic__boolalg__And():
    from sympy.logic.boolalg import And
    assert _test_args(And(x, y, 1))
@SKIP("abstract class")
def test_sympy__logic__boolalg__Boolean():
    pass
def test_sympy__logic__boolalg__BooleanFunction():
    from sympy.logic.boolalg import BooleanFunction
    assert _test_args(BooleanFunction(1, 2, 3))
@SKIP("abstract class")
def test_sympy__logic__boolalg__BooleanAtom():
    pass
def test_sympy__logic__boolalg__BooleanTrue():
    # true/false are singletons, so the instances are imported directly.
    from sympy.logic.boolalg import true
    assert _test_args(true)
def test_sympy__logic__boolalg__BooleanFalse():
    from sympy.logic.boolalg import false
    assert _test_args(false)
def test_sympy__logic__boolalg__Equivalent():
    from sympy.logic.boolalg import Equivalent
    assert _test_args(Equivalent(x, 2))
def test_sympy__logic__boolalg__ITE():
    from sympy.logic.boolalg import ITE
    assert _test_args(ITE(x, y, 1))
def test_sympy__logic__boolalg__Implies():
    from sympy.logic.boolalg import Implies
    assert _test_args(Implies(x, y))
def test_sympy__logic__boolalg__Nand():
    from sympy.logic.boolalg import Nand
    assert _test_args(Nand(x, y, 1))
def test_sympy__logic__boolalg__Nor():
    from sympy.logic.boolalg import Nor
    assert _test_args(Nor(x, y))
def test_sympy__logic__boolalg__Not():
    from sympy.logic.boolalg import Not
    assert _test_args(Not(x))
def test_sympy__logic__boolalg__Or():
    from sympy.logic.boolalg import Or
    assert _test_args(Or(x, y))
def test_sympy__logic__boolalg__Xor():
    from sympy.logic.boolalg import Xor
    assert _test_args(Xor(x, y, 2))
def test_sympy__logic__boolalg__Xnor():
    from sympy.logic.boolalg import Xnor
    assert _test_args(Xnor(x, y, 2))
# --- Arg-invariance tests: matrices base classes ----------------------------
def test_sympy__matrices__matrices__DeferredVector():
    from sympy.matrices.matrices import DeferredVector
    assert _test_args(DeferredVector("X"))
@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixBase():
    pass
def test_sympy__matrices__immutable__ImmutableDenseMatrix():
    """Arg-invariance and sympification checks for ImmutableDenseMatrix.

    Covers the list-of-rows and (rows, cols, flat-list) constructors, and
    verifies that entries produced by a callable constructor are sympified.
    """
    from sympy.matrices.immutable import ImmutableDenseMatrix
    m = ImmutableDenseMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    # The matrix's args must themselves be valid Basic args.
    assert _test_args(Basic(*list(m)))
    m = ImmutableDenseMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableDenseMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    # 1/(1+i) + 1/(1+j) at (1, 1) is 1/2 + 1/2 == S.One only when i, j are
    # sympified Integers; with plain ints true division would give 1.0.
    m = ImmutableDenseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
def test_sympy__matrices__immutable__ImmutableSparseMatrix():
    """Arg-invariance and sympification checks for ImmutableSparseMatrix.

    Mirrors the ImmutableDenseMatrix test, with the extra sparse-specific
    {(row, col): value} dict constructor.
    """
    from sympy.matrices.immutable import ImmutableSparseMatrix
    m = ImmutableSparseMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, {(0, 0): 1})
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    # Same sympification check as the dense case: indices fed to the callable
    # must be Integers so the sum evaluates exactly to S.One.
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
# --- Arg-invariance tests: matrix expressions (slice .. MatMul) -------------
# MatrixSymbol shapes reuse the module-level symbols x, y so that compatible
# dimensions line up (e.g. (x, y) times (y, x) for MatMul).
def test_sympy__matrices__expressions__slice__MatrixSlice():
    from sympy.matrices.expressions.slice import MatrixSlice
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', 4, 4)
    assert _test_args(MatrixSlice(X, (0, 2), (0, 2)))
def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    assert _test_args(BlockDiagMatrix(X, Y))
def test_sympy__matrices__expressions__blockmatrix__BlockMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockMatrix
    from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    Z = MatrixSymbol('Z', x, y)
    O = ZeroMatrix(y, x)
    assert _test_args(BlockMatrix([[X, Z], [O, Y]]))
def test_sympy__matrices__expressions__inverse__Inverse():
    from sympy.matrices.expressions.inverse import Inverse
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Inverse(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__matadd__MatAdd():
    from sympy.matrices.expressions.matadd import MatAdd
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', x, y)
    assert _test_args(MatAdd(X, Y))
def test_sympy__matrices__expressions__matexpr__Identity():
    from sympy.matrices.expressions.matexpr import Identity
    assert _test_args(Identity(3))
@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixExpr():
    pass
def test_sympy__matrices__expressions__matexpr__MatrixElement():
    from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement
    from sympy import S
    assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3)))
@XFAIL
def test_sympy__matrices__expressions__matexpr__MatrixSymbol():
    from sympy.matrices.expressions.matexpr import MatrixSymbol
    assert _test_args(MatrixSymbol('A', 3, 5))
def test_sympy__matrices__expressions__matexpr__ZeroMatrix():
    from sympy.matrices.expressions.matexpr import ZeroMatrix
    assert _test_args(ZeroMatrix(3, 5))
def test_sympy__matrices__expressions__matmul__MatMul():
    from sympy.matrices.expressions.matmul import MatMul
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', y, x)
    assert _test_args(MatMul(X, Y))
# --- Arg-invariance tests: matrix expressions (DotProduct .. FunctionMatrix)
def test_sympy__matrices__expressions__dotproduct__DotProduct():
    from sympy.matrices.expressions.dotproduct import DotProduct
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, 1)
    Y = MatrixSymbol('Y', x, 1)
    assert _test_args(DotProduct(X, Y))
def test_sympy__matrices__expressions__diagonal__DiagonalMatrix():
    from sympy.matrices.expressions.diagonal import DiagonalMatrix
    from sympy.matrices.expressions import MatrixSymbol
    # NOTE: local x (a 10x1 MatrixSymbol) deliberately shadows the
    # module-level scalar symbol x inside this test.
    x = MatrixSymbol('x', 10, 1)
    assert _test_args(DiagonalMatrix(x))
def test_sympy__matrices__expressions__diagonal__DiagonalOf():
    from sympy.matrices.expressions.diagonal import DiagonalOf
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('x', 10, 10)
    assert _test_args(DiagonalOf(X))
def test_sympy__matrices__expressions__hadamard__HadamardProduct():
    from sympy.matrices.expressions.hadamard import HadamardProduct
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', x, y)
    assert _test_args(HadamardProduct(X, Y))
def test_sympy__matrices__expressions__kronecker__KroneckerProduct():
    from sympy.matrices.expressions.kronecker import KroneckerProduct
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', x, y)
    assert _test_args(KroneckerProduct(X, Y))
def test_sympy__matrices__expressions__matpow__MatPow():
    from sympy.matrices.expressions.matpow import MatPow
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, x)
    assert _test_args(MatPow(X, 2))
def test_sympy__matrices__expressions__transpose__Transpose():
    from sympy.matrices.expressions.transpose import Transpose
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Transpose(MatrixSymbol('A', 3, 5)))
def test_sympy__matrices__expressions__adjoint__Adjoint():
    from sympy.matrices.expressions.adjoint import Adjoint
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Adjoint(MatrixSymbol('A', 3, 5)))
def test_sympy__matrices__expressions__trace__Trace():
    from sympy.matrices.expressions.trace import Trace
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Trace(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__determinant__Determinant():
    from sympy.matrices.expressions.determinant import Determinant
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Determinant(MatrixSymbol('A', 3, 3)))
def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix():
    from sympy.matrices.expressions.funcmatrix import FunctionMatrix
    from sympy import symbols
    i, j = symbols('i,j')
    assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) ))
# --- Arg-invariance tests: DFT / IDFT matrix expressions --------------------
def test_sympy__matrices__expressions__fourier__DFT():
    from sympy.matrices.expressions.fourier import DFT
    from sympy import S
    assert _test_args(DFT(S(2)))
def test_sympy__matrices__expressions__fourier__IDFT():
    from sympy.matrices.expressions.fourier import IDFT
    from sympy import S
    assert _test_args(IDFT(S(2)))
# Module-level fixture: a generic 10x10 MatrixSymbol shared by the
# factorization tests that follow. Defined here (mid-file) rather than at the
# top so it sits next to its only users.
from sympy.matrices.expressions import MatrixSymbol
X = MatrixSymbol('X', 10, 10)
# --- Arg-invariance tests: matrix factorization expressions -----------------
# All of these operate on the shared module-level 10x10 MatrixSymbol X
# defined just above.
def test_sympy__matrices__expressions__factorizations__LofLU():
    from sympy.matrices.expressions.factorizations import LofLU
    assert _test_args(LofLU(X))
def test_sympy__matrices__expressions__factorizations__UofLU():
    from sympy.matrices.expressions.factorizations import UofLU
    assert _test_args(UofLU(X))
def test_sympy__matrices__expressions__factorizations__QofQR():
    from sympy.matrices.expressions.factorizations import QofQR
    assert _test_args(QofQR(X))
def test_sympy__matrices__expressions__factorizations__RofQR():
    from sympy.matrices.expressions.factorizations import RofQR
    assert _test_args(RofQR(X))
def test_sympy__matrices__expressions__factorizations__LofCholesky():
    from sympy.matrices.expressions.factorizations import LofCholesky
    assert _test_args(LofCholesky(X))
def test_sympy__matrices__expressions__factorizations__UofCholesky():
    from sympy.matrices.expressions.factorizations import UofCholesky
    assert _test_args(UofCholesky(X))
def test_sympy__matrices__expressions__factorizations__EigenVectors():
    from sympy.matrices.expressions.factorizations import EigenVectors
    assert _test_args(EigenVectors(X))
def test_sympy__matrices__expressions__factorizations__EigenValues():
    from sympy.matrices.expressions.factorizations import EigenValues
    assert _test_args(EigenValues(X))
def test_sympy__matrices__expressions__factorizations__UofSVD():
    from sympy.matrices.expressions.factorizations import UofSVD
    assert _test_args(UofSVD(X))
def test_sympy__matrices__expressions__factorizations__VofSVD():
    from sympy.matrices.expressions.factorizations import VofSVD
    assert _test_args(VofSVD(X))
def test_sympy__matrices__expressions__factorizations__SofSVD():
    from sympy.matrices.expressions.factorizations import SofSVD
    assert _test_args(SofSVD(X))
@SKIP("abstract class")
def test_sympy__matrices__expressions__factorizations__Factorization():
    pass
# --- Arg-invariance tests: physics vector, Pauli algebra, cartesian QM ------
def test_sympy__physics__vector__frame__CoordinateSym():
    from sympy.physics.vector import CoordinateSym
    from sympy.physics.vector import ReferenceFrame
    assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0))
def test_sympy__physics__paulialgebra__Pauli():
    from sympy.physics.paulialgebra import Pauli
    assert _test_args(Pauli(1))
def test_sympy__physics__quantum__anticommutator__AntiCommutator():
    from sympy.physics.quantum.anticommutator import AntiCommutator
    assert _test_args(AntiCommutator(x, y))
def test_sympy__physics__quantum__cartesian__PositionBra3D():
    from sympy.physics.quantum.cartesian import PositionBra3D
    assert _test_args(PositionBra3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PositionKet3D():
    from sympy.physics.quantum.cartesian import PositionKet3D
    assert _test_args(PositionKet3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PositionState3D():
    from sympy.physics.quantum.cartesian import PositionState3D
    assert _test_args(PositionState3D(x, y, z))
def test_sympy__physics__quantum__cartesian__PxBra():
    from sympy.physics.quantum.cartesian import PxBra
    assert _test_args(PxBra(x, y, z))
def test_sympy__physics__quantum__cartesian__PxKet():
    from sympy.physics.quantum.cartesian import PxKet
    assert _test_args(PxKet(x, y, z))
def test_sympy__physics__quantum__cartesian__PxOp():
    from sympy.physics.quantum.cartesian import PxOp
    assert _test_args(PxOp(x, y, z))
def test_sympy__physics__quantum__cartesian__XBra():
    from sympy.physics.quantum.cartesian import XBra
    assert _test_args(XBra(x))
def test_sympy__physics__quantum__cartesian__XKet():
    from sympy.physics.quantum.cartesian import XKet
    assert _test_args(XKet(x))
def test_sympy__physics__quantum__cartesian__XOp():
    from sympy.physics.quantum.cartesian import XOp
    assert _test_args(XOp(x))
def test_sympy__physics__quantum__cartesian__YOp():
    from sympy.physics.quantum.cartesian import YOp
    assert _test_args(YOp(x))
def test_sympy__physics__quantum__cartesian__ZOp():
    from sympy.physics.quantum.cartesian import ZOp
    assert _test_args(ZOp(x))
# --- Arg-invariance tests: Clebsch-Gordan, circuit plot, commutator, etc. ---
def test_sympy__physics__quantum__cg__CG():
    from sympy.physics.quantum.cg import CG
    from sympy import S
    # Half-integer angular momenta must be exact Rationals, hence S(3)/2 etc.
    assert _test_args(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1))
def test_sympy__physics__quantum__cg__Wigner3j():
    from sympy.physics.quantum.cg import Wigner3j
    assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0))
def test_sympy__physics__quantum__cg__Wigner6j():
    from sympy.physics.quantum.cg import Wigner6j
    assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2))
def test_sympy__physics__quantum__cg__Wigner9j():
    from sympy.physics.quantum.cg import Wigner9j
    assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0))
def test_sympy__physics__quantum__circuitplot__Mz():
    from sympy.physics.quantum.circuitplot import Mz
    assert _test_args(Mz(0))
def test_sympy__physics__quantum__circuitplot__Mx():
    from sympy.physics.quantum.circuitplot import Mx
    assert _test_args(Mx(0))
def test_sympy__physics__quantum__commutator__Commutator():
    from sympy.physics.quantum.commutator import Commutator
    # Non-commutative symbols keep the Commutator from evaluating to zero.
    A, B = symbols('A,B', commutative=False)
    assert _test_args(Commutator(A, B))
def test_sympy__physics__quantum__constants__HBar():
    from sympy.physics.quantum.constants import HBar
    assert _test_args(HBar())
def test_sympy__physics__quantum__dagger__Dagger():
    from sympy.physics.quantum.dagger import Dagger
    from sympy.physics.quantum.state import Ket
    assert _test_args(Dagger(Dagger(Ket('psi'))))
# --- Arg-invariance tests: quantum gates ------------------------------------
def test_sympy__physics__quantum__gate__CGate():
    from sympy.physics.quantum.gate import CGate, Gate
    assert _test_args(CGate((0, 1), Gate(2)))
def test_sympy__physics__quantum__gate__CGateS():
    from sympy.physics.quantum.gate import CGateS, Gate
    assert _test_args(CGateS((0, 1), Gate(2)))
def test_sympy__physics__quantum__gate__CNotGate():
    from sympy.physics.quantum.gate import CNotGate
    assert _test_args(CNotGate(0, 1))
def test_sympy__physics__quantum__gate__Gate():
    from sympy.physics.quantum.gate import Gate
    assert _test_args(Gate(0))
def test_sympy__physics__quantum__gate__HadamardGate():
    from sympy.physics.quantum.gate import HadamardGate
    assert _test_args(HadamardGate(0))
def test_sympy__physics__quantum__gate__IdentityGate():
    from sympy.physics.quantum.gate import IdentityGate
    assert _test_args(IdentityGate(0))
def test_sympy__physics__quantum__gate__OneQubitGate():
    from sympy.physics.quantum.gate import OneQubitGate
    assert _test_args(OneQubitGate(0))
def test_sympy__physics__quantum__gate__PhaseGate():
    from sympy.physics.quantum.gate import PhaseGate
    assert _test_args(PhaseGate(0))
def test_sympy__physics__quantum__gate__SwapGate():
    from sympy.physics.quantum.gate import SwapGate
    assert _test_args(SwapGate(0, 1))
def test_sympy__physics__quantum__gate__TGate():
    from sympy.physics.quantum.gate import TGate
    assert _test_args(TGate(0))
def test_sympy__physics__quantum__gate__TwoQubitGate():
    from sympy.physics.quantum.gate import TwoQubitGate
    assert _test_args(TwoQubitGate(0))
def test_sympy__physics__quantum__gate__UGate():
    from sympy.physics.quantum.gate import UGate
    from sympy.matrices.immutable import ImmutableDenseMatrix
    from sympy import Integer, Tuple
    assert _test_args(
        UGate(Tuple(Integer(1)), ImmutableDenseMatrix([[1, 0], [0, 2]])))
def test_sympy__physics__quantum__gate__XGate():
    from sympy.physics.quantum.gate import XGate
    assert _test_args(XGate(0))
def test_sympy__physics__quantum__gate__YGate():
    from sympy.physics.quantum.gate import YGate
    assert _test_args(YGate(0))
def test_sympy__physics__quantum__gate__ZGate():
    from sympy.physics.quantum.gate import ZGate
    assert _test_args(ZGate(0))
@SKIP("TODO: sympy.physics")
def test_sympy__physics__quantum__grover__OracleGate():
    from sympy.physics.quantum.grover import OracleGate
    assert _test_args(OracleGate())
def test_sympy__physics__quantum__grover__WGate():
    from sympy.physics.quantum.grover import WGate
    assert _test_args(WGate(1))
# --- Arg-invariance tests: Hilbert spaces and inner products ----------------
def test_sympy__physics__quantum__hilbert__ComplexSpace():
    from sympy.physics.quantum.hilbert import ComplexSpace
    assert _test_args(ComplexSpace(x))
def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace():
    from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace
    c = ComplexSpace(2)
    f = FockSpace()
    assert _test_args(DirectSumHilbertSpace(c, f))
def test_sympy__physics__quantum__hilbert__FockSpace():
    from sympy.physics.quantum.hilbert import FockSpace
    assert _test_args(FockSpace())
def test_sympy__physics__quantum__hilbert__HilbertSpace():
    from sympy.physics.quantum.hilbert import HilbertSpace
    assert _test_args(HilbertSpace())
def test_sympy__physics__quantum__hilbert__L2():
    from sympy.physics.quantum.hilbert import L2
    from sympy import oo, Interval
    assert _test_args(L2(Interval(0, oo)))
def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace():
    from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace
    f = FockSpace()
    assert _test_args(TensorPowerHilbertSpace(f, 2))
def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace():
    from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace
    c = ComplexSpace(2)
    f = FockSpace()
    assert _test_args(TensorProductHilbertSpace(f, c))
def test_sympy__physics__quantum__innerproduct__InnerProduct():
    from sympy.physics.quantum import Bra, Ket, InnerProduct
    b = Bra('b')
    k = Ket('k')
    assert _test_args(InnerProduct(b, k))
# --- Arg-invariance tests: quantum operators --------------------------------
def test_sympy__physics__quantum__operator__DifferentialOperator():
    from sympy.physics.quantum.operator import DifferentialOperator
    from sympy import Derivative, Function
    f = Function('f')
    assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x)))
def test_sympy__physics__quantum__operator__HermitianOperator():
    from sympy.physics.quantum.operator import HermitianOperator
    assert _test_args(HermitianOperator('H'))
def test_sympy__physics__quantum__operator__IdentityOperator():
    from sympy.physics.quantum.operator import IdentityOperator
    assert _test_args(IdentityOperator(5))
def test_sympy__physics__quantum__operator__Operator():
    from sympy.physics.quantum.operator import Operator
    assert _test_args(Operator('A'))
def test_sympy__physics__quantum__operator__OuterProduct():
    from sympy.physics.quantum.operator import OuterProduct
    from sympy.physics.quantum import Ket, Bra
    b = Bra('b')
    k = Ket('k')
    # OuterProduct is |k><b| -- ket first, then bra.
    assert _test_args(OuterProduct(k, b))
def test_sympy__physics__quantum__operator__UnitaryOperator():
    from sympy.physics.quantum.operator import UnitaryOperator
    assert _test_args(UnitaryOperator('U'))
def test_sympy__physics__quantum__piab__PIABBra():
    from sympy.physics.quantum.piab import PIABBra
    assert _test_args(PIABBra('B'))
# --- Arg-invariance tests: boson/fermion operators, Pauli QM, PIAB ----------
def test_sympy__physics__quantum__boson__BosonOp():
    from sympy.physics.quantum.boson import BosonOp
    assert _test_args(BosonOp('a'))
    # Second argument False covers the non-annihilation (creation) form.
    assert _test_args(BosonOp('a', False))
def test_sympy__physics__quantum__boson__BosonFockKet():
    from sympy.physics.quantum.boson import BosonFockKet
    assert _test_args(BosonFockKet(1))
def test_sympy__physics__quantum__boson__BosonFockBra():
    from sympy.physics.quantum.boson import BosonFockBra
    assert _test_args(BosonFockBra(1))
def test_sympy__physics__quantum__boson__BosonCoherentKet():
    from sympy.physics.quantum.boson import BosonCoherentKet
    assert _test_args(BosonCoherentKet(1))
def test_sympy__physics__quantum__boson__BosonCoherentBra():
    from sympy.physics.quantum.boson import BosonCoherentBra
    assert _test_args(BosonCoherentBra(1))
def test_sympy__physics__quantum__fermion__FermionOp():
    from sympy.physics.quantum.fermion import FermionOp
    assert _test_args(FermionOp('c'))
    assert _test_args(FermionOp('c', False))
def test_sympy__physics__quantum__fermion__FermionFockKet():
    from sympy.physics.quantum.fermion import FermionFockKet
    assert _test_args(FermionFockKet(1))
def test_sympy__physics__quantum__fermion__FermionFockBra():
    from sympy.physics.quantum.fermion import FermionFockBra
    assert _test_args(FermionFockBra(1))
def test_sympy__physics__quantum__pauli__SigmaOpBase():
    from sympy.physics.quantum.pauli import SigmaOpBase
    assert _test_args(SigmaOpBase())
def test_sympy__physics__quantum__pauli__SigmaX():
    from sympy.physics.quantum.pauli import SigmaX
    assert _test_args(SigmaX())
def test_sympy__physics__quantum__pauli__SigmaY():
    from sympy.physics.quantum.pauli import SigmaY
    assert _test_args(SigmaY())
def test_sympy__physics__quantum__pauli__SigmaZ():
    from sympy.physics.quantum.pauli import SigmaZ
    assert _test_args(SigmaZ())
def test_sympy__physics__quantum__pauli__SigmaMinus():
    from sympy.physics.quantum.pauli import SigmaMinus
    assert _test_args(SigmaMinus())
def test_sympy__physics__quantum__pauli__SigmaPlus():
    from sympy.physics.quantum.pauli import SigmaPlus
    assert _test_args(SigmaPlus())
def test_sympy__physics__quantum__pauli__SigmaZKet():
    from sympy.physics.quantum.pauli import SigmaZKet
    assert _test_args(SigmaZKet(0))
def test_sympy__physics__quantum__pauli__SigmaZBra():
    from sympy.physics.quantum.pauli import SigmaZBra
    assert _test_args(SigmaZBra(0))
def test_sympy__physics__quantum__piab__PIABHamiltonian():
    from sympy.physics.quantum.piab import PIABHamiltonian
    assert _test_args(PIABHamiltonian('P'))
def test_sympy__physics__quantum__piab__PIABKet():
    from sympy.physics.quantum.piab import PIABKet
    assert _test_args(PIABKet('K'))
def test_sympy__physics__quantum__qexpr__QExpr():
from sympy.physics.quantum.qexpr import QExpr
assert _test_args(QExpr(0))
def test_sympy__physics__quantum__qft__Fourier():
from sympy.physics.quantum.qft import Fourier
assert _test_args(Fourier(0, 1))
def test_sympy__physics__quantum__qft__IQFT():
from sympy.physics.quantum.qft import IQFT
assert _test_args(IQFT(0, 1))
def test_sympy__physics__quantum__qft__QFT():
from sympy.physics.quantum.qft import QFT
assert _test_args(QFT(0, 1))
def test_sympy__physics__quantum__qft__RkGate():
from sympy.physics.quantum.qft import RkGate
assert _test_args(RkGate(0, 1))
def test_sympy__physics__quantum__qubit__IntQubit():
from sympy.physics.quantum.qubit import IntQubit
assert _test_args(IntQubit(0))
def test_sympy__physics__quantum__qubit__IntQubitBra():
from sympy.physics.quantum.qubit import IntQubitBra
assert _test_args(IntQubitBra(0))
def test_sympy__physics__quantum__qubit__IntQubitState():
from sympy.physics.quantum.qubit import IntQubitState, QubitState
assert _test_args(IntQubitState(QubitState(0, 1)))
def test_sympy__physics__quantum__qubit__Qubit():
from sympy.physics.quantum.qubit import Qubit
assert _test_args(Qubit(0, 0, 0))
def test_sympy__physics__quantum__qubit__QubitBra():
from sympy.physics.quantum.qubit import QubitBra
assert _test_args(QubitBra('1', 0))
def test_sympy__physics__quantum__qubit__QubitState():
from sympy.physics.quantum.qubit import QubitState
assert _test_args(QubitState(0, 1))
def test_sympy__physics__quantum__density__Density():
    from sympy.physics.quantum.density import Density
    from sympy.physics.quantum.state import Ket
    # An equal-weight mixture of |0> and |1>; each argument is a
    # [state, probability] pair.
    mixture = [[Ket(0), 0.5], [Ket(1), 0.5]]
    assert _test_args(Density(*mixture))
@SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented")
def test_sympy__physics__quantum__shor__CMod():
from sympy.physics.quantum.shor import CMod
assert _test_args(CMod())
def test_sympy__physics__quantum__spin__CoupledSpinState():
    from sympy.physics.quantum.spin import CoupledSpinState
    # Basic constructions: integer and half-integer component spins,
    # with and without an explicit coupling scheme.
    assert _test_args(CoupledSpinState(1, 0, (1, 1)))
    assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2)))
    assert _test_args(CoupledSpinState(
        1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) ))
    j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x')
    # subs() must reach into the component-spin tuple ...
    assert CoupledSpinState(
        j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3))
    # ... and into the nested coupling-scheme tuples as well.
    assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \
        CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 2, j)) )
def test_sympy__physics__quantum__spin__J2Op():
from sympy.physics.quantum.spin import J2Op
assert _test_args(J2Op('J'))
def test_sympy__physics__quantum__spin__JminusOp():
from sympy.physics.quantum.spin import JminusOp
assert _test_args(JminusOp('J'))
def test_sympy__physics__quantum__spin__JplusOp():
from sympy.physics.quantum.spin import JplusOp
assert _test_args(JplusOp('J'))
def test_sympy__physics__quantum__spin__JxBra():
from sympy.physics.quantum.spin import JxBra
assert _test_args(JxBra(1, 0))
def test_sympy__physics__quantum__spin__JxBraCoupled():
from sympy.physics.quantum.spin import JxBraCoupled
assert _test_args(JxBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JxKet():
from sympy.physics.quantum.spin import JxKet
assert _test_args(JxKet(1, 0))
def test_sympy__physics__quantum__spin__JxKetCoupled():
from sympy.physics.quantum.spin import JxKetCoupled
assert _test_args(JxKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JxOp():
from sympy.physics.quantum.spin import JxOp
assert _test_args(JxOp('J'))
def test_sympy__physics__quantum__spin__JyBra():
from sympy.physics.quantum.spin import JyBra
assert _test_args(JyBra(1, 0))
def test_sympy__physics__quantum__spin__JyBraCoupled():
from sympy.physics.quantum.spin import JyBraCoupled
assert _test_args(JyBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JyKet():
from sympy.physics.quantum.spin import JyKet
assert _test_args(JyKet(1, 0))
def test_sympy__physics__quantum__spin__JyKetCoupled():
from sympy.physics.quantum.spin import JyKetCoupled
assert _test_args(JyKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JyOp():
from sympy.physics.quantum.spin import JyOp
assert _test_args(JyOp('J'))
def test_sympy__physics__quantum__spin__JzBra():
from sympy.physics.quantum.spin import JzBra
assert _test_args(JzBra(1, 0))
def test_sympy__physics__quantum__spin__JzBraCoupled():
from sympy.physics.quantum.spin import JzBraCoupled
assert _test_args(JzBraCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JzKet():
from sympy.physics.quantum.spin import JzKet
assert _test_args(JzKet(1, 0))
def test_sympy__physics__quantum__spin__JzKetCoupled():
from sympy.physics.quantum.spin import JzKetCoupled
assert _test_args(JzKetCoupled(1, 0, (1, 1)))
def test_sympy__physics__quantum__spin__JzOp():
from sympy.physics.quantum.spin import JzOp
assert _test_args(JzOp('J'))
def test_sympy__physics__quantum__spin__Rotation():
from sympy.physics.quantum.spin import Rotation
assert _test_args(Rotation(pi, 0, pi/2))
def test_sympy__physics__quantum__spin__SpinState():
from sympy.physics.quantum.spin import SpinState
assert _test_args(SpinState(1, 0))
def test_sympy__physics__quantum__spin__WignerD():
from sympy.physics.quantum.spin import WignerD
assert _test_args(WignerD(0, 1, 2, 3, 4, 5))
def test_sympy__physics__quantum__state__Bra():
from sympy.physics.quantum.state import Bra
assert _test_args(Bra(0))
def test_sympy__physics__quantum__state__BraBase():
from sympy.physics.quantum.state import BraBase
assert _test_args(BraBase(0))
def test_sympy__physics__quantum__state__Ket():
from sympy.physics.quantum.state import Ket
assert _test_args(Ket(0))
def test_sympy__physics__quantum__state__KetBase():
from sympy.physics.quantum.state import KetBase
assert _test_args(KetBase(0))
def test_sympy__physics__quantum__state__State():
from sympy.physics.quantum.state import State
assert _test_args(State(0))
def test_sympy__physics__quantum__state__StateBase():
from sympy.physics.quantum.state import StateBase
assert _test_args(StateBase(0))
def test_sympy__physics__quantum__state__TimeDepBra():
from sympy.physics.quantum.state import TimeDepBra
assert _test_args(TimeDepBra('psi', 't'))
def test_sympy__physics__quantum__state__TimeDepKet():
from sympy.physics.quantum.state import TimeDepKet
assert _test_args(TimeDepKet('psi', 't'))
def test_sympy__physics__quantum__state__TimeDepState():
from sympy.physics.quantum.state import TimeDepState
assert _test_args(TimeDepState('psi', 't'))
def test_sympy__physics__quantum__state__Wavefunction():
    from sympy.physics.quantum.state import Wavefunction
    from sympy.functions import sin
    from sympy import Piecewise
    n = 1
    L = 1
    # Particle-in-a-box eigenfunction: sqrt(2/L)*sin(n*pi*x/L) on [0, L].
    # The original wrote sqrt(2//L) (floor division), which only matched the
    # intended sqrt(2/L) because L == 1; use an exact sympy rational instead.
    g = Piecewise((0, x < 0), (0, x > L), (sqrt(S(2)/L)*sin(n*pi*x/L), True))
    assert _test_args(Wavefunction(g, x))
def test_sympy__physics__quantum__tensorproduct__TensorProduct():
from sympy.physics.quantum.tensorproduct import TensorProduct
assert _test_args(TensorProduct(x, y))
def test_sympy__physics__quantum__identitysearch__GateIdentity():
from sympy.physics.quantum.gate import X
from sympy.physics.quantum.identitysearch import GateIdentity
assert _test_args(GateIdentity(X(0), X(0)))
def test_sympy__physics__quantum__sho1d__SHOOp():
from sympy.physics.quantum.sho1d import SHOOp
assert _test_args(SHOOp('a'))
def test_sympy__physics__quantum__sho1d__RaisingOp():
from sympy.physics.quantum.sho1d import RaisingOp
assert _test_args(RaisingOp('a'))
def test_sympy__physics__quantum__sho1d__LoweringOp():
from sympy.physics.quantum.sho1d import LoweringOp
assert _test_args(LoweringOp('a'))
def test_sympy__physics__quantum__sho1d__NumberOp():
from sympy.physics.quantum.sho1d import NumberOp
assert _test_args(NumberOp('N'))
def test_sympy__physics__quantum__sho1d__Hamiltonian():
from sympy.physics.quantum.sho1d import Hamiltonian
assert _test_args(Hamiltonian('H'))
def test_sympy__physics__quantum__sho1d__SHOState():
from sympy.physics.quantum.sho1d import SHOState
assert _test_args(SHOState(0))
def test_sympy__physics__quantum__sho1d__SHOKet():
from sympy.physics.quantum.sho1d import SHOKet
assert _test_args(SHOKet(0))
def test_sympy__physics__quantum__sho1d__SHOBra():
from sympy.physics.quantum.sho1d import SHOBra
assert _test_args(SHOBra(0))
def test_sympy__physics__secondquant__AnnihilateBoson():
from sympy.physics.secondquant import AnnihilateBoson
assert _test_args(AnnihilateBoson(0))
def test_sympy__physics__secondquant__AnnihilateFermion():
from sympy.physics.secondquant import AnnihilateFermion
assert _test_args(AnnihilateFermion(0))
@SKIP("abstract class")
def test_sympy__physics__secondquant__Annihilator():
pass
def test_sympy__physics__secondquant__AntiSymmetricTensor():
from sympy.physics.secondquant import AntiSymmetricTensor
i, j = symbols('i j', below_fermi=True)
a, b = symbols('a b', above_fermi=True)
assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j)))
def test_sympy__physics__secondquant__BosonState():
from sympy.physics.secondquant import BosonState
assert _test_args(BosonState((0, 1)))
@SKIP("abstract class")
def test_sympy__physics__secondquant__BosonicOperator():
pass
def test_sympy__physics__secondquant__Commutator():
from sympy.physics.secondquant import Commutator
assert _test_args(Commutator(x, y))
def test_sympy__physics__secondquant__CreateBoson():
from sympy.physics.secondquant import CreateBoson
assert _test_args(CreateBoson(0))
def test_sympy__physics__secondquant__CreateFermion():
from sympy.physics.secondquant import CreateFermion
assert _test_args(CreateFermion(0))
@SKIP("abstract class")
def test_sympy__physics__secondquant__Creator():
pass
def test_sympy__physics__secondquant__Dagger():
from sympy.physics.secondquant import Dagger
from sympy import I
assert _test_args(Dagger(2*I))
def test_sympy__physics__secondquant__FermionState():
from sympy.physics.secondquant import FermionState
assert _test_args(FermionState((0, 1)))
def test_sympy__physics__secondquant__FermionicOperator():
from sympy.physics.secondquant import FermionicOperator
assert _test_args(FermionicOperator(0))
def test_sympy__physics__secondquant__FockState():
from sympy.physics.secondquant import FockState
assert _test_args(FockState((0, 1)))
def test_sympy__physics__secondquant__FockStateBosonBra():
from sympy.physics.secondquant import FockStateBosonBra
assert _test_args(FockStateBosonBra((0, 1)))
def test_sympy__physics__secondquant__FockStateBosonKet():
from sympy.physics.secondquant import FockStateBosonKet
assert _test_args(FockStateBosonKet((0, 1)))
def test_sympy__physics__secondquant__FockStateBra():
from sympy.physics.secondquant import FockStateBra
assert _test_args(FockStateBra((0, 1)))
def test_sympy__physics__secondquant__FockStateFermionBra():
from sympy.physics.secondquant import FockStateFermionBra
assert _test_args(FockStateFermionBra((0, 1)))
def test_sympy__physics__secondquant__FockStateFermionKet():
from sympy.physics.secondquant import FockStateFermionKet
assert _test_args(FockStateFermionKet((0, 1)))
def test_sympy__physics__secondquant__FockStateKet():
from sympy.physics.secondquant import FockStateKet
assert _test_args(FockStateKet((0, 1)))
def test_sympy__physics__secondquant__InnerProduct():
from sympy.physics.secondquant import InnerProduct
from sympy.physics.secondquant import FockStateKet, FockStateBra
assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1))))
def test_sympy__physics__secondquant__NO():
from sympy.physics.secondquant import NO, F, Fd
assert _test_args(NO(Fd(x)*F(y)))
def test_sympy__physics__secondquant__PermutationOperator():
from sympy.physics.secondquant import PermutationOperator
assert _test_args(PermutationOperator(0, 1))
def test_sympy__physics__secondquant__SqOperator():
from sympy.physics.secondquant import SqOperator
assert _test_args(SqOperator(0))
def test_sympy__physics__secondquant__TensorSymbol():
from sympy.physics.secondquant import TensorSymbol
assert _test_args(TensorSymbol(x))
def test_sympy__physics__units__dimensions__Dimension():
from sympy.physics.units.dimensions import Dimension
assert _test_args(Dimension("length", "L"))
def test_sympy__physics__units__dimensions__DimensionSystem():
from sympy.physics.units.dimensions import DimensionSystem
from sympy.physics.units.dimensions import length, time, velocity
assert _test_args(DimensionSystem((length, time), (velocity,)))
def test_sympy__physics__units__quantities__Quantity():
    from sympy.physics.units.quantities import Quantity
    # The `length` import the original carried was never used; dropped.
    assert _test_args(Quantity("dam"))
def test_sympy__physics__units__prefixes__Prefix():
from sympy.physics.units.prefixes import Prefix
assert _test_args(Prefix('kilo', 'k', 3))
def test_sympy__core__numbers__AlgebraicNumber():
from sympy.core.numbers import AlgebraicNumber
assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3]))
def test_sympy__polys__polytools__GroebnerBasis():
from sympy.polys.polytools import GroebnerBasis
assert _test_args(GroebnerBasis([x, y, z], x, y, z))
def test_sympy__polys__polytools__Poly():
from sympy.polys.polytools import Poly
assert _test_args(Poly(2, x, y))
def test_sympy__polys__polytools__PurePoly():
from sympy.polys.polytools import PurePoly
assert _test_args(PurePoly(2, x, y))
@SKIP('abstract class')
def test_sympy__polys__rootoftools__RootOf():
pass
def test_sympy__polys__rootoftools__ComplexRootOf():
from sympy.polys.rootoftools import ComplexRootOf
assert _test_args(ComplexRootOf(x**3 + x + 1, 0))
def test_sympy__polys__rootoftools__RootSum():
from sympy.polys.rootoftools import RootSum
assert _test_args(RootSum(x**3 + x + 1, sin))
def test_sympy__series__limits__Limit():
from sympy.series.limits import Limit
assert _test_args(Limit(x, x, 0, dir='-'))
def test_sympy__series__order__Order():
from sympy.series.order import Order
assert _test_args(Order(1, x, y))
@SKIP('Abstract Class')
def test_sympy__series__sequences__SeqBase():
pass
def test_sympy__series__sequences__EmptySequence():
from sympy.series.sequences import EmptySequence
assert _test_args(EmptySequence())
@SKIP('Abstract Class')
def test_sympy__series__sequences__SeqExpr():
pass
def test_sympy__series__sequences__SeqPer():
from sympy.series.sequences import SeqPer
assert _test_args(SeqPer((1, 2, 3), (0, 10)))
def test_sympy__series__sequences__SeqFormula():
from sympy.series.sequences import SeqFormula
assert _test_args(SeqFormula(x**2, (0, 10)))
def test_sympy__series__sequences__SeqExprOp():
from sympy.series.sequences import SeqExprOp, sequence
s1 = sequence((1, 2, 3))
s2 = sequence(x**2)
assert _test_args(SeqExprOp(s1, s2))
def test_sympy__series__sequences__SeqAdd():
from sympy.series.sequences import SeqAdd, sequence
s1 = sequence((1, 2, 3))
s2 = sequence(x**2)
assert _test_args(SeqAdd(s1, s2))
def test_sympy__series__sequences__SeqMul():
from sympy.series.sequences import SeqMul, sequence
s1 = sequence((1, 2, 3))
s2 = sequence(x**2)
assert _test_args(SeqMul(s1, s2))
@SKIP('Abstract Class')
def test_sympy__series__series_class__SeriesBase():
pass
def test_sympy__series__fourier__FourierSeries():
from sympy.series.fourier import fourier_series
assert _test_args(fourier_series(x, (x, -pi, pi)))
def test_sympy__series__formal__FormalPowerSeries():
from sympy.series.formal import fps
assert _test_args(fps(log(1 + x), x))
def test_sympy__simplify__hyperexpand__Hyper_Function():
from sympy.simplify.hyperexpand import Hyper_Function
assert _test_args(Hyper_Function([2], [1]))
def test_sympy__simplify__hyperexpand__G_Function():
from sympy.simplify.hyperexpand import G_Function
assert _test_args(G_Function([2], [1], [], []))
@SKIP("abstract class")
def test_sympy__tensor__array__ndim_array__ImmutableNDimArray():
pass
def test_sympy__tensor__array__dense_ndim_array__ImmutableDenseNDimArray():
from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray
densarr = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert _test_args(densarr)
def test_sympy__tensor__array__sparse_ndim_array__ImmutableSparseNDimArray():
from sympy.tensor.array.sparse_ndim_array import ImmutableSparseNDimArray
sparr = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4))
assert _test_args(sparr)
def test_sympy__tensor__functions__TensorProduct():
from sympy.tensor.functions import TensorProduct
tp = TensorProduct(3, 4, evaluate=False)
assert _test_args(tp)
def test_sympy__tensor__indexed__Idx():
from sympy.tensor.indexed import Idx
assert _test_args(Idx('test'))
assert _test_args(Idx(1, (0, 10)))
def test_sympy__tensor__indexed__Indexed():
from sympy.tensor.indexed import Indexed, Idx
assert _test_args(Indexed('A', Idx('i'), Idx('j')))
def test_sympy__tensor__indexed__IndexedBase():
from sympy.tensor.indexed import IndexedBase
assert _test_args(IndexedBase('A', shape=(x, y)))
assert _test_args(IndexedBase('A', 1))
assert _test_args(IndexedBase('A')[0, 1])
def test_sympy__tensor__tensor__TensorIndexType():
from sympy.tensor.tensor import TensorIndexType
assert _test_args(TensorIndexType('Lorentz', metric=False))
def test_sympy__tensor__tensor__TensorSymmetry():
from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2)))
def test_sympy__tensor__tensor__TensorType():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
sym = TensorSymmetry(get_symmetric_group_sgs(1))
assert _test_args(TensorType([Lorentz], sym))
def test_sympy__tensor__tensor__TensorHead():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
sym = TensorSymmetry(get_symmetric_group_sgs(1))
S1 = TensorType([Lorentz], sym)
assert _test_args(TensorHead('p', S1, 0))
def test_sympy__tensor__tensor__TensorIndex():
from sympy.tensor.tensor import TensorIndexType, TensorIndex
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
assert _test_args(TensorIndex('i', Lorentz))
@SKIP("abstract class")
def test_sympy__tensor__tensor__TensExpr():
pass
def test_sympy__tensor__tensor__TensAdd():
from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd
Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
a, b = tensor_indices('a,b', Lorentz)
sym = TensorSymmetry(get_symmetric_group_sgs(1))
S1 = TensorType([Lorentz], sym)
p, q = S1('p,q')
t1 = p(a)
t2 = q(a)
assert _test_args(TensAdd(t1, t2))
def test_sympy__tensor__tensor__Tensor():
    # Build a rank-1 tensor head p over a Lorentz index type and verify that
    # the applied tensor p(a) round-trips through its args.
    # (Unused imports S, TensMul and TIDS from the original were removed.)
    from sympy.tensor.tensor import (TensorIndexType, TensorSymmetry,
        TensorType, get_symmetric_group_sgs, tensor_indices)
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b = tensor_indices('a,b', Lorentz)
    sym = TensorSymmetry(get_symmetric_group_sgs(1))
    S1 = TensorType([Lorentz], sym)
    p = S1('p')
    assert _test_args(p(a))
def test_sympy__tensor__tensor__TensMul():
    # The product 3*p(a)*q(b) constructs a TensMul implicitly, so the class
    # itself (and S / TIDS, which the original imported) need not be named.
    from sympy.tensor.tensor import (TensorIndexType, TensorSymmetry,
        TensorType, get_symmetric_group_sgs, tensor_indices)
    Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    a, b = tensor_indices('a,b', Lorentz)
    sym = TensorSymmetry(get_symmetric_group_sgs(1))
    S1 = TensorType([Lorentz], sym)
    p = S1('p')
    q = S1('q')
    assert _test_args(3*p(a)*q(b))
def test_as_coeff_add():
    # as_coeff_add() splits off the constant term from the remaining addends.
    expr = 7 + 3*x + 4*x**2
    assert expr.as_coeff_add() == (7, (3*x, 4*x**2))
def test_sympy__geometry__curve__Curve():
from sympy.geometry.curve import Curve
assert _test_args(Curve((x, 1), (x, 0, 1)))
def test_sympy__geometry__point__Point():
from sympy.geometry.point import Point
assert _test_args(Point(0, 1))
def test_sympy__geometry__point__Point2D():
from sympy.geometry.point import Point2D
assert _test_args(Point2D(0, 1))
def test_sympy__geometry__point__Point3D():
from sympy.geometry.point import Point3D
assert _test_args(Point3D(0, 1, 2))
def test_sympy__geometry__ellipse__Ellipse():
from sympy.geometry.ellipse import Ellipse
assert _test_args(Ellipse((0, 1), 2, 3))
def test_sympy__geometry__ellipse__Circle():
from sympy.geometry.ellipse import Circle
assert _test_args(Circle((0, 1), 2))
def test_sympy__geometry__parabola__Parabola():
from sympy.geometry.parabola import Parabola
from sympy.geometry.line import Line
assert _test_args(Parabola((0, 0), Line((2, 3), (4, 3))))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity():
pass
def test_sympy__geometry__line__Line():
from sympy.geometry.line import Line
assert _test_args(Line((0, 1), (2, 3)))
def test_sympy__geometry__line__Ray():
from sympy.geometry.line import Ray
assert _test_args(Ray((0, 1), (2, 3)))
def test_sympy__geometry__line__Segment():
from sympy.geometry.line import Segment
assert _test_args(Segment((0, 1), (2, 3)))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity2D():
pass
def test_sympy__geometry__line__Line2D():
from sympy.geometry.line import Line2D
assert _test_args(Line2D((0, 1), (2, 3)))
def test_sympy__geometry__line__Ray2D():
from sympy.geometry.line import Ray2D
assert _test_args(Ray2D((0, 1), (2, 3)))
def test_sympy__geometry__line__Segment2D():
from sympy.geometry.line import Segment2D
assert _test_args(Segment2D((0, 1), (2, 3)))
@SKIP("abstract class")
def test_sympy__geometry__line__LinearEntity3D():
pass
def test_sympy__geometry__line__Line3D():
from sympy.geometry.line import Line3D
assert _test_args(Line3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__line__Segment3D():
from sympy.geometry.line import Segment3D
assert _test_args(Segment3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__line__Ray3D():
from sympy.geometry.line import Ray3D
assert _test_args(Ray3D((0, 1, 1), (2, 3, 4)))
def test_sympy__geometry__plane__Plane():
from sympy.geometry.plane import Plane
assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3)))
def test_sympy__geometry__polygon__Polygon():
from sympy.geometry.polygon import Polygon
assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7)))
def test_sympy__geometry__polygon__RegularPolygon():
from sympy.geometry.polygon import RegularPolygon
assert _test_args(RegularPolygon((0, 1), 2, 3, 4))
def test_sympy__geometry__polygon__Triangle():
from sympy.geometry.polygon import Triangle
assert _test_args(Triangle((0, 1), (2, 3), (4, 5)))
def test_sympy__geometry__entity__GeometryEntity():
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point
assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2]))
@SKIP("abstract class")
def test_sympy__geometry__entity__GeometrySet():
pass
def test_sympy__diffgeom__diffgeom__Manifold():
from sympy.diffgeom import Manifold
assert _test_args(Manifold('name', 3))
def test_sympy__diffgeom__diffgeom__Patch():
from sympy.diffgeom import Manifold, Patch
assert _test_args(Patch('name', Manifold('name', 3)))
def test_sympy__diffgeom__diffgeom__CoordSystem():
from sympy.diffgeom import Manifold, Patch, CoordSystem
assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3))))
@XFAIL
def test_sympy__diffgeom__diffgeom__Point():
from sympy.diffgeom import Manifold, Patch, CoordSystem, Point
assert _test_args(Point(
CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y]))
def test_sympy__diffgeom__diffgeom__BaseScalarField():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseScalarField(cs, 0))
def test_sympy__diffgeom__diffgeom__BaseVectorField():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseVectorField(cs, 0))
def test_sympy__diffgeom__diffgeom__Differential():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(Differential(BaseScalarField(cs, 0)))
def test_sympy__diffgeom__diffgeom__Commutator():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3)))
v = BaseVectorField(cs, 0)
v1 = BaseVectorField(cs1, 0)
assert _test_args(Commutator(v, v1))
def test_sympy__diffgeom__diffgeom__TensorProduct():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
assert _test_args(TensorProduct(d, d))
def test_sympy__diffgeom__diffgeom__WedgeProduct():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
d1 = Differential(BaseScalarField(cs, 1))
assert _test_args(WedgeProduct(d, d1))
def test_sympy__diffgeom__diffgeom__LieDerivative():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, LieDerivative
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
d = Differential(BaseScalarField(cs, 0))
v = BaseVectorField(cs, 0)
assert _test_args(LieDerivative(v, d))
@XFAIL
def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3))
def test_sympy__diffgeom__diffgeom__CovarDerivativeOp():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp
cs = CoordSystem('name', Patch('name', Manifold('name', 3)))
v = BaseVectorField(cs, 0)
_test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3))
def test_sympy__categories__baseclasses__Class():
from sympy.categories.baseclasses import Class
assert _test_args(Class())
def test_sympy__categories__baseclasses__Object():
from sympy.categories import Object
assert _test_args(Object("A"))
@XFAIL
def test_sympy__categories__baseclasses__Morphism():
from sympy.categories import Object, Morphism
assert _test_args(Morphism(Object("A"), Object("B")))
def test_sympy__categories__baseclasses__IdentityMorphism():
from sympy.categories import Object, IdentityMorphism
assert _test_args(IdentityMorphism(Object("A")))
def test_sympy__categories__baseclasses__NamedMorphism():
from sympy.categories import Object, NamedMorphism
assert _test_args(NamedMorphism(Object("A"), Object("B"), "f"))
def test_sympy__categories__baseclasses__CompositeMorphism():
from sympy.categories import Object, NamedMorphism, CompositeMorphism
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
assert _test_args(CompositeMorphism(f, g))
def test_sympy__categories__baseclasses__Diagram():
    from sympy.categories import Object, NamedMorphism, Diagram
    # A one-morphism diagram f: A -> B is enough for the args check.
    # (The unused object C from the original was removed.)
    A = Object("A")
    B = Object("B")
    f = NamedMorphism(A, B, "f")
    d = Diagram([f])
    assert _test_args(d)
def test_sympy__categories__baseclasses__Category():
from sympy.categories import Object, NamedMorphism, Diagram, Category
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
d1 = Diagram([f, g])
d2 = Diagram([f])
K = Category("K", commutative_diagrams=[d1, d2])
assert _test_args(K)
def test_sympy__ntheory__factor___totient():
from sympy.ntheory.factor_ import totient
k = symbols('k', integer=True)
t = totient(k)
assert _test_args(t)
def test_sympy__ntheory__factor___reduced_totient():
from sympy.ntheory.factor_ import reduced_totient
k = symbols('k', integer=True)
t = reduced_totient(k)
assert _test_args(t)
def test_sympy__ntheory__factor___divisor_sigma():
from sympy.ntheory.factor_ import divisor_sigma
k = symbols('k', integer=True)
n = symbols('n', integer=True)
t = divisor_sigma(n, k)
assert _test_args(t)
def test_sympy__ntheory__factor___udivisor_sigma():
from sympy.ntheory.factor_ import udivisor_sigma
k = symbols('k', integer=True)
n = symbols('n', integer=True)
t = udivisor_sigma(n, k)
assert _test_args(t)
def test_sympy__ntheory__factor___primenu():
from sympy.ntheory.factor_ import primenu
n = symbols('n', integer=True)
t = primenu(n)
assert _test_args(t)
def test_sympy__ntheory__factor___primeomega():
from sympy.ntheory.factor_ import primeomega
n = symbols('n', integer=True)
t = primeomega(n)
assert _test_args(t)
def test_sympy__ntheory__residue_ntheory__mobius():
from sympy.ntheory import mobius
assert _test_args(mobius(2))
def test_sympy__physics__optics__waves__TWave():
from sympy.physics.optics import TWave
A, f, phi = symbols('A, f, phi')
assert _test_args(TWave(A, f, phi))
def test_sympy__physics__optics__gaussopt__BeamParameter():
from sympy.physics.optics import BeamParameter
assert _test_args(BeamParameter(530e-9, 1, w=1e-3))
def test_sympy__physics__optics__medium__Medium():
from sympy.physics.optics import Medium
assert _test_args(Medium('m'))
def test_sympy__codegen__ast__Assignment():
from sympy.codegen.ast import Assignment
assert _test_args(Assignment(x, y))
def test_sympy__codegen__cfunctions__expm1():
from sympy.codegen.cfunctions import expm1
assert _test_args(expm1(x))
def test_sympy__codegen__cfunctions__log1p():
from sympy.codegen.cfunctions import log1p
assert _test_args(log1p(x))
def test_sympy__codegen__cfunctions__exp2():
from sympy.codegen.cfunctions import exp2
assert _test_args(exp2(x))
def test_sympy__codegen__cfunctions__log2():
from sympy.codegen.cfunctions import log2
assert _test_args(log2(x))
def test_sympy__codegen__cfunctions__fma():
from sympy.codegen.cfunctions import fma
assert _test_args(fma(x, y, z))
def test_sympy__codegen__cfunctions__log10():
from sympy.codegen.cfunctions import log10
assert _test_args(log10(x))
def test_sympy__codegen__cfunctions__Sqrt():
from sympy.codegen.cfunctions import Sqrt
assert _test_args(Sqrt(x))
def test_sympy__codegen__cfunctions__Cbrt():
from sympy.codegen.cfunctions import Cbrt
assert _test_args(Cbrt(x))
def test_sympy__codegen__cfunctions__hypot():
from sympy.codegen.cfunctions import hypot
assert _test_args(hypot(x, y))
def test_sympy__codegen__ffunctions__FFunction():
from sympy.codegen.ffunctions import FFunction
assert _test_args(FFunction('f'))
def test_sympy__codegen__ffunctions__F95Function():
from sympy.codegen.ffunctions import F95Function
assert _test_args(F95Function('f'))
def test_sympy__codegen__ffunctions__isign():
from sympy.codegen.ffunctions import isign
assert _test_args(isign(1, x))
def test_sympy__codegen__ffunctions__dsign():
from sympy.codegen.ffunctions import dsign
assert _test_args(dsign(1, x))
def test_sympy__codegen__ffunctions__cmplx():
from sympy.codegen.ffunctions import cmplx
assert _test_args(cmplx(x, y))
def test_sympy__codegen__ffunctions__kind():
from sympy.codegen.ffunctions import kind
assert _test_args(kind(x))
def test_sympy__codegen__ffunctions__merge():
    from sympy.codegen.ffunctions import merge
    # Analogue of Fortran's MERGE(tsource, fsource, mask).
    merged = merge(1, 2, Eq(x, 0))
    assert _test_args(merged)
def test_sympy__codegen__ffunctions___literal():
from sympy.codegen.ffunctions import _literal
assert _test_args(_literal(1))
def test_sympy__codegen__ffunctions__literal_sp():
from sympy.codegen.ffunctions import literal_sp
assert _test_args(literal_sp(1))
def test_sympy__codegen__ffunctions__literal_dp():
from sympy.codegen.ffunctions import literal_dp
assert _test_args(literal_dp(1))
# Argument-consistency tests for sympy.vector core classes.  Abstract base
# classes (BasisDependent*, Vector, Dyadic) are deliberately not instantiated:
# their tests only verify that the class is importable.
def test_sympy__vector__coordsysrect__CoordSys3D():
    from sympy.vector.coordsysrect import CoordSys3D
    assert _test_args(CoordSys3D('C'))
def test_sympy__vector__point__Point():
    from sympy.vector.point import Point
    assert _test_args(Point('P'))
def test_sympy__vector__basisdependent__BasisDependent():
    from sympy.vector.basisdependent import BasisDependent
    #These classes have been created to maintain an OOP hierarchy
    #for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentMul():
    from sympy.vector.basisdependent import BasisDependentMul
    #These classes have been created to maintain an OOP hierarchy
    #for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentAdd():
    from sympy.vector.basisdependent import BasisDependentAdd
    #These classes have been created to maintain an OOP hierarchy
    #for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__basisdependent__BasisDependentZero():
    from sympy.vector.basisdependent import BasisDependentZero
    #These classes have been created to maintain an OOP hierarchy
    #for Vectors and Dyadics. Are NOT meant to be initialized
def test_sympy__vector__vector__BaseVector():
    from sympy.vector.vector import BaseVector
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseVector(0, C, ' ', ' '))
def test_sympy__vector__vector__VectorAdd():
    from sympy.vector.vector import VectorAdd, VectorMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    from sympy.abc import a, b, c, x, y, z
    v1 = a*C.i + b*C.j + c*C.k
    v2 = x*C.i + y*C.j + z*C.k
    assert _test_args(VectorAdd(v1, v2))
    assert _test_args(VectorMul(x, v1))
def test_sympy__vector__vector__VectorMul():
    from sympy.vector.vector import VectorMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    from sympy.abc import a
    assert _test_args(VectorMul(a, C.i))
def test_sympy__vector__vector__VectorZero():
    from sympy.vector.vector import VectorZero
    assert _test_args(VectorZero())
def test_sympy__vector__vector__Vector():
    from sympy.vector.vector import Vector
    #Vector is never to be initialized using args
    pass
def test_sympy__vector__vector__Cross():
    from sympy.vector.vector import Cross
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    _test_args(Cross(C.i, C.j))
def test_sympy__vector__vector__Dot():
    from sympy.vector.vector import Dot
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    _test_args(Dot(C.i, C.j))
def test_sympy__vector__dyadic__Dyadic():
    from sympy.vector.dyadic import Dyadic
    #Dyadic is never to be initialized using args
    pass
# Argument-consistency tests for sympy.vector dyadics, the del operator and
# derived operators, orienters, scalars, plus two stragglers from
# sympy.physics.wigner and sympy.integrals.rubi.
def test_sympy__vector__dyadic__BaseDyadic():
    from sympy.vector.dyadic import BaseDyadic
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseDyadic(C.i, C.j))
def test_sympy__vector__dyadic__DyadicMul():
    from sympy.vector.dyadic import BaseDyadic, DyadicMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j)))
def test_sympy__vector__dyadic__DyadicAdd():
    from sympy.vector.dyadic import BaseDyadic, DyadicAdd
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i),
                                    BaseDyadic(C.i, C.j)))
def test_sympy__vector__dyadic__DyadicZero():
    from sympy.vector.dyadic import DyadicZero
    assert _test_args(DyadicZero())
def test_sympy__vector__deloperator__Del():
    from sympy.vector.deloperator import Del
    assert _test_args(Del())
def test_sympy__vector__operators__Curl():
    from sympy.vector.operators import Curl
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Curl(C.i))
def test_sympy__vector__operators__Divergence():
    from sympy.vector.operators import Divergence
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Divergence(C.i))
def test_sympy__vector__operators__Gradient():
    from sympy.vector.operators import Gradient
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Gradient(C.x))
# Abstract orienter bases are import-only, like the BasisDependent family above.
def test_sympy__vector__orienters__Orienter():
    from sympy.vector.orienters import Orienter
    #Not to be initialized
def test_sympy__vector__orienters__ThreeAngleOrienter():
    from sympy.vector.orienters import ThreeAngleOrienter
    #Not to be initialized
def test_sympy__vector__orienters__AxisOrienter():
    from sympy.vector.orienters import AxisOrienter
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(AxisOrienter(x, C.i))
def test_sympy__vector__orienters__BodyOrienter():
    from sympy.vector.orienters import BodyOrienter
    assert _test_args(BodyOrienter(x, y, z, '123'))
def test_sympy__vector__orienters__SpaceOrienter():
    from sympy.vector.orienters import SpaceOrienter
    assert _test_args(SpaceOrienter(x, y, z, '123'))
def test_sympy__vector__orienters__QuaternionOrienter():
    from sympy.vector.orienters import QuaternionOrienter
    a, b, c, d = symbols('a b c d')
    assert _test_args(QuaternionOrienter(a, b, c, d))
def test_sympy__vector__scalar__BaseScalar():
    from sympy.vector.scalar import BaseScalar
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseScalar(0, C, ' ', ' '))
def test_sympy__physics__wigner__Wigner3j():
    from sympy.physics.wigner import Wigner3j
    assert _test_args(Wigner3j(0, 0, 0, 0, 0, 0))
def test_sympy__integrals__rubi__symbol__matchpyWC():
    from sympy.integrals.rubi.symbol import matchpyWC
    assert _test_args(matchpyWC(1, True, 'a'))
| true | true |
f7f783469204d3a428dabfa5cbc74d95812a09b0 | 693 | py | Python | organify/groups/migrations/0003_auto_20201205_2108.py | xmedinavei/organify | 2b819f058e8f169cdad5324218d54009d2b937d0 | [
"MIT"
] | null | null | null | organify/groups/migrations/0003_auto_20201205_2108.py | xmedinavei/organify | 2b819f058e8f169cdad5324218d54009d2b937d0 | [
"MIT"
] | null | null | null | organify/groups/migrations/0003_auto_20201205_2108.py | xmedinavei/organify | 2b819f058e8f169cdad5324218d54009d2b937d0 | [
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-05 21:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration for the ``groups`` app (auto-generated 2020-12-05).

    Adds default ordering / ``get_latest_by`` metadata to ``Group`` and
    ``Membership`` and introduces an optional ``Group.pic`` text field
    (presumably a picture URL or encoded payload -- TODO confirm with callers).
    """
    dependencies = [
        ('groups', '0002_auto_20201204_2134'),
    ]
    operations = [
        # Groups list newest-first; `.latest()` resolves via `created`.
        migrations.AlterModelOptions(
            name='group',
            options={'get_latest_by': 'created', 'ordering': ['-created', '-modified']},
        ),
        # Memberships list most-recently-joined first.
        migrations.AlterModelOptions(
            name='membership',
            options={'get_latest_by': 'joined', 'ordering': ['-joined']},
        ),
        # New nullable text column so existing rows stay valid without a default.
        migrations.AddField(
            model_name='group',
            name='pic',
            field=models.TextField(null=True),
        ),
    ]
| 25.666667 | 88 | 0.558442 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups', '0002_auto_20201204_2134'),
]
operations = [
migrations.AlterModelOptions(
name='group',
options={'get_latest_by': 'created', 'ordering': ['-created', '-modified']},
),
migrations.AlterModelOptions(
name='membership',
options={'get_latest_by': 'joined', 'ordering': ['-joined']},
),
migrations.AddField(
model_name='group',
name='pic',
field=models.TextField(null=True),
),
]
| true | true |
f7f784fb516e288d01b70a339fb5a0e0052c4534 | 4,472 | py | Python | stock_chart_canvas.py | bshafi/StockMomentum | c2949ebe2fc1b5b370bece1e4d16c057fd7dc5bc | [
"MIT"
] | null | null | null | stock_chart_canvas.py | bshafi/StockMomentum | c2949ebe2fc1b5b370bece1e4d16c057fd7dc5bc | [
"MIT"
] | null | null | null | stock_chart_canvas.py | bshafi/StockMomentum | c2949ebe2fc1b5b370bece1e4d16c057fd7dc5bc | [
"MIT"
] | null | null | null |
from typing import Iterable, Tuple
from datetime import date, datetime, time, timedelta, timezone
from enum import IntEnum
from threading import Event, Thread

from ipycanvas import Canvas
from ipycanvas.canvas import hold_canvas
from ipycanvas import Canvas  # NOTE(review): duplicate of the import above; kept as-is
from traitlets.traitlets import Enum
def draw_ticker(ticker, canvas):
    # Placeholder: not implemented yet; intended to render `ticker` onto `canvas`.
    pass
class TimeFrame(IntEnum):
    """Candle time frames; each member's value is a duration in milliseconds.

    The original class subclassed ``Enum`` imported from
    ``traitlets.traitlets`` -- that ``Enum`` is a *trait type*, not a real
    enumeration, so the members were plain class-level integers.  Stdlib
    ``IntEnum`` keeps the numeric semantics callers rely on (e.g.
    ``choose_time_frame`` divides a time span by a member) while making
    ``TimeFrame.X`` a proper, comparable enum member.
    """

    MINUTES_5 = 5 * 60 * 1000  # five minutes
    HOURS = 60 * 60 * 1000     # one hour
    DAYS = 24 * HOURS          # one day
    MONTHS = 30 * DAYS         # one (30-day) month
def choose_time_frame(delta_time_ms: float) -> TimeFrame:
    """Return the coarsest ``TimeFrame`` that still yields more than ten candles.

    Bug fixed: the original listed members (``MS``, ``SECONDS``, ``MINUTES``,
    ``WEEK``, ``MONTH``) that ``TimeFrame`` never defined, so every call raised
    ``AttributeError``.  Only the members that actually exist are used now.

    :param delta_time_ms: total charted time span in milliseconds.
    :return: the largest frame with ``delta_time_ms / frame > 10``; falls back
        to ``TimeFrame.MINUTES_5`` when even the finest frame is too coarse
        (the original would raise ``ValueError`` from ``max()`` on an empty
        iterable in that case).
    """
    frames = [TimeFrame.MINUTES_5, TimeFrame.HOURS, TimeFrame.DAYS, TimeFrame.MONTHS]
    usable = [frame for frame in frames if delta_time_ms / frame > 10]
    return max(usable) if usable else TimeFrame.MINUTES_5
def consolidate(stocks: Iterable[Tuple[datetime, float, float, float, float]]) -> Tuple[datetime, float, float, float, float]:
    """Collapse a sequence of OHLC candles into one aggregate candle.

    The result uses the first row's timestamp and open, the minimum low, the
    maximum high, and the last row's close.  An empty input yields a tuple of
    five ``None`` values (unchanged from the original behaviour).

    Fixes over the original: the loop variable ``open`` shadowed the builtin
    (renamed ``open_``) and ``None`` checks used ``==`` instead of ``is``.

    :param stocks: iterable of ``(timestamp, open, high, low, close)`` rows,
        assumed to be in chronological order.
    """
    first_date = None
    first_open = None
    last_close = None
    min_low = None
    max_high = None
    for timestamp, open_, high, low, close in stocks:
        if first_date is None:
            first_date = timestamp
            first_open = open_
        last_close = close
        if min_low is None or low < min_low:
            min_low = low
        if max_high is None or high > max_high:
            max_high = high
    return (first_date, first_open, max_high, min_low, last_close)
def draw_stocks(data: Iterable[Tuple[datetime, float, float, float, float]], canvas: Canvas, min_x: datetime, min_y: float, max_y: float):
    """Render OHLC rows as candlesticks on ``canvas``.

    Candles are laid out on a fixed 5-minute grid starting at ``min_x``; rows
    outside the visible time window are skipped.  ``min_y``/``max_y`` define
    the visible price band, mapped onto the full canvas height.
    """
    # Visible window width follows from the canvas size and a fixed candle width.
    CANDLE_STICK_WIDTH_PX = 20
    max_x = min_x + timedelta(minutes=5) * (canvas.width / CANDLE_STICK_WIDTH_PX)
    # NOTE(review): `open` shadows the builtin; rename to `open_` when touched.
    for (timestamp, open, high, low, close) in data:
        if min_x > timestamp or timestamp > max_x:
            continue
        # NOTE(review): despite the `_ms` suffix these are in *seconds*
        # (datetime.timestamp() returns seconds); the ratios below are
        # unitless, so the geometry still works out.
        time_range_ms = (max_x.timestamp() - min_x.timestamp())
        time_off_of_cur = (timestamp.timestamp() - min_x.timestamp())
        x1 = (time_off_of_cur / time_range_ms) * canvas.width
        # TODO: Update this later
        # \/ Assumes it a 5min chart
        x2 = ((time_off_of_cur + 5 * 60) / time_range_ms) * canvas.width
        width = x2 - x1
        # Map prices into pixel rows; canvas y grows downward, hence the flip.
        y_low = canvas.height - ((low - min_y) / (max_y - min_y)) * canvas.height
        y_high = canvas.height - ((high - min_y) / (max_y - min_y)) * canvas.height
        y_open = canvas.height - ((open - min_y) / (max_y - min_y)) * canvas.height
        y_close = canvas.height - ((close - min_y) / (max_y - min_y)) * canvas.height
        canvas.fill_style = 'green';
        canvas.stroke_style = 'green'
        height = abs(y_close - y_open)
        # NOTE(review): for down candles (open > close) y_close is the *bottom*
        # of the body, so the filled rect extends below the candle; `top`
        # should probably be min(y_open, y_close) -- confirm visually.
        top = y_close
        if open > close:
            canvas.fill_style = 'red'
            canvas.stroke_style = 'red'
        # Wick: vertical high-low line centred in the candle slot, then the
        # body rect with a 10% margin on each side.
        canvas.stroke_line((x1 + x2) / 2, y_high, (x1 + x2) / 2, y_low)
        canvas.fill_rect(x1 + width / 10, top, width - (width / 5), height)
class StockChartCanvas:
    """Interactive candlestick chart bound to an ipycanvas ``Canvas``.

    Wires mouse handlers for click-drag panning and runs a background thread
    that redraws at roughly 60 fps until :meth:`stop` is called.
    """
    def __init__(self, canvas: Canvas, data):
        # `data` is a sequence of (timestamp, open, high, low, close) rows,
        # as consumed by `draw_stocks` above; must be non-empty (data[0][0]).
        self.data = data
        self.canvas = canvas
        self.mouse_down = False
        # Pan state: x is a chart timestamp (starts at the first candle),
        # y is a price offset.
        self.x_offset = data[0][0]
        self.y_offset = 0
        self.prev_pos = (0, 0)
        # NOTE(review): this lambda returns the bound method without calling
        # it; an initial redraw (`self.redraw()`) was probably intended.
        self.canvas.on_client_ready(lambda: self.redraw)
        self.canvas.on_mouse_down(lambda x, y: self._mouse_down(x, y))
        self.canvas.on_mouse_up(lambda x, y: self._mouse_up(x, y))
        self.canvas.on_mouse_move(lambda x, y: self._mouse_move(x, y))
        self.canvas.on_mouse_out(lambda x, y: self._mouse_out(x, y))
        # Redraw-loop machinery; `stopped` signals the thread to exit.
        # NOTE(review): thread is non-daemon, so forgetting stop() keeps the
        # process alive.
        self.stopped = Event()
        self.event_loop = Thread(target=lambda: self._update())
        pass
    def start(self):
        # Begin the background redraw loop.
        self.event_loop.start()
    def stop(self):
        # Signal the loop to exit, then wait for the thread to finish.
        self.stopped.set()
        self.event_loop.join()
    def _update(self):
        # Event.wait doubles as the frame timer: ~1/60 s per iteration until
        # stop() sets the flag.
        while not self.stopped.wait(1/60):
            self.redraw()
    def redraw(self):
        # hold_canvas batches all drawing commands into a single frame.
        with hold_canvas(self.canvas):
            self.canvas.clear()
            # Price window is a hard-coded 135..140 band shifted by the
            # vertical pan offset.
            draw_stocks(self.data, self.canvas, self.x_offset, self.y_offset + 135, self.y_offset + 140)
    def _mouse_down(self, x, y):
        self.mouse_down = True
    def _mouse_up(self, x, y):
        self.mouse_down = False
    def _mouse_out(self, x, y):
        # Leaving the canvas cancels the drag.
        self.mouse_down = False
    def _mouse_move(self, x, y):
        # Drag-to-pan: one horizontal pixel maps to one chart minute; vertical
        # pixels are scaled down by 100 into price units.
        # NOTE(review): prev_pos is only updated while dragging, so the first
        # move after a fresh press uses the previous drag's last position.
        if self.mouse_down:
            self.x_offset = self.x_offset + timedelta(minutes=(x - self.prev_pos[0]))
            self.y_offset = self.y_offset + (y - self.prev_pos[1]) / 100
            self.prev_pos = (x, y)
| 36.357724 | 138 | 0.624329 | from typing import Iterable, Tuple
from ipycanvas import Canvas
from datetime import date, datetime, time, timedelta, timezone
from ipycanvas.canvas import hold_canvas
from ipycanvas import Canvas
from threading import Event, Thread
from traitlets.traitlets import Enum
def draw_ticker(ticker, canvas):
pass
class TimeFrame(Enum):
MINUTES_5 = 5 * 60 * 1000
HOURS = 60 * 60 * 1000
DAYS = 24 * HOURS
MONTHS = 30 * DAYS
def choose_time_frame(delta_time_ms: float) -> TimeFrame:
time_frames = [TimeFrame.MS, TimeFrame.SECONDS, TimeFrame.MINUTES, TimeFrame.HOURS, TimeFrame.DAYS, TimeFrame.WEEK, TimeFrame.MONTH]
time_frames_gt_10 = filter(lambda x: delta_time_ms / x > 10, time_frames)
return max(time_frames_gt_10)
def consolidate(stocks: Iterable[Tuple[datetime, float, float, float, float]]) -> Tuple[datetime, float, float, float, float]:
first_date = None
first_open = None
last_close = None
min_low = None
max_high = None
for (timestamp, open, high, low, close) in stocks:
if first_date == None:
first_date = timestamp
if first_open == None:
first_open = open
last_close = close
if min_low == None or min_low > low:
min_low = low
if max_high == None or max_high < high:
max_high = high
return (first_date, first_open, max_high, min_low, last_close)
def draw_stocks(data: Iterable[Tuple[datetime, float, float, float, float]], canvas: Canvas, min_x: datetime, min_y: float, max_y: float):
CANDLE_STICK_WIDTH_PX = 20
max_x = min_x + timedelta(minutes=5) * (canvas.width / CANDLE_STICK_WIDTH_PX)
for (timestamp, open, high, low, close) in data:
if min_x > timestamp or timestamp > max_x:
continue
time_range_ms = (max_x.timestamp() - min_x.timestamp())
time_off_of_cur = (timestamp.timestamp() - min_x.timestamp())
x1 = (time_off_of_cur / time_range_ms) * canvas.width
x2 = ((time_off_of_cur + 5 * 60) / time_range_ms) * canvas.width
width = x2 - x1
y_low = canvas.height - ((low - min_y) / (max_y - min_y)) * canvas.height
y_high = canvas.height - ((high - min_y) / (max_y - min_y)) * canvas.height
y_open = canvas.height - ((open - min_y) / (max_y - min_y)) * canvas.height
y_close = canvas.height - ((close - min_y) / (max_y - min_y)) * canvas.height
canvas.fill_style = 'green';
canvas.stroke_style = 'green'
height = abs(y_close - y_open)
top = y_close
if open > close:
canvas.fill_style = 'red'
canvas.stroke_style = 'red'
canvas.stroke_line((x1 + x2) / 2, y_high, (x1 + x2) / 2, y_low)
canvas.fill_rect(x1 + width / 10, top, width - (width / 5), height)
class StockChartCanvas:
def __init__(self, canvas: Canvas, data):
self.data = data
self.canvas = canvas
self.mouse_down = False
self.x_offset = data[0][0]
self.y_offset = 0
self.prev_pos = (0, 0)
self.canvas.on_client_ready(lambda: self.redraw)
self.canvas.on_mouse_down(lambda x, y: self._mouse_down(x, y))
self.canvas.on_mouse_up(lambda x, y: self._mouse_up(x, y))
self.canvas.on_mouse_move(lambda x, y: self._mouse_move(x, y))
self.canvas.on_mouse_out(lambda x, y: self._mouse_out(x, y))
self.stopped = Event()
self.event_loop = Thread(target=lambda: self._update())
pass
def start(self):
self.event_loop.start()
def stop(self):
self.stopped.set()
self.event_loop.join()
def _update(self):
while not self.stopped.wait(1/60):
self.redraw()
def redraw(self):
with hold_canvas(self.canvas):
self.canvas.clear()
draw_stocks(self.data, self.canvas, self.x_offset, self.y_offset + 135, self.y_offset + 140)
def _mouse_down(self, x, y):
self.mouse_down = True
def _mouse_up(self, x, y):
self.mouse_down = False
def _mouse_out(self, x, y):
self.mouse_down = False
def _mouse_move(self, x, y):
if self.mouse_down:
self.x_offset = self.x_offset + timedelta(minutes=(x - self.prev_pos[0]))
self.y_offset = self.y_offset + (y - self.prev_pos[1]) / 100
self.prev_pos = (x, y)
| true | true |
f7f78527ed44325d149a804e958808ecb83708a2 | 2,556 | py | Python | src/pykeen/trackers/tensorboard.py | SmartDataAnalytics/PyKEEN | f9925a0039dba193a4b55b5489449641491c3d55 | [
"MIT"
] | 88 | 2018-10-14T16:28:38.000Z | 2020-06-22T08:03:15.000Z | src/pykeen/trackers/tensorboard.py | SmartDataAnalytics/PyKEEN | f9925a0039dba193a4b55b5489449641491c3d55 | [
"MIT"
] | 42 | 2018-10-10T18:05:56.000Z | 2020-06-09T09:19:27.000Z | src/pykeen/trackers/tensorboard.py | SmartDataAnalytics/PyKEEN | f9925a0039dba193a4b55b5489449641491c3d55 | [
"MIT"
] | 19 | 2019-02-15T17:36:46.000Z | 2020-03-28T11:03:41.000Z | # -*- coding: utf-8 -*-
"""An adapter for TensorBoard."""
import pathlib
import time
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from .base import ResultTracker
from ..constants import PYKEEN_LOGS
from ..utils import flatten_dictionary, normalize_path
if TYPE_CHECKING:
import torch.utils.tensorboard
# Explicit public API of this module.
__all__ = [
    "TensorBoardResultTracker",
]
class TensorBoardResultTracker(ResultTracker):
    """Result tracker that forwards metrics and parameters to TensorBoard."""

    summary_writer: "torch.utils.tensorboard.SummaryWriter"
    path: pathlib.Path

    def __init__(
        self,
        experiment_path: Union[None, str, pathlib.Path] = None,
        experiment_name: Optional[str] = None,
    ):
        """
        Initialize result tracking via Tensorboard.

        :param experiment_path:
            Custom directory in which the tensorboard event files are written.
        :param experiment_name:
            Sub-directory name used for logging when ``experiment_path`` is not
            given; defaults to the current timestamp.  Has no effect when
            ``experiment_path`` is set.
        """
        import torch.utils.tensorboard

        if experiment_name is None:
            experiment_name = time.strftime("%Y-%m-%d-%H-%M-%S")
        log_dir = normalize_path(
            experiment_path,
            default=PYKEEN_LOGS.joinpath("tensorboard", experiment_name),
        )
        # if we really need access to the path later, we can expose it as a
        # property via self.writer.log_dir
        self.writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir)

    # docstr-coverage: inherited
    def log_metrics(
        self,
        metrics: Mapping[str, float],
        step: Optional[int] = None,
        prefix: Optional[str] = None,
    ) -> None:  # noqa: D102
        flat = flatten_dictionary(dictionary=metrics, prefix=prefix)
        for tag, scalar in flat.items():
            self.writer.add_scalar(tag=tag, scalar_value=scalar, global_step=step)
        self.writer.flush()

    # docstr-coverage: inherited
    def log_params(self, params: Mapping[str, Any], prefix: Optional[str] = None) -> None:  # noqa: D102
        for name, value in flatten_dictionary(dictionary=params, prefix=prefix).items():
            self.writer.add_text(tag=str(name), text_string=str(value))
        self.writer.flush()

    # docstr-coverage: inherited
    def end_run(self, success: bool = True) -> None:  # noqa: D102
        # Flush any buffered events before closing the writer.
        self.writer.flush()
        self.writer.close()
| 34.540541 | 119 | 0.666667 |
import pathlib
import time
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from .base import ResultTracker
from ..constants import PYKEEN_LOGS
from ..utils import flatten_dictionary, normalize_path
if TYPE_CHECKING:
import torch.utils.tensorboard
__all__ = [
"TensorBoardResultTracker",
]
class TensorBoardResultTracker(ResultTracker):
summary_writer: "torch.utils.tensorboard.SummaryWriter"
path: pathlib.Path
def __init__(
self,
experiment_path: Union[None, str, pathlib.Path] = None,
experiment_name: Optional[str] = None,
):
import torch.utils.tensorboard
if experiment_name is None:
experiment_name = time.strftime("%Y-%m-%d-%H-%M-%S")
experiment_path = normalize_path(experiment_path, default=PYKEEN_LOGS.joinpath("tensorboard", experiment_name))
self.writer = torch.utils.tensorboard.SummaryWriter(log_dir=experiment_path)
def log_metrics(
self,
metrics: Mapping[str, float],
step: Optional[int] = None,
prefix: Optional[str] = None,
) -> None:
metrics = flatten_dictionary(dictionary=metrics, prefix=prefix)
for key, value in metrics.items():
self.writer.add_scalar(tag=key, scalar_value=value, global_step=step)
self.writer.flush()
def log_params(self, params: Mapping[str, Any], prefix: Optional[str] = None) -> None:
params = flatten_dictionary(dictionary=params, prefix=prefix)
for key, value in params.items():
self.writer.add_text(tag=str(key), text_string=str(value))
self.writer.flush()
def end_run(self, success: bool = True) -> None:
self.writer.flush()
self.writer.close()
| true | true |
f7f785346161c516f75e02a225a5fee47833fd32 | 810 | py | Python | setup.py | lklet123/Proxy-List-Scrapper | 77908ac940481d0d8ed1b28d2e37e3de736e6019 | [
"MIT"
] | 58 | 2020-05-18T20:00:50.000Z | 2022-03-19T02:56:11.000Z | setup.py | lklet123/Proxy-List-Scrapper | 77908ac940481d0d8ed1b28d2e37e3de736e6019 | [
"MIT"
] | 6 | 2020-05-29T12:15:39.000Z | 2021-05-01T06:42:28.000Z | setup.py | lklet123/Proxy-List-Scrapper | 77908ac940481d0d8ed1b28d2e37e3de736e6019 | [
"MIT"
] | 17 | 2020-12-17T16:42:59.000Z | 2022-02-03T08:12:24.000Z |
# read the contents of your README file
from os import path

from setuptools import find_packages, setup

# Use the sibling README.md as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
# Explicit UTF-8: without it, open() uses the locale's default encoding
# (e.g. cp1252 on Windows), which can fail on non-ASCII README content.
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='Proxy-List-Scrapper',
    version='0.2.2',
    packages=find_packages(),
    url='https://pypi.org/project/Proxy-List-Scrapper/',
    license='MIT License',
    author='Sameer Narkhede',
    author_email='narkhedesam@gmail.com',
    description='Proxy list scrapper from various websites. They gives the free proxies for temporary use.',
    # other arguments omitted
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=[
        'requests',
    ],
    include_package_data=True,
)
| 28.928571 | 108 | 0.718519 |
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name='Proxy-List-Scrapper',
version='0.2.2',
packages=find_packages(),
url='https://pypi.org/project/Proxy-List-Scrapper/',
license='MIT License',
author='Sameer Narkhede',
author_email='narkhedesam@gmail.com',
description='Proxy list scrapper from various websites. They gives the free proxies for temporary use.',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[
'requests',
],
include_package_data=True,
)
| true | true |
f7f785401670b6d4904b14f90060b6de0349974a | 1,779 | py | Python | python/tests/episode_time_test.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 7,407 | 2016-12-06T08:40:58.000Z | 2022-03-31T12:19:09.000Z | python/tests/episode_time_test.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 227 | 2016-12-06T22:05:33.000Z | 2022-03-29T09:47:06.000Z | python/tests/episode_time_test.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 1,594 | 2016-12-06T08:44:13.000Z | 2022-03-31T12:19:12.000Z | # Copyright 2017-2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Test for the EpisodeTimeMs callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import deepmind_lab
class EpisodeTimeTest(unittest.TestCase):
  """Checks that EPISODE_TIME_SECONDS advances by 1.0 after `fps` frames."""
  def run_at_frame_rate(self, fps):
    # Launch the fixed test level at the requested frame rate; after exactly
    # `fps` stepped frames, one in-game second must have elapsed.
    env = deepmind_lab.Lab(
        'tests/episode_time_test', ['EPISODE_TIME_SECONDS'],
        config={
            'fps': str(fps),
            'width': '32',
            'height': '32'
        })
    env.reset()
    # Seven-element no-op action vector of zeros.
    nop = np.zeros((7,), dtype=np.intc)
    for _ in six.moves.range(0, fps):
      env.step(nop, 1)
    obs = env.observations()
    self.assertEqual(obs['EPISODE_TIME_SECONDS'][0], 1.0)
  def test_at_60(self):
    self.run_at_frame_rate(60)
  def test_at_30(self):
    self.run_at_frame_rate(30)
if __name__ == '__main__':
  # Under Bazel, TEST_SRCDIR is set: point deepmind_lab at the runfiles tree
  # before handing control to the unittest runner.
  if os.environ.get('TEST_SRCDIR'):
    deepmind_lab.set_runfiles_path(
        os.path.join(os.environ['TEST_SRCDIR'],
                     'org_deepmind_lab'))
  unittest.main()
| 28.693548 | 73 | 0.700956 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import deepmind_lab
class EpisodeTimeTest(unittest.TestCase):
def run_at_frame_rate(self, fps):
env = deepmind_lab.Lab(
'tests/episode_time_test', ['EPISODE_TIME_SECONDS'],
config={
'fps': str(fps),
'width': '32',
'height': '32'
})
env.reset()
nop = np.zeros((7,), dtype=np.intc)
for _ in six.moves.range(0, fps):
env.step(nop, 1)
obs = env.observations()
self.assertEqual(obs['EPISODE_TIME_SECONDS'][0], 1.0)
def test_at_60(self):
self.run_at_frame_rate(60)
def test_at_30(self):
self.run_at_frame_rate(30)
if __name__ == '__main__':
if os.environ.get('TEST_SRCDIR'):
deepmind_lab.set_runfiles_path(
os.path.join(os.environ['TEST_SRCDIR'],
'org_deepmind_lab'))
unittest.main()
| true | true |
f7f7861e1ef8153ef0d0a064cf966dedb719477f | 7,340 | py | Python | docs/conf.py | EmilyYLMa/cim2busbranch | a5a388c0eb3f126a24f2b0c8234cc8cc36dedefd | [
"BSD-2-Clause"
] | 1 | 2020-03-06T08:09:06.000Z | 2020-03-06T08:09:06.000Z | docs/conf.py | EmilyYLMa/cim2busbranch | a5a388c0eb3f126a24f2b0c8234cc8cc36dedefd | [
"BSD-2-Clause"
] | null | null | null | docs/conf.py | EmilyYLMa/cim2busbranch | a5a388c0eb3f126a24f2b0c8234cc8cc36dedefd | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Cim2BusBranch documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 23 11:31:37 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import cim2busbranch
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions enabled for this project.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# (This project uses `.txt` rather than the more common `.rst`.)
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cim2BusBranch'
copyright = u'2011, Stefan Scherfke'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.  Derived from the package itself so the docs never drift
# from the code.
#
# The short X.Y version.
version = '.'.join(cim2busbranch.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = cim2busbranch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cim2BusBranchdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Cim2BusBranch.tex', u'Cim2BusBranch Documentation',
u'Stefan Scherfke', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cim2busbranch', u'Cim2BusBranch Documentation',
[u'Stefan Scherfke'], 1),
]
# Intersphinx: cross-reference the Python standard library documentation.
# The named-key form replaces the legacy unnamed `{url: None}` entry, which
# modern Sphinx rejects; also switches to HTTPS and an explicit Python 3
# inventory target.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
| 31.913043 | 79 | 0.721935 |
# Sphinx build configuration for the Cim2BusBranch documentation.
# Executed by sphinx-build; every assignment below sets a Sphinx config value,
# and the commented-out assignments document the available defaults.
import os
import sys
# Make the project package importable so autodoc and the version lookup work.
sys.path.insert(0, os.path.abspath('..'))
import cim2busbranch
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
]
templates_path = ['_templates']
source_suffix = '.txt'
master_doc = 'index'
project = u'Cim2BusBranch'
copyright = u'2011, Stefan Scherfke'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(cim2busbranch.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = cim2busbranch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cim2BusBranchdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'Cim2BusBranch.tex', u'Cim2BusBranch Documentation',
     u'Stefan Scherfke', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cim2busbranch', u'Cim2BusBranch Documentation',
     [u'Stefan Scherfke'], 1),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| true | true |
f7f7863259a2876e6809fd2aa4200f996043b598 | 3,477 | py | Python | parsers.py | trimitri/freqle | 67f48a0f4a4ab181902796339e7d0ad7a40399c0 | [
"Apache-2.0"
] | 1 | 2019-12-05T09:27:20.000Z | 2019-12-05T09:27:20.000Z | parsers.py | trimitri/freqle | 67f48a0f4a4ab181902796339e7d0ad7a40399c0 | [
"Apache-2.0"
] | null | null | null | parsers.py | trimitri/freqle | 67f48a0f4a4ab181902796339e7d0ad7a40399c0 | [
"Apache-2.0"
] | null | null | null | """Parse output from various sources into `FreqSeries` objects."""
from typing import List
import pandas as pd
from .freq_series import FreqSeries
def fokus2_txt(file_name: str, session: str = None,
               drop_lines: List[int] = None) -> FreqSeries:
    """Parse a frequency measurement done by the FOKUS2 dual frequency comb.

    The file is tab-separated with microsecond timestamps in the first
    column and frequencies in the second; its first line is used as the
    name of the resulting series.

    :param file_name: File to read from.
    :param session: Measurement context. See `FreqSeries`'s `session` param.
    :param drop_lines: Positional indices of samples to discard.
    """
    # Grab the descriptive header line before parsing the columns.
    with open(file_name) as measurement_file:
        header_line = measurement_file.readline().strip()
    series = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])
    if drop_lines is not None:
        series = series.drop(series.index[drop_lines])
    series.index = pd.to_datetime(series.index, unit='us')
    series.name = header_line
    return FreqSeries(series, session=session)
def generic_freq_counter(
        file_name: str, session: str = None,
        time_unit: str = 's', original_freq: float = None) -> FreqSeries:
    """Parse a generic two-column counter file like (time, frequency).

    :param file_name: File to read from.
    :param session: Measurement context. See `FreqSeries`'s `session` param.
    :param time_unit: Which unit does the counter count time in? (s, ms, us, ns)
    :param original_freq: Forwarded unchanged to the `FreqSeries` constructor.
    """
    counts = pd.read_table(file_name, index_col=0, usecols=[0, 1],
                           squeeze=True)
    timestamps = pd.to_datetime(counts.index, unit=time_unit)
    counts.index = timestamps
    return FreqSeries(counts, session=session, original_freq=original_freq)
def pendulum_cnt91_txt(file_name: str, session: str = None) -> FreqSeries:
    """Parse frequency measurement done with a Pendulum CNT 91 counter.

    The header line carries a description of the measurement (used as the
    series name); characters 21-39 of that line hold the start timestamp.

    :param file_name: File to read from.
    :param session: Measurement context. See `FreqSeries`'s `session` param.
    """
    # The first line doubles as the series name and start-time carrier.
    with open(file_name) as counter_file:
        header = counter_file.readline().replace('\t', ' ').strip()
    start_time = pd.to_datetime(header[21:40])
    measurement = pd.read_table(file_name, squeeze=True, index_col=0,
                                usecols=[0, 1])
    measurement.index = pd.to_datetime(measurement.index, unit='s',
                                       origin=start_time)
    measurement.name = header
    return FreqSeries(measurement, session=session)
def menlo_lambda_freq_counter(file_name: str, session_name: str,
                              original_freq: float, series: int = 1) -> FreqSeries:
    """Parse a frequency measurement recorded by a Menlo "Lambda" counter.

    The file is whitespace-separated; the first two columns hold date
    ('%y%m%d') and time-of-day ('%H%M%S.%f'), and the following columns
    hold one or more frequency time series.

    :param file_name: File to read from.
    :param session_name: Measurement context. See `FreqSeries`'s `session`
        param.
    :param original_freq: Forwarded unchanged to `FreqSeries`.
    :param series: Which of the recorded time series to use?
    """
    # Column 2 + `series`: columns 0/1 are the timestamp, data starts at 2.
    # NOTE(review): `series=1` thus selects the *second* data column —
    # presumably deliberate; confirm against the counter's file layout.
    data = pd.read_csv(file_name, delim_whitespace=True, usecols=[2 + series],
                       header=None, squeeze=True)
    # Create an equidistant time stamp index, as the values in the Menlo
    # counter file are garbage.  Only the first and last timestamps are
    # trusted; everything in between is interpolated linearly below.
    first_sample = pd.read_csv(file_name, delim_whitespace=True,
                               usecols=[0, 1], header=None,
                               nrows=1).applymap(str).values
    # `skiprows=len(data) - 1` leaves exactly the final row (assumes the
    # file has no header and no trailing blank line — TODO confirm).
    last_sample = pd.read_csv(file_name, delim_whitespace=True,
                              usecols=[0, 1], header=None,
                              skiprows=len(data) - 1).applymap(str).values
    start = pd.to_datetime("{} {}".format(first_sample[0][0], first_sample[0][1]),
                           format='%y%m%d %H%M%S.%f')
    end = pd.to_datetime("{} {}".format(last_sample[0][0], last_sample[0][1]),
                         format='%y%m%d %H%M%S.%f')
    # Evenly spaced timestamps from first to last sample.
    data.index = pd.date_range(start, end, len(data))
    return FreqSeries(data, session=session_name, original_freq=original_freq)
| 43.4625 | 83 | 0.637043 | from typing import List
import pandas as pd
from .freq_series import FreqSeries
def fokus2_txt(file_name: str, session: str = None,
drop_lines: List[int] = None) -> FreqSeries:
data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])
if drop_lines is not None:
data.drop(data.index[drop_lines], inplace=True)
data.index = pd.to_datetime(data.index, unit='us')
with open(file_name) as file:
data.name = file.readline().strip()
return FreqSeries(data, session=session)
def generic_freq_counter(
file_name: str, session: str = None,
time_unit: str = 's', original_freq: float = None) -> FreqSeries:
data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])
data.index = pd.to_datetime(data.index, unit=time_unit)
return FreqSeries(data, session=session, original_freq=original_freq)
def pendulum_cnt91_txt(file_name: str, session: str = None) -> FreqSeries:
def get_start_time(file_name: str) -> pd.datetime:
time_string = _get_info(file_name)[21:40]
return pd.to_datetime(time_string)
def _get_info(file_name: str) -> str:
with open(file_name) as file:
info = file.readline().replace('\t', ' ').strip()
return info
data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])
data.index = pd.to_datetime(data.index, unit='s',
origin=get_start_time(file_name))
data.name = _get_info(file_name)
return FreqSeries(data, session=session)
def menlo_lambda_freq_counter(file_name: str, session_name: str,
original_freq: float, series: int = 1) -> FreqSeries:
data = pd.read_csv(file_name, delim_whitespace=True, usecols=[2 + series],
header=None, squeeze=True)
first_sample = pd.read_csv(file_name, delim_whitespace=True,
usecols=[0, 1], header=None,
nrows=1).applymap(str).values
last_sample = pd.read_csv(file_name, delim_whitespace=True,
usecols=[0, 1], header=None,
skiprows=len(data) - 1).applymap(str).values
start = pd.to_datetime("{} {}".format(first_sample[0][0], first_sample[0][1]),
format='%y%m%d %H%M%S.%f')
end = pd.to_datetime("{} {}".format(last_sample[0][0], last_sample[0][1]),
format='%y%m%d %H%M%S.%f')
data.index = pd.date_range(start, end, len(data))
return FreqSeries(data, session=session_name, original_freq=original_freq)
| true | true |
f7f78778ecf8bfb23c7ad8a6d0ed31a85fa31bae | 12,741 | py | Python | pyscf/dh/grad/udfdh.py | hebrewsnabla/dh | 222e3d4d8d4d04cd63074327ebb5fb39ea4441b7 | [
"Apache-2.0"
] | 1 | 2022-02-05T08:58:13.000Z | 2022-02-05T08:58:13.000Z | pyscf/dh/grad/udfdh.py | hebrewsnabla/dh | 222e3d4d8d4d04cd63074327ebb5fb39ea4441b7 | [
"Apache-2.0"
] | null | null | null | pyscf/dh/grad/udfdh.py | hebrewsnabla/dh | 222e3d4d8d4d04cd63074327ebb5fb39ea4441b7 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
# dh import
try:
from dh.udfdh import UDFDH
from dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing
from dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1
from dh.grad.rdfdh import Gradients as RGradients
except ImportError:
from pyscf.dh.udfdh import UDFDH
from pyscf.dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing
from pyscf.dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1
from pyscf.dh.grad.rdfdh import Gradients as RGradients
# pyscf import
from pyscf import gto, lib, df
from pyscf.df.grad.rhf import _int3c_wrapper as int3c_wrapper
try:
from pyscf.dftd3 import itrf
except ImportError:
print('''Warning: dftd3 not found. You cannot using functionals with "-D3" suffix
before installing pyscf-dftd3. See https://github.com/pyscf/dftd3 and
https://github.com/ajz34/dh#dftd3-extension ''')
# other import
import numpy as np
import itertools
import ctypes
einsum = lib.einsum
α, β = 0, 1
αα, αβ, ββ = 0, 1, 2
@timing
def get_gradient_jk(dfobj: df.DF, C, D, D_r, Y_mo, cx, cx_n, max_memory=2000):
    """Nuclear-gradient contribution from the density-fitted J/K part.

    :param dfobj: Density-fitting object for J/K; supplies the orbital
        molecule (``dfobj.mol``) and the auxiliary basis (``dfobj.auxmol``).
    :param C: Spin-resolved MO coefficients, indexed ``C[σ]``
        (assumed shape (2, nao, nmo) — TODO confirm against caller).
    :param D: Spin-resolved AO density matrices, ``D[σ]``.
    :param D_r: Spin-resolved relaxed difference density in MO basis;
        symmetrized below before use.
    :param Y_mo: Per-spin 3-index DF integrals in MO basis,
        ``Y_mo[σ]`` of shape (naux, nmo, nmo).
    :param cx: Exchange coefficient of the reference functional.
    :param cx_n: Exchange coefficient of the energy functional (callers pass
        ``cx`` again when no separate energy functional is configured).
    :param max_memory: Memory budget (MB) used to size integral batches.
    :return: (natm, 3) array of gradient contributions.
    """
    mol, aux = dfobj.mol, dfobj.auxmol
    natm, nao, nmo, nocc = mol.natm, mol.nao, C.shape[-1], mol.nelec
    mocc = max(nocc)
    naux = Y_mo[0].shape[0]
    # this algorithm asserts naux = aux.nao, i.e. no linear dependency in auxiliary basis
    assert naux == aux.nao
    so = slice(0, nocc[α]), slice(0, nocc[β])
    # Only the symmetrized relaxed density enters the expressions below.
    D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2
    D_r_ao = einsum("sup, spq, svq -> suv", C, D_r_symm, C)
    # D_mo: MO-basis occupation matrix — 1 on the occupied diagonal.
    D_mo = np.zeros((2, nmo, nmo))
    for σ in (α, β):
        for i in range(nocc[σ]):
            D_mo[σ, i, i] = 1
    # Y_dot_D[σ][P]   = Σ_i (P|ii);  Y_dot_D_r[σ][P] = Σ_pq (P|pq) D_r_symm[pq].
    # The D_r contraction is batched over the auxiliary index to bound memory.
    Y_dot_D, Y_dot_D_r = np.zeros((2, naux)), np.zeros((2, naux))
    nbatch = calc_batch_size(nmo**2, max_memory)
    for σ in (α, β):
        for i in range(nocc[σ]):
            Y_dot_D[σ] += Y_mo[σ][:, i, i]
        for saux in gen_batch(0, naux, nbatch):
            Y_dot_D_r[σ][saux] = einsum("Ppq, pq -> P", Y_mo[σ][saux], D_r_symm[σ])
    Y_ip = [np.asarray(Y_mo[σ][:, so[σ]]) for σ in (α, β)]
    L_inv, L_1_gen = generator_L_1(aux)
    int3c2e_ip1_gen = int3c_wrapper(mol, aux, "int3c2e_ip1", "s1")
    int3c2e_ip2_gen = int3c_wrapper(mol, aux, "int3c2e_ip2", "s1")
    C0 = [C[σ][:, so[σ]] for σ in (α, β)]
    # D1 mixes the relaxed density with the MO occupation, weighted by the
    # exchange coefficients of the two functionals.
    D1 = [cx * D_r_symm[σ] + 0.5 * cx_n * D_mo[σ] for σ in (α, β)]
    C1 = [C[σ] @ D1[σ] for σ in (α, β)]
    grad_contrib = np.zeros((natm, 3))
    for A in range(natm):
        shA0, shA1, _, _ = mol.aoslice_by_atom()[A]
        shA0a, shA1a, _, _ = aux.aoslice_by_atom()[A]
        Y_1_mo_D_r = [np.zeros((3, naux, nocc[σ], nmo)) for σ in (α, β)]
        Y_1_dot_D, Y_1_dot_D_r = np.zeros((2, 3, naux)), np.zeros((2, 3, naux))
        pre_flop = tot_size(Y_1_mo_D_r, Y_ip, Y_1_dot_D, Y_1_dot_D_r)
        # Derivatives w.r.t. orbital-basis shells centered on atom A
        # (int3c2e_ip1), batched over shell ranges to bound memory.
        nbatch = calc_batch_size(3*(nao+mocc)*naux, max_memory, pre_flop)
        for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):
            su = slice(U0, U1)
            int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux.nbas))
            for σ in (α, β):
                Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, ui, vp -> tPip", int3c2e_ip1, L_inv, C0[σ][su], C1[σ])
                Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, up, vi -> tPip", int3c2e_ip1, L_inv, C1[σ][su], C0[σ])
                Y_1_dot_D[σ] -= 2 * einsum("tuvQ, PQ, uv -> tP", int3c2e_ip1, L_inv, D[σ][su])
                Y_1_dot_D_r[σ] -= 2 * einsum("tuvQ, PQ, uv -> tP", int3c2e_ip1, L_inv, D_r_ao[σ][su])
        # Derivatives w.r.t. auxiliary-basis shells centered on atom A
        # (int3c2e_ip2).
        nbatch = calc_batch_size(3*nao*(nao+mocc), max_memory, pre_flop)
        for shP0, shP1, P0, P1 in gen_shl_batch(aux, nbatch, shA0a, shA1a):
            sp = slice(P0, P1)
            int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, mol.nbas, shP0, shP1))
            for σ in (α, β):
                Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, ui, vp -> tPip", int3c2e_ip2, L_inv[:, sp], C0[σ], C1[σ])
                Y_1_dot_D[σ] -= einsum("tuvQ, PQ, uv -> tP", int3c2e_ip2, L_inv[:, sp], D[σ])
                Y_1_dot_D_r[σ] -= einsum("tuvQ, PQ, uv -> tP", int3c2e_ip2, L_inv[:, sp], D_r_ao[σ])
        # Response of the auxiliary-metric Cholesky factor (via generator_L_1),
        # folded into all three derivative intermediates.
        L_1 = L_1_gen(A)
        L_1_dot_inv = einsum("tRQ, PR -> tPQ", L_1, L_inv)
        for σ in (α, β):
            Y_1_mo_D_r[σ] -= einsum("Qiq, qp, tPQ -> tPip", Y_ip[σ], D1[σ], L_1_dot_inv)
            Y_1_dot_D[σ] -= einsum("Q, tPQ -> tP", Y_dot_D[σ], L_1_dot_inv)
            Y_1_dot_D_r[σ] -= einsum("Q, tPQ -> tP", Y_dot_D_r[σ], L_1_dot_inv)
            # RI-K contribution
            grad_contrib[A] += - 2 * einsum("Pip, tPip -> t", Y_ip[σ], Y_1_mo_D_r[σ])
        # RI-J contribution
        for σ, ς in itertools.product((α, β), (α, β)):
            grad_contrib[A] += (
                + einsum("P, tP -> t", Y_dot_D[σ], Y_1_dot_D_r[ς])
                + einsum("P, tP -> t", Y_dot_D_r[σ], Y_1_dot_D[ς])
                + einsum("P, tP -> t", Y_dot_D[σ], Y_1_dot_D[ς]))
    return grad_contrib
class Gradients(UDFDH, RGradients):
    """Analytic nuclear gradients for unrestricted density-fitted
    double-hybrid (UDFDH) calculations.

    The ``prepare_*`` methods each assemble one contribution and cache it
    either in ``self.tensors`` (derivative integrals) or in one of the
    ``grad_*`` attributes (gradient pieces).  The driver that sums the
    pieces into ``grad_tot``/``de`` lives in the restricted base class
    (``RGradients``) — TODO confirm.
    """
    def __init__(self, mol: gto.Mole, *args, skip_construct=False, **kwargs):
        """Construct the gradient object.

        :param mol: Molecule to evaluate gradients for.
        :param skip_construct: When True, skip the UDFDH constructor —
            used when converting an existing UDFDH object in place.
        """
        if not skip_construct:
            super(Gradients, self).__init__(mol, *args, **kwargs)
        # Gradient components, filled in by the prepare_* methods below.
        self.grad_jk = NotImplemented
        self.grad_gga = NotImplemented
        self.grad_pt2 = NotImplemented
        self.grad_enfunc = NotImplemented
        self.grad_tot = NotImplemented
        self.de = NotImplemented
    @timing
    def prepare_H_1(self):
        """Cache the core-Hamiltonian first derivatives in AO and MO basis."""
        H_1_ao = get_H_1_ao(self.mol)
        H_1_mo = np.array([einsum("up, Auv, vq -> Apq", self.C[σ], H_1_ao, self.C[σ]) for σ in (α, β)])
        self.tensors.create("H_1_ao", H_1_ao)
        self.tensors.create("H_1_mo", H_1_mo)
    @timing
    def prepare_S_1(self):
        """Cache the overlap-matrix first derivatives in AO and MO basis."""
        S_1_ao = get_S_1_ao(self.mol)
        S_1_mo = np.array([einsum("up, Auv, vq -> Apq", self.C[σ], S_1_ao, self.C[σ]) for σ in (α, β)])
        self.tensors.create("S_1_ao", S_1_ao)
        self.tensors.create("S_1_mo", S_1_mo)
    def prepare_gradient_jk(self):
        """Assemble the density-fitted J/K gradient into ``self.grad_jk``."""
        D_r = self.tensors.load("D_r")
        Y_mo = [self.tensors["Y_mo_jk" + str(σ)] for σ in (α, β)]
        # Fall back to the reference functional's exchange coefficient when
        # no separate energy functional (xc_n) is configured.
        cx_n = self.cx_n if self.xc_n else self.cx
        self.grad_jk = get_gradient_jk(self.df_jk, self.C, self.D, D_r, Y_mo, self.cx, cx_n, self.get_memory())
    @timing
    def prepare_gradient_gga(self):
        """Assemble the GGA (DFT exchange-correlation) gradient contribution.

        Sets ``self.grad_gga`` to 0 when no grid density ("rho") was ever
        computed, i.e. for pure-HF-exchange functionals.
        """
        tensors = self.tensors
        if "rho" not in tensors:
            self.grad_gga = 0
            return self
        # --- LAZY CODE ---
        # Delegates the heavy XC derivative work to PySCF's grad/hessian
        # helpers instead of reimplementing it here.
        from pyscf import grad, hessian
        ni, mol, grids = self.ni, self.mol, self.grids
        natm = mol.natm
        C, D = self.C, self.D
        grad_contrib = np.zeros((natm, 3))
        xc = self.xc_n if self.xc_n else self.xc
        if self.ni._xc_type(xc) == "GGA": # energy functional contribution
            veff_1_gga = grad.uks.get_vxc(ni, mol, grids, xc, D)[1]
            for A, (_, _, A0, A1) in enumerate(mol.aoslice_by_atom()):
                grad_contrib[A] += 2 * einsum("stuv, suv -> t", veff_1_gga[:, :, A0:A1], D[:, A0:A1])
        if self.ni._xc_type(self.xc) == "GGA": # reference functional skeleton fock derivative contribution
            D_r = tensors.load("D_r")
            D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2
            D_r_ao = einsum("sup, spq, svq -> suv", C, D_r_symm, C)
            F_1_ao_dfa = np.array(hessian.uks._get_vxc_deriv1(self.mf_s.Hessian(), C, self.mo_occ, 2000))
            grad_contrib += einsum("suv, sAtuv -> At", D_r_ao, F_1_ao_dfa)
        self.grad_gga = grad_contrib
        return self
    @timing
    def prepare_gradient_pt2(self):
        """Assemble the PT2 (correlation) gradient into ``self.grad_pt2``.

        When ``eval_pt2`` is False only the relaxed-density core-Hamiltonian
        term is kept; otherwise the energy-weighted density (W) and the
        3-index RI integral derivatives are contracted in as well.
        """
        tensors = self.tensors
        C, D, e = self.C, self.D, self.e
        mol, aux_ri = self.mol, self.aux_ri
        natm, nao, nmo, nocc, nvir, naux = mol.natm, self.nao, self.nmo, self.nocc, self.nvir, self.df_ri.get_naoaux()
        mocc, mvir = max(nocc), max(nvir)
        # this algorithm asserts naux = aux.nao, i.e. no linear dependency in auxiliary basis
        assert naux == aux_ri.nao
        so, sv, sa = self.so, self.sv, self.sa
        D_r = tensors.load("D_r")
        H_1_mo = tensors.load("H_1_mo")
        grad_corr = einsum("spq, sApq -> A", D_r, H_1_mo)
        if not self.eval_pt2:
            grad_corr.shape = (natm, 3)
            self.grad_pt2 = grad_corr
            return
        # Energy-weighted density: W_I (cached) + W_II (orbital-energy part)
        # + W_III (response part, occupied-occupied block only).
        W_I = tensors.load("W_I")
        W_II = - einsum("spq, sq -> spq", D_r, e)
        W_III_tmp = self.Ax0_Core(so, so, sa, sa)(D_r)
        W = W_I + W_II
        for σ in (α, β):
            W[σ][so[σ], so[σ]] += - 0.5 * W_III_tmp[σ]
        W_ao = einsum("sup, spq, svq -> suv", C, W, C)
        S_1_ao = tensors.load("S_1_ao")
        grad_corr += np.einsum("suv, Auv -> A", W_ao, S_1_ao)
        grad_corr.shape = (natm, 3)
        L_inv, L_1_gen = generator_L_1(aux_ri)
        int3c2e_ip1_gen = int3c_wrapper(mol, aux_ri, "int3c2e_ip1", "s1")
        int3c2e_ip2_gen = int3c_wrapper(mol, aux_ri, "int3c2e_ip2", "s1")
        Y_ia_ri = [np.asarray(tensors["Y_mo_ri" + str(σ)][:, so[σ], sv[σ]]) for σ in (α, β)]
        G_ia_ri = [tensors.load("G_ia_ri" + str(σ)) for σ in (α, β)]
        for A in range(natm):
            L_1_ri = L_1_gen(A)
            Y_1_ia_ri = [np.zeros((3, naux, nocc[σ], nvir[σ])) for σ in (α, β)]
            shA0, shA1, _, _ = mol.aoslice_by_atom()[A]
            shA0a, shA1a, _, _ = aux_ri.aoslice_by_atom()[A]
            # Derivatives w.r.t. orbital-basis shells on atom A (batched).
            nbatch = calc_batch_size(3*(nao+mocc)*naux, self.get_memory(), tot_size(Y_1_ia_ri))
            for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):
                su = slice(U0, U1)
                int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux_ri.nbas))
                for σ in (α, β):
                    Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ui, va -> tPia", int3c2e_ip1, L_inv, C[σ][su, so[σ]], C[σ][:, sv[σ]])
                    Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ua, vi -> tPia", int3c2e_ip1, L_inv, C[σ][su, sv[σ]], C[σ][:, so[σ]])
            # Derivatives w.r.t. auxiliary-basis shells on atom A (batched).
            nbatch = calc_batch_size(3*nao*(nao+mocc), self.get_memory(), tot_size(Y_1_ia_ri))
            for shP0, shP1, P0, P1 in gen_shl_batch(aux_ri, nbatch, shA0a, shA1a):
                sp = slice(P0, P1)
                int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, mol.nbas, shP0, shP1))
                for σ in (α, β):
                    Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ui, va -> tPia", int3c2e_ip2, L_inv[:, sp], C[σ][:, so[σ]], C[σ][:, sv[σ]])
            # Metric-derivative term, then contract against the 3-index
            # two-particle density G to accumulate the gradient.
            for σ in (α, β):
                Y_1_ia_ri[σ] -= einsum("Qia, tRQ, PR -> tPia", Y_ia_ri[σ], L_1_ri, L_inv)
                grad_corr[A] += einsum("Pia, tPia -> t", G_ia_ri[σ], Y_1_ia_ri[σ])
        self.grad_pt2 = grad_corr
    @timing
    def prepare_gradient_enfunc(self):
        """Assemble the SCF energy-functional gradient (nuclear repulsion,
        core Hamiltonian, overlap/Fock Pulay term, optional DFT-D3) into
        ``self.grad_enfunc``."""
        tensors = self.tensors
        natm = self.mol.natm
        Co, eo, D = self.Co, self.eo, self.D
        so = self.so
        grad_contrib = self.mf_s.Gradients().grad_nuc()
        grad_contrib.shape = (natm * 3,)
        H_1_ao = tensors.load("H_1_ao")
        S_1_mo = tensors.load("S_1_mo")
        grad_contrib += np.einsum("Auv, suv -> A", H_1_ao, D, optimize=True) # TODO check PySCF lib.einsum why fails
        if self.xc_n is None:
            # Same functional for SCF and energy: Pulay term uses orbital
            # energies directly.
            for σ in (α, β):
                grad_contrib -= np.einsum("Ai, i -> A", S_1_mo[σ][:, so[σ], so[σ]].diagonal(0, -1, -2), eo[σ])
        else:
            # TODO see whether get_fock could use mo_coeff to accelearate RI-K
            # Separate energy functional: build its occupied-occupied Fock
            # block explicitly for the Pulay term.
            F_0_ao_n = self.mf_n.get_fock(dm=D)
            nc_F_0_ij = [(Co[σ].T @ F_0_ao_n[σ] @ Co[σ]) for σ in (α, β)]
            for σ in (α, β):
                grad_contrib -= einsum("Aij, ij -> A", S_1_mo[σ][:, so[σ], so[σ]], nc_F_0_ij[σ])
        grad_contrib.shape = (natm, 3)
        # handle dftd3 situation
        mol = self.mol
        if "D3" in self.xc_add:
            # Call the dftd3 C library directly to get the dispersion gradient.
            drv = itrf.libdftd3.wrapper_params
            params = np.asarray(self.xc_add["D3"][0], order="F")
            version = self.xc_add["D3"][1]
            coords = np.asarray(mol.atom_coords(), order="F")
            itype = np.asarray(mol.atom_charges(), order="F")
            edisp = np.zeros(1)
            grad = np.zeros((mol.natm, 3))
            drv(
                ctypes.c_int(mol.natm), # natoms
                coords.ctypes.data_as(ctypes.c_void_p), # coords
                itype.ctypes.data_as(ctypes.c_void_p), # itype
                params.ctypes.data_as(ctypes.c_void_p), # params
                ctypes.c_int(version), # version
                edisp.ctypes.data_as(ctypes.c_void_p), # edisp
                grad.ctypes.data_as(ctypes.c_void_p)) # grads)
            grad_contrib += grad
        self.grad_enfunc = grad_contrib
    def base_method(self) -> UDFDH:
        """Demote this object back to a plain UDFDH in place.

        NOTE: rebinding ``__class__`` is intentional — it strips the
        gradient mixin without copying any state.
        """
        self.__class__ = UDFDH
        return self
| 44.239583 | 129 | 0.567695 | from __future__ import annotations
try:
from dh.udfdh import UDFDH
from dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing
from dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1
from dh.grad.rdfdh import Gradients as RGradients
except ImportError:
from pyscf.dh.udfdh import UDFDH
from pyscf.dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing
from pyscf.dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1
from pyscf.dh.grad.rdfdh import Gradients as RGradients
from pyscf import gto, lib, df
from pyscf.df.grad.rhf import _int3c_wrapper as int3c_wrapper
try:
from pyscf.dftd3 import itrf
except ImportError:
print('''Warning: dftd3 not found. You cannot using functionals with "-D3" suffix
before installing pyscf-dftd3. See https://github.com/pyscf/dftd3 and
https://github.com/ajz34/dh#dftd3-extension ''')
import numpy as np
import itertools
import ctypes
einsum = lib.einsum
α, β = 0, 1
αα, αβ, ββ = 0, 1, 2
@timing
def get_gradient_jk(dfobj: df.DF, C, D, D_r, Y_mo, cx, cx_n, max_memory=2000):
mol, aux = dfobj.mol, dfobj.auxmol
natm, nao, nmo, nocc = mol.natm, mol.nao, C.shape[-1], mol.nelec
mocc = max(nocc)
naux = Y_mo[0].shape[0]
assert naux == aux.nao
so = slice(0, nocc[α]), slice(0, nocc[β])
D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2
D_r_ao = einsum("sup, spq, svq -> suv", C, D_r_symm, C)
D_mo = np.zeros((2, nmo, nmo))
for σ in (α, β):
for i in range(nocc[σ]):
D_mo[σ, i, i] = 1
Y_dot_D, Y_dot_D_r = np.zeros((2, naux)), np.zeros((2, naux))
nbatch = calc_batch_size(nmo**2, max_memory)
for σ in (α, β):
for i in range(nocc[σ]):
Y_dot_D[σ] += Y_mo[σ][:, i, i]
for saux in gen_batch(0, naux, nbatch):
Y_dot_D_r[σ][saux] = einsum("Ppq, pq -> P", Y_mo[σ][saux], D_r_symm[σ])
Y_ip = [np.asarray(Y_mo[σ][:, so[σ]]) for σ in (α, β)]
L_inv, L_1_gen = generator_L_1(aux)
int3c2e_ip1_gen = int3c_wrapper(mol, aux, "int3c2e_ip1", "s1")
int3c2e_ip2_gen = int3c_wrapper(mol, aux, "int3c2e_ip2", "s1")
C0 = [C[σ][:, so[σ]] for σ in (α, β)]
D1 = [cx * D_r_symm[σ] + 0.5 * cx_n * D_mo[σ] for σ in (α, β)]
C1 = [C[σ] @ D1[σ] for σ in (α, β)]
grad_contrib = np.zeros((natm, 3))
for A in range(natm):
shA0, shA1, _, _ = mol.aoslice_by_atom()[A]
shA0a, shA1a, _, _ = aux.aoslice_by_atom()[A]
Y_1_mo_D_r = [np.zeros((3, naux, nocc[σ], nmo)) for σ in (α, β)]
Y_1_dot_D, Y_1_dot_D_r = np.zeros((2, 3, naux)), np.zeros((2, 3, naux))
pre_flop = tot_size(Y_1_mo_D_r, Y_ip, Y_1_dot_D, Y_1_dot_D_r)
nbatch = calc_batch_size(3*(nao+mocc)*naux, max_memory, pre_flop)
for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):
su = slice(U0, U1)
int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux.nbas))
for σ in (α, β):
Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, ui, vp -> tPip", int3c2e_ip1, L_inv, C0[σ][su], C1[σ])
Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, up, vi -> tPip", int3c2e_ip1, L_inv, C1[σ][su], C0[σ])
Y_1_dot_D[σ] -= 2 * einsum("tuvQ, PQ, uv -> tP", int3c2e_ip1, L_inv, D[σ][su])
Y_1_dot_D_r[σ] -= 2 * einsum("tuvQ, PQ, uv -> tP", int3c2e_ip1, L_inv, D_r_ao[σ][su])
nbatch = calc_batch_size(3*nao*(nao+mocc), max_memory, pre_flop)
for shP0, shP1, P0, P1 in gen_shl_batch(aux, nbatch, shA0a, shA1a):
sp = slice(P0, P1)
int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, mol.nbas, shP0, shP1))
for σ in (α, β):
Y_1_mo_D_r[σ] -= einsum("tuvQ, PQ, ui, vp -> tPip", int3c2e_ip2, L_inv[:, sp], C0[σ], C1[σ])
Y_1_dot_D[σ] -= einsum("tuvQ, PQ, uv -> tP", int3c2e_ip2, L_inv[:, sp], D[σ])
Y_1_dot_D_r[σ] -= einsum("tuvQ, PQ, uv -> tP", int3c2e_ip2, L_inv[:, sp], D_r_ao[σ])
L_1 = L_1_gen(A)
L_1_dot_inv = einsum("tRQ, PR -> tPQ", L_1, L_inv)
for σ in (α, β):
Y_1_mo_D_r[σ] -= einsum("Qiq, qp, tPQ -> tPip", Y_ip[σ], D1[σ], L_1_dot_inv)
Y_1_dot_D[σ] -= einsum("Q, tPQ -> tP", Y_dot_D[σ], L_1_dot_inv)
Y_1_dot_D_r[σ] -= einsum("Q, tPQ -> tP", Y_dot_D_r[σ], L_1_dot_inv)
grad_contrib[A] += - 2 * einsum("Pip, tPip -> t", Y_ip[σ], Y_1_mo_D_r[σ])
for σ, ς in itertools.product((α, β), (α, β)):
grad_contrib[A] += (
+ einsum("P, tP -> t", Y_dot_D[σ], Y_1_dot_D_r[ς])
+ einsum("P, tP -> t", Y_dot_D_r[σ], Y_1_dot_D[ς])
+ einsum("P, tP -> t", Y_dot_D[σ], Y_1_dot_D[ς]))
return grad_contrib
class Gradients(UDFDH, RGradients):
def __init__(self, mol: gto.Mole, *args, skip_construct=False, **kwargs):
if not skip_construct:
super(Gradients, self).__init__(mol, *args, **kwargs)
self.grad_jk = NotImplemented
self.grad_gga = NotImplemented
self.grad_pt2 = NotImplemented
self.grad_enfunc = NotImplemented
self.grad_tot = NotImplemented
self.de = NotImplemented
@timing
def prepare_H_1(self):
H_1_ao = get_H_1_ao(self.mol)
H_1_mo = np.array([einsum("up, Auv, vq -> Apq", self.C[σ], H_1_ao, self.C[σ]) for σ in (α, β)])
self.tensors.create("H_1_ao", H_1_ao)
self.tensors.create("H_1_mo", H_1_mo)
@timing
def prepare_S_1(self):
S_1_ao = get_S_1_ao(self.mol)
S_1_mo = np.array([einsum("up, Auv, vq -> Apq", self.C[σ], S_1_ao, self.C[σ]) for σ in (α, β)])
self.tensors.create("S_1_ao", S_1_ao)
self.tensors.create("S_1_mo", S_1_mo)
def prepare_gradient_jk(self):
D_r = self.tensors.load("D_r")
Y_mo = [self.tensors["Y_mo_jk" + str(σ)] for σ in (α, β)]
cx_n = self.cx_n if self.xc_n else self.cx
self.grad_jk = get_gradient_jk(self.df_jk, self.C, self.D, D_r, Y_mo, self.cx, cx_n, self.get_memory())
@timing
def prepare_gradient_gga(self):
tensors = self.tensors
if "rho" not in tensors:
self.grad_gga = 0
return self
from pyscf import grad, hessian
ni, mol, grids = self.ni, self.mol, self.grids
natm = mol.natm
C, D = self.C, self.D
grad_contrib = np.zeros((natm, 3))
xc = self.xc_n if self.xc_n else self.xc
if self.ni._xc_type(xc) == "GGA":
veff_1_gga = grad.uks.get_vxc(ni, mol, grids, xc, D)[1]
for A, (_, _, A0, A1) in enumerate(mol.aoslice_by_atom()):
grad_contrib[A] += 2 * einsum("stuv, suv -> t", veff_1_gga[:, :, A0:A1], D[:, A0:A1])
if self.ni._xc_type(self.xc) == "GGA":
D_r = tensors.load("D_r")
D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2
D_r_ao = einsum("sup, spq, svq -> suv", C, D_r_symm, C)
F_1_ao_dfa = np.array(hessian.uks._get_vxc_deriv1(self.mf_s.Hessian(), C, self.mo_occ, 2000))
grad_contrib += einsum("suv, sAtuv -> At", D_r_ao, F_1_ao_dfa)
self.grad_gga = grad_contrib
return self
@timing
def prepare_gradient_pt2(self):
tensors = self.tensors
C, D, e = self.C, self.D, self.e
mol, aux_ri = self.mol, self.aux_ri
natm, nao, nmo, nocc, nvir, naux = mol.natm, self.nao, self.nmo, self.nocc, self.nvir, self.df_ri.get_naoaux()
mocc, mvir = max(nocc), max(nvir)
assert naux == aux_ri.nao
so, sv, sa = self.so, self.sv, self.sa
D_r = tensors.load("D_r")
H_1_mo = tensors.load("H_1_mo")
grad_corr = einsum("spq, sApq -> A", D_r, H_1_mo)
if not self.eval_pt2:
grad_corr.shape = (natm, 3)
self.grad_pt2 = grad_corr
return
W_I = tensors.load("W_I")
W_II = - einsum("spq, sq -> spq", D_r, e)
W_III_tmp = self.Ax0_Core(so, so, sa, sa)(D_r)
W = W_I + W_II
for σ in (α, β):
W[σ][so[σ], so[σ]] += - 0.5 * W_III_tmp[σ]
W_ao = einsum("sup, spq, svq -> suv", C, W, C)
S_1_ao = tensors.load("S_1_ao")
grad_corr += np.einsum("suv, Auv -> A", W_ao, S_1_ao)
grad_corr.shape = (natm, 3)
L_inv, L_1_gen = generator_L_1(aux_ri)
int3c2e_ip1_gen = int3c_wrapper(mol, aux_ri, "int3c2e_ip1", "s1")
int3c2e_ip2_gen = int3c_wrapper(mol, aux_ri, "int3c2e_ip2", "s1")
Y_ia_ri = [np.asarray(tensors["Y_mo_ri" + str(σ)][:, so[σ], sv[σ]]) for σ in (α, β)]
G_ia_ri = [tensors.load("G_ia_ri" + str(σ)) for σ in (α, β)]
for A in range(natm):
L_1_ri = L_1_gen(A)
Y_1_ia_ri = [np.zeros((3, naux, nocc[σ], nvir[σ])) for σ in (α, β)]
shA0, shA1, _, _ = mol.aoslice_by_atom()[A]
shA0a, shA1a, _, _ = aux_ri.aoslice_by_atom()[A]
nbatch = calc_batch_size(3*(nao+mocc)*naux, self.get_memory(), tot_size(Y_1_ia_ri))
for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):
su = slice(U0, U1)
int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux_ri.nbas))
for σ in (α, β):
Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ui, va -> tPia", int3c2e_ip1, L_inv, C[σ][su, so[σ]], C[σ][:, sv[σ]])
Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ua, vi -> tPia", int3c2e_ip1, L_inv, C[σ][su, sv[σ]], C[σ][:, so[σ]])
nbatch = calc_batch_size(3*nao*(nao+mocc), self.get_memory(), tot_size(Y_1_ia_ri))
for shP0, shP1, P0, P1 in gen_shl_batch(aux_ri, nbatch, shA0a, shA1a):
sp = slice(P0, P1)
int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, mol.nbas, shP0, shP1))
for σ in (α, β):
Y_1_ia_ri[σ] -= einsum("tuvQ, PQ, ui, va -> tPia", int3c2e_ip2, L_inv[:, sp], C[σ][:, so[σ]], C[σ][:, sv[σ]])
for σ in (α, β):
Y_1_ia_ri[σ] -= einsum("Qia, tRQ, PR -> tPia", Y_ia_ri[σ], L_1_ri, L_inv)
grad_corr[A] += einsum("Pia, tPia -> t", G_ia_ri[σ], Y_1_ia_ri[σ])
self.grad_pt2 = grad_corr
@timing
def prepare_gradient_enfunc(self):
tensors = self.tensors
natm = self.mol.natm
Co, eo, D = self.Co, self.eo, self.D
so = self.so
grad_contrib = self.mf_s.Gradients().grad_nuc()
grad_contrib.shape = (natm * 3,)
H_1_ao = tensors.load("H_1_ao")
S_1_mo = tensors.load("S_1_mo")
grad_contrib += np.einsum("Auv, suv -> A", H_1_ao, D, optimize=True)
if self.xc_n is None:
for σ in (α, β):
grad_contrib -= np.einsum("Ai, i -> A", S_1_mo[σ][:, so[σ], so[σ]].diagonal(0, -1, -2), eo[σ])
else:
F_0_ao_n = self.mf_n.get_fock(dm=D)
nc_F_0_ij = [(Co[σ].T @ F_0_ao_n[σ] @ Co[σ]) for σ in (α, β)]
for σ in (α, β):
grad_contrib -= einsum("Aij, ij -> A", S_1_mo[σ][:, so[σ], so[σ]], nc_F_0_ij[σ])
grad_contrib.shape = (natm, 3)
mol = self.mol
if "D3" in self.xc_add:
drv = itrf.libdftd3.wrapper_params
params = np.asarray(self.xc_add["D3"][0], order="F")
version = self.xc_add["D3"][1]
coords = np.asarray(mol.atom_coords(), order="F")
itype = np.asarray(mol.atom_charges(), order="F")
edisp = np.zeros(1)
grad = np.zeros((mol.natm, 3))
drv(
ctypes.c_int(mol.natm),
coords.ctypes.data_as(ctypes.c_void_p),
itype.ctypes.data_as(ctypes.c_void_p),
params.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(version),
edisp.ctypes.data_as(ctypes.c_void_p),
grad.ctypes.data_as(ctypes.c_void_p))
grad_contrib += grad
self.grad_enfunc = grad_contrib
def base_method(self) -> UDFDH:
self.__class__ = UDFDH
return self
| true | true |
f7f787a0f781bee78bafa0751b991899877c3def | 8,686 | py | Python | ifind/search/engines/neutrinogeoaddress.py | padre-lab-eu/extended_simiir | aa9d54784dcfb4c629687317622eae8ba6d59a79 | [
"MIT"
] | 2 | 2018-03-20T01:14:50.000Z | 2021-05-15T12:05:32.000Z | ifind/search/engines/neutrinogeoaddress.py | padre-lab-eu/extended_simiir | aa9d54784dcfb4c629687317622eae8ba6d59a79 | [
"MIT"
] | null | null | null | ifind/search/engines/neutrinogeoaddress.py | padre-lab-eu/extended_simiir | aa9d54784dcfb4c629687317622eae8ba6d59a79 | [
"MIT"
] | 3 | 2015-02-04T13:36:26.000Z | 2015-03-17T08:22:27.000Z | __author__ = 'smck'
import json
import requests
from ifind.search.engine import Engine
from ifind.search.response import Response
from ifind.search.exceptions import EngineAPIKeyException, QueryParamException, EngineConnectionException
from ifind.utils.encoding import encode_symbols
from string import maketrans
API_ENDPOINT = "https://neutrinoapi.com/geocode-address"
class Neutrinogeoaddress(Engine):
    """
    Neutrinogeoaddress search engine.

    Wraps the Neutrino "geocode-address" REST API and exposes it through the
    ifind Engine interface, returning geocoded locations as search results.
    """

    def __init__(self, api_key='', username='', google_api_key='', **kwargs):
        """
        Neutrinogeoaddress engine constructor.

        Kwargs:
            api_key (str): string representation of api key needed to access the search api
            username (str): string representing the username associated with the service
            google_api_key (str): string representation of a Google API key which has map API
                permissions; used for generating the iframe url for embedded maps.
            country_code (str): country code biasing the lookup, defaults to 'GB'.
            language_code (str): language code for the results, defaults to ''.

            See Engine.

        Raises:
            EngineException

        Usage:
            engine = EngineFactory('Neutrinogeoaddress', api_key='etc123456etc123456etc123456',
                                   username='someguy', google_api_key='12313414412')
        """
        Engine.__init__(self, **kwargs)
        self.api_key = api_key
        self.username = username
        self.google_api_key = google_api_key
        self.country_code = kwargs.get('country_code', 'GB')  # Set country code to GB if not found
        self.language_code = kwargs.get('language_code', '')

        if not self.api_key:
            raise EngineAPIKeyException(self.name, "'api_key=' keyword argument not specified")
        elif not self.username:
            raise EngineAPIKeyException(self.name, "'username=' keyword argument not specified")

    def _search(self, query):
        """
        Concrete method of Engine's interface method 'search'.
        Performs a search and retrieves the results as an ifind Response.

        Args:
            query (ifind Query): Object encapsulating details of a search query.

        Returns:
            ifind Response: object encapsulating a search request's results.

        Raises:
            EngineException

        Usage:
            Private method.

        Notes:
            https://www.neutrinoapi.com/api/geocode-address/ for full API documentation.
        """
        return self._request(query)

    def _request(self, query):
        """
        Issues a single request to the API_ENDPOINT and returns the result as
        an ifind Response.

        Args:
            query (ifind Query): object encapsulating details of a search query.

        Returns:
            ifind Response: object encapsulating a search request's results.

        Raises:
            EngineException

        Usage:
            Private method.
        """
        query_string = self._create_query_string(query)

        try:
            response = requests.get(query_string)
        except requests.exceptions.ConnectionError:
            raise EngineConnectionException(self.name, "Unable to send request, check connectivity.")

        if response.status_code != 200:
            raise EngineConnectionException(self.name, "", code=response.status_code)

        return self._parse_json_response(query, response)

    def _create_query_string(self, query):
        """
        Creates and returns the Neutrinogeoaddress API query string with encoded parameters.

        Args:
            query (ifind Query): object encapsulating details of a search query.

        Query Kwargs:
            address: (str) The address string. If this is not found, query.terms is used instead.

        Returns:
            str: query string for the Neutrinogeoaddress API request.

        Raises:
            EngineException

        Usage:
            Private method.
        """
        address = query.__dict__.get('address', query.terms)

        # Throw an exception if we have no search terms.
        if not address:
            raise QueryParamException(self.name, "No address provided!")

        query_append = "?address={}&country-code={}&language-code={}&user-id={}&api-key={}".format\
            (address, self.country_code, self.language_code, self.username, self.api_key)

        return API_ENDPOINT + encode_symbols(query_append)

    def _build_iframe_url(self, address, trans_table):
        """
        Builds the url to be used in an iframe for an embedded Google map of the address.

        Args:
            address (str): The address to be used in the search on Google maps.
            trans_table: A translation table for str.translate (built with maketrans)
                that replaces spaces with '+'.

        Returns:
            iframe_url (str): The string to be used in the iframe to embed the map.
        """
        google_endpoint = 'https://www.google.com/maps/embed/v1/search'
        iframe_url = '{}?q={}&key={}'.format(google_endpoint,
                                             encode_symbols(address.encode('utf-8').translate(trans_table)),
                                             encode_symbols(self.google_api_key))
        return iframe_url

    @staticmethod
    def _build_summary(address, city, country, postcode, latitude, longitude):
        """
        Uses the information in the response to build a summary string.

        Args:
            address (str): The complete address
            city (str): The city name portion of the address
            country (str): The country portion of the address
            postcode (str): The postcode portion of the address
            latitude (str): The latitude portion of the coordinates
            longitude (str): The longitude portion of the coordinates

        Returns:
            summary (str): A string representing the result

        Usage:
            Private method.
        """
        if address:
            # Fixed label typo: was 'Adddress: '.
            address = 'Address: ' + address.encode('utf-8')
        if city:
            city = 'City: ' + city.encode('utf-8')
        if country:
            # Fixed: previously concatenated ``city`` here, mislabelling the country.
            country = 'Country: ' + country.encode('utf-8')
        if postcode:
            postcode = 'Postcode: ' + postcode.encode('utf-8')
        if latitude and longitude:
            coords = 'Coordinates: ' + str((latitude, longitude))
        else:
            coords = ''

        # Fixed: five components need five placeholders — the original four-slot
        # format string silently dropped ``coords`` (str.format ignores extras).
        summary = '{} {} {} {} {}'.format(address, city, country, postcode, coords)
        return summary

    def _parse_json_response(self, query, results):
        """
        Parses Neutrinogeoaddress's JSON response and returns it as an ifind Response.

        Args:
            query (ifind Query): object encapsulating details of a search query.
            results : requests library response object containing search results.

        Returns:
            ifind Response: object encapsulating a search request's results.

        Usage:
            Private method.
        """
        response = Response(query.terms, query)
        content = json.loads(results.text)

        # Results aren't paginated, no more to get.
        response.no_more_results = True

        url_base = 'https://www.google.co.uk/maps/place/'
        trans_table = maketrans(u' ', u'+')  # Switch spaces with + for the google maps url

        locations = content.get(u'locations')
        if locations:
            # There are results present, iterate over them.
            for loc in locations:
                # Kwargs below
                address = loc.get(u'address', '')
                latitude = loc.get(u'latitude', '')
                longitude = loc.get(u'longitude', '')
                country = loc.get(u'country', '')
                country_code = loc.get(u'country-code', '')
                city = loc.get(u'city', '')
                postcode = loc.get(u'postal-code', '')

                # The iframe_url must be placed in an iframe in order to render the map.
                if self.google_api_key:
                    iframe_url = self._build_iframe_url(address, trans_table)
                else:
                    iframe_url = None

                url = url_base + encode_symbols(address.encode('utf-8').translate(trans_table))
                text = Neutrinogeoaddress._build_summary(address, city, country, postcode, latitude, longitude)

                response.add_result(title=address, url=url, summary=text, imageurl=None,
                                    address=address, latitude=latitude, longitude=longitude, country=country,
                                    country_code=country_code, city=city, postcode=postcode, iframe_url=iframe_url)

        return response
| 35.453061 | 119 | 0.608796 | __author__ = 'smck'
import json
import requests
from ifind.search.engine import Engine
from ifind.search.response import Response
from ifind.search.exceptions import EngineAPIKeyException, QueryParamException, EngineConnectionException
from ifind.utils.encoding import encode_symbols
from string import maketrans
API_ENDPOINT = "https://neutrinoapi.com/geocode-address"
class Neutrinogeoaddress(Engine):
    """ifind engine wrapping the Neutrino "geocode-address" REST API."""
    def __init__(self, api_key='', username='', google_api_key='', **kwargs):
        """Store credentials/options; raise if api_key or username is missing."""
        Engine.__init__(self, **kwargs)
        self.api_key = api_key
        self.username = username
        self.google_api_key = google_api_key
        self.country_code = kwargs.get('country_code', 'GB')  # default bias: GB
        self.language_code = kwargs.get('language_code', '')
        if not self.api_key:
            raise EngineAPIKeyException(self.name, "'api_key=' keyword argument not specified")
        elif not self.username:
            raise EngineAPIKeyException(self.name, "'username=' keyword argument not specified")
    def _search(self, query):
        """Engine interface hook: delegate straight to _request."""
        return self._request(query)
    def _request(self, query):
        """Issue the HTTP GET and parse the JSON payload into an ifind Response."""
        query_string = self._create_query_string(query)
        try:
            response = requests.get(query_string)
        except requests.exceptions.ConnectionError:
            raise EngineConnectionException(self.name, "Unable to send request, check connectivity.")
        if response.status_code != 200:
            raise EngineConnectionException(self.name, "", code=response.status_code)
        return self._parse_json_response(query, response)
    def _create_query_string(self, query):
        """Build the geocode-address request URL from the query's address (or terms)."""
        address = query.__dict__.get('address', query.terms)
        if not address:
            raise QueryParamException(self.name, "No address provided!")
        query_append = "?address={}&country-code={}&language-code={}&user-id={}&api-key={}".format\
            (address, self.country_code, self.language_code, self.username, self.api_key)
        return API_ENDPOINT + encode_symbols(query_append)
    def _build_iframe_url(self, address, trans_table):
        """Build a Google Maps embed URL for ``address`` (uses google_api_key)."""
        google_endpoint = 'https://www.google.com/maps/embed/v1/search'
        iframe_url = '{}?q={}&key={}'.format(google_endpoint,
                                             encode_symbols(address.encode('utf-8').translate(trans_table)),
                                             encode_symbols(self.google_api_key))
        return iframe_url
    @staticmethod
    def _build_summary(address, city, country, postcode, latitude, longitude):
        """Assemble a human-readable summary from the address components.

        NOTE(review): the output label 'Adddress' is misspelled.
        """
        if address:
            address = 'Adddress: ' + address.encode('utf-8')
        if city:
            city = 'City: ' + city.encode('utf-8')
        if country:
            # BUG: concatenates ``city`` here, so the 'Country:' field shows the city.
            country = 'Country: ' + city.encode('utf-8')
        if postcode:
            postcode = 'Postcode: ' + postcode.encode('utf-8')
        if latitude and longitude:
            coords = 'Coordinates: ' + str((latitude, longitude))
        else:
            coords = ''
        # BUG: four '{}' placeholders for five arguments — ``coords`` is silently dropped.
        summary = '{} {} {} {}'.format(address, city, country, postcode, coords)
        return summary
    def _parse_json_response(self, query, results):
        """Convert the API's JSON 'locations' array into ifind results."""
        response = Response(query.terms, query)
        content = json.loads(results.text)
        # Results aren't paginated; mark the response as exhausted.
        response.no_more_results = True
        url_base = 'https://www.google.co.uk/maps/place/'
        trans_table = maketrans(u' ', u'+')  # Switch spaces with + for the google maps url
        locations = content.get(u'locations')
        if locations:
            # There are results present, iterate over them.
            for loc in locations:
                # Kwargs below
                address = loc.get(u'address', '')
                latitude = loc.get(u'latitude', '')
                longitude = loc.get(u'longitude', '')
                country = loc.get(u'country', '')
                country_code = loc.get(u'country-code', '')
                city = loc.get(u'city', '')
                postcode = loc.get(u'postal-code', '')
                # The iframe_url must be placed in an iframe in order to render the map.
                if self.google_api_key:
                    iframe_url = self._build_iframe_url(address, trans_table)
                else:
                    iframe_url = None
                url = url_base + encode_symbols(address.encode('utf-8').translate(trans_table))
                text = Neutrinogeoaddress._build_summary(address, city, country, postcode, latitude, longitude)
                response.add_result(title=address, url=url, summary=text, imageurl=None,
                                    address=address, latitude=latitude, longitude=longitude, country=country,
                                    country_code=country_code, city=city, postcode=postcode, iframe_url=iframe_url)
        return response
| true | true |
f7f7888acbc7a5533e674b98d3440ddaf9da16b8 | 1,913 | py | Python | src/graph/graph.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | src/graph/graph.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | src/graph/graph.py | JadielTeofilo/General-Algorithms | dfcf86c6ecd727573079f8971187c47bdb7a37bb | [
"MIT"
] | null | null | null | ##### Graph implementation list of adjacencies #####
import dataclasses
from typing import List, Any, Optional
@dataclasses.dataclass
class GraphNode:
    """A vertex in a directed graph, identified by an integer value."""
    value: int
    # Outgoing adjacency list: the nodes this vertex points at.
    edges: List['GraphNode'] = dataclasses.field(default_factory=list)


class Graph:
    """Directed graph stored as adjacency lists of GraphNode objects.

    Vertices are identified by their integer ``value``; ``add_edge`` creates
    missing vertices on demand and ignores duplicate edges.
    """

    def __init__(self) -> None:
        self.vertices: List[GraphNode] = []

    def add_edge(self, from_value: int, to_value: int) -> None:
        """Add a directed edge ``from_value -> to_value``, creating missing vertices.

        Adding an edge that already exists is a no-op.
        """
        from_node: GraphNode = self._create_node(from_value)
        to_node: GraphNode = self._create_node(to_value)
        # Membership test replaces the original manual scan; values are unique,
        # so equality can only match the identical node object.
        if to_node in from_node.edges:
            return
        from_node.edges.append(to_node)

    def _create_node(self, value: int) -> GraphNode:
        """Return the vertex holding ``value``, creating and registering it if absent."""
        node: Optional[GraphNode] = self.search(value)
        if node is None:
            node = GraphNode(value=value)
            self.vertices.append(node)
        return node

    def search(self, node_value: int) -> Optional[GraphNode]:
        """Linear search for the vertex with ``node_value``; None if not found."""
        return next((vertex for vertex in self.vertices if vertex.value == node_value), None)

    def remove_edge(self, from_value: int, to_value: int) -> None:
        """Remove the directed edge ``from_value -> to_value``.

        Raises:
            ValueError: if either vertex does not exist, or (propagated from
                ``list.remove``) if the edge itself is absent.
        """
        from_node: Optional[GraphNode] = self.search(from_value)
        to_node: Optional[GraphNode] = self.search(to_value)
        if from_node is None or to_node is None:
            raise ValueError('One or more vertices not found')
        from_node.edges.remove(to_node)

    def __str__(self) -> str:
        """One vertex repr per line; an empty graph renders as ''."""
        return '\n'.join(str(vertex) for vertex in self.vertices)
return '\n'.join(graph_values)
if __name__ == '__main__':
graph = Graph()
print(graph)
graph.add_edge(1,2)
graph.add_edge(3,2)
graph.add_edge(3,5)
graph.add_edge(3,1)
graph.add_edge(3,1)
graph.add_edge(1,3)
graph.remove_edge(3,1)
print(graph)
| 28.984848 | 70 | 0.627287 | init__(self) -> None:
self.vertices: List[GraphNode] = []
    def add_edge(self, from_value: int, to_value: int) -> None:
        """Add a directed edge from_value -> to_value, creating missing vertices.

        Adding an edge that is already present is a no-op.
        """
        from_node: GraphNode = self._create_node(from_value)
        to_node: GraphNode = self._create_node(to_value)
        # Linear scan to avoid inserting a duplicate edge.
        for vertex in from_node.edges:
            if vertex == to_node:
                return
        from_node.edges.append(to_node)
    def _create_node(self, value: int) -> GraphNode:
        """Return the vertex holding ``value``, creating and registering it if absent."""
        node: Optional[GraphNode] = self.search(value)
        if not node:
            node = GraphNode(value=value)
            self.vertices.append(node)
        return node
    def search(self, node_value: int) -> Optional[GraphNode]:
        """Linear search for the vertex with ``node_value``; None if not found."""
        for vertex in self.vertices:
            if vertex.value == node_value:
                return vertex
        return None
    def remove_edge(self, from_value: int, to_value: int) -> None:
        """Remove the directed edge from_value -> to_value.

        Raises:
            ValueError: if either vertex is missing, or (from ``list.remove``)
                if the edge itself does not exist.
        """
        from_node: Optional[GraphNode] = self.search(from_value)
        to_node: Optional[GraphNode] = self.search(to_value)
        if not from_node or not to_node:
            raise ValueError('One or more vertices not found')
        from_node.edges.remove(to_node)
    def __str__(self) -> str:
        """One vertex repr per line; an empty graph renders as ''."""
        graph_values: List[str] = []
        for vertex in self.vertices:
            graph_values.append(str(vertex))
        return '\n'.join(graph_values)
if __name__ == '__main__':
    # Demo: the duplicate add_edge(3, 1) is ignored, then that edge is removed.
    graph = Graph()
    print(graph)
    graph.add_edge(1,2)
    graph.add_edge(3,2)
    graph.add_edge(3,5)
    graph.add_edge(3,1)
    graph.add_edge(3,1)
    graph.add_edge(1,3)
    graph.remove_edge(3,1)
    print(graph)
| true | true |
f7f78b74a0319c892b9aa731bcb780058e07e9c8 | 1,960 | py | Python | tests/test_power_plugs.py | ictes/MerossIot | 9c5d47431211e0eda0cd271b284060d303fdb7d1 | [
"MIT"
] | null | null | null | tests/test_power_plugs.py | ictes/MerossIot | 9c5d47431211e0eda0cd271b284060d303fdb7d1 | [
"MIT"
] | null | null | null | tests/test_power_plugs.py | ictes/MerossIot | 9c5d47431211e0eda0cd271b284060d303fdb7d1 | [
"MIT"
] | null | null | null | from meross_iot.api import MerossHttpClient
from meross_iot.supported_devices.power_plugs import Mss310
import os
import unittest
import time
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
class TestHttpMethods(unittest.TestCase):
def setUp(self):
self.client = MerossHttpClient(email=EMAIL, password=PASSWORD)
def test_device_listing(self):
devices = self.client.list_devices()
assert devices is not None
assert len(devices) > 0
def test_supported_device_listing(self):
devices = self.client.list_supported_devices()
assert devices is not None
assert len(devices) > 0
class TestMSS310Test(unittest.TestCase):
def setUp(self):
httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
# Retrieves the list of supported devices
devices = httpHandler.list_supported_devices()
for counter, device in enumerate(devices):
if isinstance(device, Mss310):
self.device = device
break
def test_power_cycle(self):
self.device.turn_on()
time.sleep(2)
self.assertTrue(self.device.get_status())
self.device.turn_off()
time.sleep(2)
self.assertFalse(self.device.get_status())
self.device.turn_on()
time.sleep(2)
self.assertTrue(self.device.get_status())
def test_get_info(self):
consumption = self.device.get_power_consumptionX()
assert consumption is not None
wifi_list = self.device.get_wifi_list()
assert wifi_list is not None
trace = self. device.get_trace()
assert trace is not None
debug = self.device.get_debug()
assert debug is not None
abilities = self.device.get_abilities()
assert abilities is not None
electricity = self.device.get_electricity()
assert electricity is not None
| 27.605634 | 70 | 0.667857 | from meross_iot.api import MerossHttpClient
from meross_iot.supported_devices.power_plugs import Mss310
import os
import unittest
import time
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
class TestHttpMethods(unittest.TestCase):
def setUp(self):
self.client = MerossHttpClient(email=EMAIL, password=PASSWORD)
def test_device_listing(self):
devices = self.client.list_devices()
assert devices is not None
assert len(devices) > 0
def test_supported_device_listing(self):
devices = self.client.list_supported_devices()
assert devices is not None
assert len(devices) > 0
class TestMSS310Test(unittest.TestCase):
def setUp(self):
httpHandler = MerossHttpClient(email=EMAIL, password=PASSWORD)
devices = httpHandler.list_supported_devices()
for counter, device in enumerate(devices):
if isinstance(device, Mss310):
self.device = device
break
def test_power_cycle(self):
self.device.turn_on()
time.sleep(2)
self.assertTrue(self.device.get_status())
self.device.turn_off()
time.sleep(2)
self.assertFalse(self.device.get_status())
self.device.turn_on()
time.sleep(2)
self.assertTrue(self.device.get_status())
def test_get_info(self):
consumption = self.device.get_power_consumptionX()
assert consumption is not None
wifi_list = self.device.get_wifi_list()
assert wifi_list is not None
trace = self. device.get_trace()
assert trace is not None
debug = self.device.get_debug()
assert debug is not None
abilities = self.device.get_abilities()
assert abilities is not None
electricity = self.device.get_electricity()
assert electricity is not None
| true | true |
f7f78b825baa8c64428cec6b9755f2759f25115e | 3,885 | py | Python | python/PyAlembic/Tests/testInstance.py | zsnake1209/alembic | 6245d10e007d75ea7802a22c3e1899706b84698f | [
"BSL-1.0"
] | 921 | 2015-01-03T11:04:38.000Z | 2022-03-29T06:38:34.000Z | python/PyAlembic/Tests/testInstance.py | zsnake1209/alembic | 6245d10e007d75ea7802a22c3e1899706b84698f | [
"BSL-1.0"
] | 264 | 2015-01-05T17:15:45.000Z | 2022-03-28T20:14:51.000Z | python/PyAlembic/Tests/testInstance.py | zsnake1209/alembic | 6245d10e007d75ea7802a22c3e1899706b84698f | [
"BSL-1.0"
] | 276 | 2015-01-12T01:34:20.000Z | 2022-03-08T09:19:42.000Z | #-******************************************************************************
#
# Copyright (c) 2012 - 2013
# Sony Pictures Imageworks Inc. and
# Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Sony Pictures Imageworks, nor
# Industrial Light & Magic, nor the names of their contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#-******************************************************************************
import unittest
from imath import *
from alembic.Abc import *
class InstanceTest(unittest.TestCase):
def testInstanceExport(self):
"""Write an oarchive with an instance in it"""
oarch = OArchive('instance.abc')
#
# a
# / \
# b c <-- c is an instance of b
# |
# d
# |
# e
#
a = OObject(oarch.getTop(), 'a')
b = OObject(a, 'b')
d = OObject(b, 'd')
e = OObject(d, 'e')
a.addChildInstance(b, 'c')
def testInstanceImport(self):
"""Read an archive with an instance in it, verify it's instancing correctly."""
iarch = IArchive('instance.abc')
a = IObject(iarch.getTop(), 'a')
self.assertEqual(a.getNumChildren(), 2)
self.assertEqual(a.getChild('c').getName(), 'c')
self.assertEqual(a.getChild('c').getName(), a.getChild(1).getName())
self.assertTrue(a.isChildInstance('c'))
self.assertTrue(a.isChildInstance(1))
b = a.getChild('b')
self.assertTrue(b.valid())
# c is an instance root pointing at b
c = a.getChild('c')
self.assertTrue(c.valid())
self.assertTrue(c.isInstanceRoot())
self.assertTrue(c.isInstanceDescendant())
self.assertEqual(c.instanceSourcePath(), b.getFullName())
# instanced child of c is d
di = c.getChild('d')
self.assertTrue(di.valid())
self.assertEqual(di.getFullName(), '/a/c/d')
self.assertEqual(di.getParent().getFullName(), c.getFullName())
self.assertFalse(di.isInstanceRoot())
self.assertTrue(di.isInstanceDescendant())
# instanced child of d is e
ei = di.getChild('e')
self.assertTrue(ei.valid())
self.assertTrue(ei.getFullName(), '/a/c/d/e')
self.assertTrue(ei.getParent().getFullName(), di.getFullName())
self.assertFalse(ei.isInstanceRoot())
self.assertTrue(ei.isInstanceDescendant())
| 37.355769 | 87 | 0.638095 |
import unittest
from imath import *
from alembic.Abc import *
class InstanceTest(unittest.TestCase):
def testInstanceExport(self):
oarch = OArchive('instance.abc')
a = OObject(oarch.getTop(), 'a')
b = OObject(a, 'b')
d = OObject(b, 'd')
e = OObject(d, 'e')
a.addChildInstance(b, 'c')
def testInstanceImport(self):
iarch = IArchive('instance.abc')
a = IObject(iarch.getTop(), 'a')
self.assertEqual(a.getNumChildren(), 2)
self.assertEqual(a.getChild('c').getName(), 'c')
self.assertEqual(a.getChild('c').getName(), a.getChild(1).getName())
self.assertTrue(a.isChildInstance('c'))
self.assertTrue(a.isChildInstance(1))
b = a.getChild('b')
self.assertTrue(b.valid())
c = a.getChild('c')
self.assertTrue(c.valid())
self.assertTrue(c.isInstanceRoot())
self.assertTrue(c.isInstanceDescendant())
self.assertEqual(c.instanceSourcePath(), b.getFullName())
di = c.getChild('d')
self.assertTrue(di.valid())
self.assertEqual(di.getFullName(), '/a/c/d')
self.assertEqual(di.getParent().getFullName(), c.getFullName())
self.assertFalse(di.isInstanceRoot())
self.assertTrue(di.isInstanceDescendant())
ei = di.getChild('e')
self.assertTrue(ei.valid())
self.assertTrue(ei.getFullName(), '/a/c/d/e')
self.assertTrue(ei.getParent().getFullName(), di.getFullName())
self.assertFalse(ei.isInstanceRoot())
self.assertTrue(ei.isInstanceDescendant())
| true | true |
f7f78b9c614d19290ec5d46a32573aeaf6a92a28 | 6,157 | py | Python | saleor/shipping/migrations/0005_auto_20180805_1754.py | gruzdevasch/lastversionofbs | 17bbaccd683ea274b6ba56cdd39ccd4d664b1586 | [
"BSD-3-Clause"
] | null | null | null | saleor/shipping/migrations/0005_auto_20180805_1754.py | gruzdevasch/lastversionofbs | 17bbaccd683ea274b6ba56cdd39ccd4d664b1586 | [
"BSD-3-Clause"
] | 3 | 2020-03-24T16:21:02.000Z | 2021-02-02T21:57:49.000Z | saleor/shipping/migrations/0005_auto_20180805_1754.py | gruzdevasch/lastversionofbs | 17bbaccd683ea274b6ba56cdd39ccd4d664b1586 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.3 on 2018-08-05 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the ``shipping`` app.

    NOTE(review): generated file — the ``choices`` list below is data emitted
    by ``makemigrations``; do not hand-edit it.
    """

    dependencies = [
        ('shipping', '0004_auto_20180731_1425'),
    ]

    operations = [
        # Redefine the country_code field on ShippingMethodCountry with the
        # (Russian-labelled) country choices list.
        migrations.AlterField(
            model_name='shippingmethodcountry',
            name='country_code',
            field=models.CharField(blank=True, choices=[('', 'Rest of World'), ('LY', 'Ливия'), ('EC', 'Эквадор'), ('DJ', 'Джибути'), ('IQ', 'Ирак'), ('KI', 'Кирибати'), ('LR', 'Либерии'), ('BG', 'Болгария'), ('MZ', 'Мозамбик'), ('RW', 'Руанда'), ('TJ', 'Таджикистан'), ('QA', 'Катар'), ('SO', 'Сомали'), ('AQ', 'Антарктида'), ('LB', 'Ливан'), ('JM', 'Ямайка'), ('GL', 'Гренландия'), ('ZM', 'Замбия'), ('MK', 'Македония'), ('PA', 'Панама'), ('GU', 'Гуам'), ('MR', 'Мавритания'), ('SV', 'Сальвадор'), ('VN', 'Вьетнам'), ('MO', 'Макао'), ('LS', 'Лесото'), ('SZ', 'Свазиленд'), ('FI', 'Финляндия'), ('MQ', 'Мартиника'), ('ID', 'Индонезия'), ('MA', 'Марокко'), ('BT', 'Бутан'), ('OM', 'Оман'), ('KE', 'Кения'), ('NP', 'Непал'), ('BW', 'Ботсвана'), ('GY', 'Гайана'), ('GA', 'Габон'), ('BN', 'Бруней'), ('SK', 'Словакия'), ('NR', 'Науру'), ('BE', 'Бельгия'), ('MU', 'Маврикий'), ('BO', 'Боливия'), ('AZ', 'Азербайджан'), ('CL', 'Чили'), ('TR', 'Турция'), ('GI', 'Гибралтар'), ('TH', 'Таиланд'), ('ES', 'Испания'), ('HK', 'Гонконг'), ('FR', 'Франция'), ('SG', 'Сингапур'), ('TO', 'Тонга'), ('FJ', 'Фиджи'), ('GD', 'Гренада'), ('CN', 'Китай'), ('HN', 'Гондурас'), ('AR', 'Аргентина'), ('KG', 'Киргизия'), ('RS', 'Сербия'), ('ME', 'Черногория'), ('TD', 'Чад'), ('IN', 'Индия'), ('GN', 'Гвинея'), ('BD', 'Бангладеш'), ('AL', 'Албания'), ('CG', 'Конго'), ('IR', 'Иран'), ('SY', 'Сирия'), ('CU', 'Куба'), ('NU', 'Ниуэ'), ('RE', 'Реюньон'), ('LI', 'Лихтенштейн'), ('PT', 'Португалия'), ('SE', 'Швеция'), ('DM', 'Доминика'), ('AM', 'Армения'), ('EG', 'Египет'), ('YE', 'Йемен'), ('HT', 'Гаити'), ('NL', 'Нидерланды'), ('NI', 'Никарагуа'), ('LV', 'Латвия'), ('AU', 'Австралия'), ('GE', 'Грузия'), ('CH', 'Швейцария'), ('SR', 'Суринам'), ('IS', 'Исландия'), ('AW', 'Аруба'), ('DE', 'Германия'), ('GH', 'Гана'), ('VE', 'Венесуэла'), ('TG', 'Того'), ('RO', 'Румыния'), ('MN', 'Монголия'), ('MG', 'Мадагаскар'), ('MC', 'Монако'), ('PE', 'Перу'), ('CY', 'Кипр'), ('UY', 'Уругвай'), ('MS', 'Монтсеррат'), ('CZ', 'Чехия'), ('CM', 
'Камерун'), ('BH', 'Бахрейн'), ('PK', 'Пакистан'), ('KZ', 'Казахстан'), ('SN', 'Сенегал'), ('BZ', 'Белиз'), ('JP', 'Япония'), ('ER', 'Эритрея'), ('NE', 'Нигер'), ('AT', 'Австрия'), ('GM', 'Гамбия'), ('MT', 'Мальта'), ('CO', 'Колумбия'), ('CA', 'Канада'), ('WS', 'Самоа'), ('RU', 'Россия'), ('NG', 'Нигерия'), ('VU', 'Вануату'), ('GR', 'Греция'), ('KH', 'Камбоджа'), ('PW', 'Палау'), ('YT', 'Майотта'), ('EE', 'Эстония'), ('TM', 'Туркменистан'), ('PN', 'Питкэрн'), ('ET', 'Эфиопия'), ('MX', 'Мексика'), ('JE', 'Джерси'), ('BJ', 'Бенин'), ('LT', 'Литва'), ('DZ', 'Алжир'), ('SD', 'Судан'), ('AD', 'Андорра'), ('ML', 'Мали'), ('JO', 'Иордания'), ('NO', 'Норвегия'), ('AO', 'Ангола'), ('BI', 'Бурунди'), ('BB', 'Барбадос'), ('PY', 'Парагвай'), ('GG', 'Гернси'), ('BR', 'Бразилия'), ('PL', 'Польша'), ('AF', 'Афганистан'), ('PH', 'Филиппины'), ('SI', 'Словения'), ('TW', 'Тайвань'), ('TZ', 'Танзания'), ('MW', 'Малави'), ('TK', 'Токелау'), ('LU', 'Люксембург'), ('MV', 'Мальдивы'), ('HU', 'Венгрия'), ('UZ', 'Узбекистан'), ('MY', 'Малайзия'), ('ZW', 'Зимбабве'), ('HR', 'Хорватия'), ('IL', 'Израиль'), ('UG', 'Уганда'), ('UA', 'Украина'), ('KW', 'Кувейт'), ('TN', 'Тунис'), ('DK', 'Дания'), ('BY', 'Беларусь'), ('GT', 'Гватемала'), ('NA', 'Намибия'), ('AI', 'Ангилья'), ('GP', 'Гваделупа'), ('TV', 'Тувалу'), ('MM', 'Мьянмы'), ('CW', 'Кюрасао'), ('LA', 'Лаос'), ('IT', 'Италия'), ('MD', 'Молдавия'), ('IE', 'Ирландия'), ('EH', 'Западная Сахара'), ('PF', 'Французская Полинезия'), ('MH', 'Маршалловы острова'), ('KM', 'Коморские острова'), ('AS', 'Американское Самоа'), ('IM', 'Остров Мэн'), ('BM', 'Бермудские острова'), ('VA', 'Святой Престол'), ('BS', 'Багамские острова'), ('ZA', 'Южная Африка'), ('SS', 'Южный Судан'), ('FO', 'Фарерские острова'), ('GQ', 'Экваториальная Гвинея'), ('KP', 'Северная Корея'), ('NZ', 'Новая Зеландия'), ('SC', 'Сейшельские острова'), ('GF', 'Французская Гвиана'), ('NF', 'Остров Норфолк'), ('NC', 'Новой Каледонии'), ('SB', 'Соломоновы Острова'), ('BV', 'Остров Буве'), 
('CF', 'Центральноафриканская Республика'), ('KR', 'Южная Корея'), ('AX', 'Аландские острова'), ('CX', 'Остров Рождества'), ('DO', 'Доминиканская Республика'), ('GB', 'Соединенное Королевство'), ('SA', 'Саудовская Аравия'), ('CK', 'Острова Кука'), ('KY', 'Каймановы острова'), ('MP', 'Северные Марианские острова'), ('WF', 'Уоллис и Футуна'), ('AE', 'Объединенные Арабские Эмираты'), ('US', 'Соединенные Штаты Америки'), ('TF', 'Французские южные территории'), ('TT', 'Тринидад и Тобаго'), ('AG', 'Антигуа и Барбуда'), ('BA', 'Босния и Герцеговина'), ('TC', 'Острова Теркс и Кайкос'), ('UM', 'Внешние малые острова США'), ('HM', 'Остров Херд и Острова Макдоналд'), ('IO', 'Британская территория в Индийском океане'), ('GS', 'Южная Георгия и Южные Сандвичевы острова'), ('VG', 'Виргинские Острова (Британские)'), ('VI', 'Виргинские Острова (США)'), ('MF', 'Святого Мартина (Остров, французская часть)'), ('SX', 'Святого Мартина (Остров, нидерландская часть)'), ('SJ', 'Шпицберген и Ян-Майен'), ('FK', 'Фолклендские острова [Мальвинские]'), ('CD', 'Конго (Демократическая Республика)'), ('FM', 'Микронезия (Федеративные Штаты)'), ('CC', 'Кокосовые (Килинг) острова'), ('SH', 'Святой Елены, Вознесения и Тристан-да-Кунья (Острова)'), ('SM', 'Сан - Марино'), ('PS', 'Палестина, Государство'), ('BQ', 'Бонайре, Синт-Эстатиус и Саба'), ('BF', 'Буркина-Фасо'), ('PR', 'Пуэрто-Рико'), ('SL', 'Сьерра-Леоне'), ('LK', 'Шри-Ланка'), ('BL', 'Сен-Бартельми'), ('LC', 'Сент-Люсия'), ('GW', 'Гвинея-Бисау'), ('CV', 'Кабо-Верде'), ('TL', 'Тимор-Лесте'), ('CR', 'Коста-Рика'), ('PG', 'Папуа-Новая Гвинея'), ('KN', 'Сент-Китс и Невис'), ('PM', 'Сен-Пьер и Микелон'), ('ST', 'Сан-Томе и Принсипи'), ('VC', 'Сент-Винсент и Гренадины'), ('CI', "Кот-д'Ивуар"), ('EU', 'European Union')], default='', max_length=2),
        ),
    ]
| 324.052632 | 5,799 | 0.540198 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shipping', '0004_auto_20180731_1425'),
]
operations = [
migrations.AlterField(
model_name='shippingmethodcountry',
name='country_code',
field=models.CharField(blank=True, choices=[('', 'Rest of World'), ('LY', 'Ливия'), ('EC', 'Эквадор'), ('DJ', 'Джибути'), ('IQ', 'Ирак'), ('KI', 'Кирибати'), ('LR', 'Либерии'), ('BG', 'Болгария'), ('MZ', 'Мозамбик'), ('RW', 'Руанда'), ('TJ', 'Таджикистан'), ('QA', 'Катар'), ('SO', 'Сомали'), ('AQ', 'Антарктида'), ('LB', 'Ливан'), ('JM', 'Ямайка'), ('GL', 'Гренландия'), ('ZM', 'Замбия'), ('MK', 'Македония'), ('PA', 'Панама'), ('GU', 'Гуам'), ('MR', 'Мавритания'), ('SV', 'Сальвадор'), ('VN', 'Вьетнам'), ('MO', 'Макао'), ('LS', 'Лесото'), ('SZ', 'Свазиленд'), ('FI', 'Финляндия'), ('MQ', 'Мартиника'), ('ID', 'Индонезия'), ('MA', 'Марокко'), ('BT', 'Бутан'), ('OM', 'Оман'), ('KE', 'Кения'), ('NP', 'Непал'), ('BW', 'Ботсвана'), ('GY', 'Гайана'), ('GA', 'Габон'), ('BN', 'Бруней'), ('SK', 'Словакия'), ('NR', 'Науру'), ('BE', 'Бельгия'), ('MU', 'Маврикий'), ('BO', 'Боливия'), ('AZ', 'Азербайджан'), ('CL', 'Чили'), ('TR', 'Турция'), ('GI', 'Гибралтар'), ('TH', 'Таиланд'), ('ES', 'Испания'), ('HK', 'Гонконг'), ('FR', 'Франция'), ('SG', 'Сингапур'), ('TO', 'Тонга'), ('FJ', 'Фиджи'), ('GD', 'Гренада'), ('CN', 'Китай'), ('HN', 'Гондурас'), ('AR', 'Аргентина'), ('KG', 'Киргизия'), ('RS', 'Сербия'), ('ME', 'Черногория'), ('TD', 'Чад'), ('IN', 'Индия'), ('GN', 'Гвинея'), ('BD', 'Бангладеш'), ('AL', 'Албания'), ('CG', 'Конго'), ('IR', 'Иран'), ('SY', 'Сирия'), ('CU', 'Куба'), ('NU', 'Ниуэ'), ('RE', 'Реюньон'), ('LI', 'Лихтенштейн'), ('PT', 'Португалия'), ('SE', 'Швеция'), ('DM', 'Доминика'), ('AM', 'Армения'), ('EG', 'Египет'), ('YE', 'Йемен'), ('HT', 'Гаити'), ('NL', 'Нидерланды'), ('NI', 'Никарагуа'), ('LV', 'Латвия'), ('AU', 'Австралия'), ('GE', 'Грузия'), ('CH', 'Швейцария'), ('SR', 'Суринам'), ('IS', 'Исландия'), ('AW', 'Аруба'), ('DE', 'Германия'), ('GH', 'Гана'), ('VE', 'Венесуэла'), ('TG', 'Того'), ('RO', 'Румыния'), ('MN', 'Монголия'), ('MG', 'Мадагаскар'), ('MC', 'Монако'), ('PE', 'Перу'), ('CY', 'Кипр'), ('UY', 'Уругвай'), ('MS', 'Монтсеррат'), ('CZ', 'Чехия'), ('CM', 
'Камерун'), ('BH', 'Бахрейн'), ('PK', 'Пакистан'), ('KZ', 'Казахстан'), ('SN', 'Сенегал'), ('BZ', 'Белиз'), ('JP', 'Япония'), ('ER', 'Эритрея'), ('NE', 'Нигер'), ('AT', 'Австрия'), ('GM', 'Гамбия'), ('MT', 'Мальта'), ('CO', 'Колумбия'), ('CA', 'Канада'), ('WS', 'Самоа'), ('RU', 'Россия'), ('NG', 'Нигерия'), ('VU', 'Вануату'), ('GR', 'Греция'), ('KH', 'Камбоджа'), ('PW', 'Палау'), ('YT', 'Майотта'), ('EE', 'Эстония'), ('TM', 'Туркменистан'), ('PN', 'Питкэрн'), ('ET', 'Эфиопия'), ('MX', 'Мексика'), ('JE', 'Джерси'), ('BJ', 'Бенин'), ('LT', 'Литва'), ('DZ', 'Алжир'), ('SD', 'Судан'), ('AD', 'Андорра'), ('ML', 'Мали'), ('JO', 'Иордания'), ('NO', 'Норвегия'), ('AO', 'Ангола'), ('BI', 'Бурунди'), ('BB', 'Барбадос'), ('PY', 'Парагвай'), ('GG', 'Гернси'), ('BR', 'Бразилия'), ('PL', 'Польша'), ('AF', 'Афганистан'), ('PH', 'Филиппины'), ('SI', 'Словения'), ('TW', 'Тайвань'), ('TZ', 'Танзания'), ('MW', 'Малави'), ('TK', 'Токелау'), ('LU', 'Люксембург'), ('MV', 'Мальдивы'), ('HU', 'Венгрия'), ('UZ', 'Узбекистан'), ('MY', 'Малайзия'), ('ZW', 'Зимбабве'), ('HR', 'Хорватия'), ('IL', 'Израиль'), ('UG', 'Уганда'), ('UA', 'Украина'), ('KW', 'Кувейт'), ('TN', 'Тунис'), ('DK', 'Дания'), ('BY', 'Беларусь'), ('GT', 'Гватемала'), ('NA', 'Намибия'), ('AI', 'Ангилья'), ('GP', 'Гваделупа'), ('TV', 'Тувалу'), ('MM', 'Мьянмы'), ('CW', 'Кюрасао'), ('LA', 'Лаос'), ('IT', 'Италия'), ('MD', 'Молдавия'), ('IE', 'Ирландия'), ('EH', 'Западная Сахара'), ('PF', 'Французская Полинезия'), ('MH', 'Маршалловы острова'), ('KM', 'Коморские острова'), ('AS', 'Американское Самоа'), ('IM', 'Остров Мэн'), ('BM', 'Бермудские острова'), ('VA', 'Святой Престол'), ('BS', 'Багамские острова'), ('ZA', 'Южная Африка'), ('SS', 'Южный Судан'), ('FO', 'Фарерские острова'), ('GQ', 'Экваториальная Гвинея'), ('KP', 'Северная Корея'), ('NZ', 'Новая Зеландия'), ('SC', 'Сейшельские острова'), ('GF', 'Французская Гвиана'), ('NF', 'Остров Норфолк'), ('NC', 'Новой Каледонии'), ('SB', 'Соломоновы Острова'), ('BV', 'Остров Буве'), 
('CF', 'Центральноафриканская Республика'), ('KR', 'Южная Корея'), ('AX', 'Аландские острова'), ('CX', 'Остров Рождества'), ('DO', 'Доминиканская Республика'), ('GB', 'Соединенное Королевство'), ('SA', 'Саудовская Аравия'), ('CK', 'Острова Кука'), ('KY', 'Каймановы острова'), ('MP', 'Северные Марианские острова'), ('WF', 'Уоллис и Футуна'), ('AE', 'Объединенные Арабские Эмираты'), ('US', 'Соединенные Штаты Америки'), ('TF', 'Французские южные территории'), ('TT', 'Тринидад и Тобаго'), ('AG', 'Антигуа и Барбуда'), ('BA', 'Босния и Герцеговина'), ('TC', 'Острова Теркс и Кайкос'), ('UM', 'Внешние малые острова США'), ('HM', 'Остров Херд и Острова Макдоналд'), ('IO', 'Британская территория в Индийском океане'), ('GS', 'Южная Георгия и Южные Сандвичевы острова'), ('VG', 'Виргинские Острова (Британские)'), ('VI', 'Виргинские Острова (США)'), ('MF', 'Святого Мартина (Остров, французская часть)'), ('SX', 'Святого Мартина (Остров, нидерландская часть)'), ('SJ', 'Шпицберген и Ян-Майен'), ('FK', 'Фолклендские острова [Мальвинские]'), ('CD', 'Конго (Демократическая Республика)'), ('FM', 'Микронезия (Федеративные Штаты)'), ('CC', 'Кокосовые (Килинг) острова'), ('SH', 'Святой Елены, Вознесения и Тристан-да-Кунья (Острова)'), ('SM', 'Сан - Марино'), ('PS', 'Палестина, Государство'), ('BQ', 'Бонайре, Синт-Эстатиус и Саба'), ('BF', 'Буркина-Фасо'), ('PR', 'Пуэрто-Рико'), ('SL', 'Сьерра-Леоне'), ('LK', 'Шри-Ланка'), ('BL', 'Сен-Бартельми'), ('LC', 'Сент-Люсия'), ('GW', 'Гвинея-Бисау'), ('CV', 'Кабо-Верде'), ('TL', 'Тимор-Лесте'), ('CR', 'Коста-Рика'), ('PG', 'Папуа-Новая Гвинея'), ('KN', 'Сент-Китс и Невис'), ('PM', 'Сен-Пьер и Микелон'), ('ST', 'Сан-Томе и Принсипи'), ('VC', 'Сент-Винсент и Гренадины'), ('CI', "Кот-д'Ивуар"), ('EU', 'European Union')], default='', max_length=2),
),
]
| true | true |
f7f78ba101a1e8b992c8b3b1ab16ba8ed335e848 | 4,508 | py | Python | MarketAgents/VAVAgent/vav/agent.py | rkini-pnnl/volttron-GS | 60055438446a060176381468757ad0ec339f2371 | [
"BSD-3-Clause"
] | 8 | 2016-11-03T05:00:58.000Z | 2020-10-18T14:49:36.000Z | MarketAgents/VAVAgent/vav/agent.py | kevinatkinson-pnnl/volttron-GS | 479c614a6f7cd779fcc208e8e35d27d0961a16f8 | [
"BSD-3-Clause"
] | 29 | 2016-06-15T17:45:48.000Z | 2020-08-01T02:41:32.000Z | MarketAgents/VAVAgent/vav/agent.py | kevinatkinson-pnnl/volttron-GS | 479c614a6f7cd779fcc208e8e35d27d0961a16f8 | [
"BSD-3-Clause"
] | 39 | 2016-06-08T01:57:49.000Z | 2020-05-27T14:33:44.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2019, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import sys
import logging
from datetime import timedelta as td
from volttron.platform.agent import utils
from volttron.pnnl.transactive_base.transactive.transactive import TransactiveBase
#from decorators import time_cls_methods
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = '0.3'
#@time_cls_methods
class VAVAgent(TransactiveBase):
    """Transactive market agent controlling a VAV (variable air volume) box.

    Loads its configuration from ``config_path`` (falling back to an empty
    configuration when the file cannot be loaded) and delegates market
    participation to :class:`TransactiveBase`.
    """

    def __init__(self, config_path, **kwargs):
        try:
            config = utils.load_config(config_path)
        except Exception:
            # Was `except StandardError`, which only exists in Python 2 and
            # raises NameError under Python 3; fall back to an empty config.
            config = {}
        self.agent_name = config.get("agent_name", "vav")
        TransactiveBase.__init__(self, config, **kwargs)

    def init_predictions(self, output_info):
        """No-op: this agent does not pre-seed any predictions."""
        pass

    def update_state(self, market_index, sched_index, price):
        """Choose a setpoint for one market interval and update the model.

        The interval starts ``market_index + 1`` hours after
        ``self.current_datetime``.  If the zone is scheduled occupied at
        that time, the setpoint is computed by ``determine_control()`` from
        the configured flexibility range and the price forecast; otherwise
        the unoccupied ``off_setpoint`` is used.  The interval's update
        flag is set once the model has been updated.
        """
        market_time = self.current_datetime + td(hours=market_index + 1)
        occupied = self.check_future_schedule(market_time)
        if occupied:
            prices = self.determine_prices()
            _set = self.determine_control(self.ct_flexibility, prices, price)
        else:
            _set = self.off_setpoint
        self.model.update(_set, sched_index, market_index)
        self.update_flag[market_index] = True
def main():
    """Main method called to start the agent.

    Delegates to VOLTTRON's vip_main, which handles the agent's
    install/run plumbing and version registration.
    """
    utils.vip_main(VAVAgent, version=__version__)
if __name__ == '__main__':
    # Entry point for script; a Ctrl-C exits quietly instead of
    # printing a traceback.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
| 38.862069 | 82 | 0.748891 |
import sys
import logging
from datetime import timedelta as td
from volttron.platform.agent import utils
from volttron.pnnl.transactive_base.transactive.transactive import TransactiveBase
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = '0.3'
class VAVAgent(TransactiveBase):
    """Transactive market agent controlling a VAV (variable air volume) box.

    Loads its configuration from ``config_path`` (falling back to an empty
    configuration when the file cannot be loaded) and delegates market
    participation to :class:`TransactiveBase`.
    """

    def __init__(self, config_path, **kwargs):
        try:
            config = utils.load_config(config_path)
        except Exception:
            # Was `except StandardError`, which only exists in Python 2 and
            # raises NameError under Python 3; fall back to an empty config.
            config = {}
        self.agent_name = config.get("agent_name", "vav")
        TransactiveBase.__init__(self, config, **kwargs)

    def init_predictions(self, output_info):
        """No-op: this agent does not pre-seed any predictions."""
        pass

    def update_state(self, market_index, sched_index, price):
        """Choose a setpoint for one market interval and update the model.

        The interval starts ``market_index + 1`` hours after
        ``self.current_datetime``.  If the zone is scheduled occupied at
        that time, the setpoint is computed by ``determine_control()`` from
        the configured flexibility range and the price forecast; otherwise
        the unoccupied ``off_setpoint`` is used.  The interval's update
        flag is set once the model has been updated.
        """
        market_time = self.current_datetime + td(hours=market_index + 1)
        occupied = self.check_future_schedule(market_time)
        if occupied:
            prices = self.determine_prices()
            _set = self.determine_control(self.ct_flexibility, prices, price)
        else:
            _set = self.off_setpoint
        self.model.update(_set, sched_index, market_index)
        self.update_flag[market_index] = True
def main():
    """Main method called to start the agent via VOLTTRON's vip_main."""
    utils.vip_main(VAVAgent, version=__version__)
if __name__ == '__main__':
    # Entry point for script; a Ctrl-C exits quietly instead of
    # printing a traceback.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
| true | true |
f7f78d05656c8b79bb02964ba50c6492c4115000 | 15,509 | py | Python | dciqueue/test_queue.py | redhat-cip/dci-pipeline | 384909c6ffcb4c44977fb34a5c106df4eecf9e35 | [
"Apache-2.0"
] | null | null | null | dciqueue/test_queue.py | redhat-cip/dci-pipeline | 384909c6ffcb4c44977fb34a5c106df4eecf9e35 | [
"Apache-2.0"
] | null | null | null | dciqueue/test_queue.py | redhat-cip/dci-pipeline | 384909c6ffcb4c44977fb34a5c106df4eecf9e35 | [
"Apache-2.0"
] | 2 | 2021-09-20T10:48:55.000Z | 2022-02-28T19:47:55.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
import json
import os
import shutil
import tempfile
import time
import unittest
from dciqueue import lib
from dciqueue import main
from dciqueue import run_cmd
class TestQueue(unittest.TestCase):
def setUp(self):
self.queue_dir = tempfile.mkdtemp()
os.environ["DCI_QUEUE_DIR"] = self.queue_dir
os.environ["DCI_QUEUE_LOG_LEVEL"] = "DEBUG"
os.environ["DCI_QUEUE_CONSOLE_OUTPUT"] = "t"
def tearDown(self):
shutil.rmtree(self.queue_dir)
def call(self, arg, stdout=None, stderr=None, *args, **kwargs):
self.arg = arg
if stdout:
stdout.close()
if stderr:
stderr.close()
return None
def fork(self, arg, *args, **kwargs):
self.arg = arg
def dir_exists(self, directory, subdir):
path = os.path.join(self.queue_dir, directory, subdir)
self.assertTrue(os.path.exists(path) and os.path.isdir(path), path)
def file_exists(self, directory, subdir, filename):
path = os.path.join(self.queue_dir, directory, subdir, filename)
self.assertTrue(os.path.exists(path) and os.path.isfile(path), path)
def link_exists(self, directory, subdir, filename):
path = os.path.join(self.queue_dir, directory, subdir, filename)
self.assertTrue(os.path.exists(path) and os.path.islink(path), path)
def doesnt_exist(self, directory, subdir, filename=None):
if filename:
path = os.path.join(self.queue_dir, directory, subdir, filename)
else:
path = os.path.join(self.queue_dir, directory, subdir)
self.assertFalse(os.path.exists(path), path)
def test_add_pool(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
for key in lib.DIRS:
self.dir_exists(key, "8nodes")
def test_remove_pool(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(main.main(["dci-queue", "remove-pool", "-n", "8nodes"]), 0)
for key in lib.DIRS:
self.doesnt_exist(key, "8nodes")
def test_add_resource(self):
def validate(key, exist):
path = os.path.join(self.queue_dir, key, "8nodes", "cluster4")
if exist:
self.assertTrue(os.path.exists(path) or os.path.islink(path), path)
else:
self.assertFalse(os.path.exists(path) or os.path.islink(path), path)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
cmd = os.path.join(self.queue_dir, "queue", "8nodes", "1" + run_cmd.EXT)
with open(cmd, "w") as fd:
json.dump({"resource": "cluster4"}, fd)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
for key, exist in (("pool", True), ("available", False)):
validate(key, exist)
os.unlink(cmd)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
for key, exist in (("pool", True), ("available", True)):
validate(key, exist)
def test_remove_resource(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
with self.assertRaises(SystemExit):
main.main(["dci-queue", "remove-resource", "8nodes", "cluster4"])
self.assertEqual(
main.main(
[
"dci-queue",
"remove-resource",
"8nodes",
"cluster4",
"reserved to debug blabla (fred)",
]
),
0,
)
for key in ("pool", "available"):
self.doesnt_exist(key, "8nodes", "cluster4")
self.file_exists("reason", "8nodes", "cluster4")
self.assertEqual(main.main(["dci-queue", "list", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.doesnt_exist("reason", "8nodes", "cluster4")
def test_schedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
["dci-queue", "schedule", "-p", "1", "8nodes", "echo", "@RESOURCE"]
),
0,
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "ls", "/etc/@RESOURCE"]), 0
)
for seq in ("1", "2"):
path = os.path.join(self.queue_dir, "queue", "8nodes", seq)
self.assertTrue(os.path.exists(path) and os.path.isfile(path), path)
data = json.load(open(path))
self.assertIn("priority", data)
self.assertEqual(data["priority"], 1 if seq == "1" else 0)
self.doesnt_exist("queue", "8nodes", "3")
def test_schedule_force(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-f", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-f", "8nodes", "echo", "@RESOURCE"]), 0
)
for seq in ("1", "2"):
self.file_exists("queue", "8nodes", seq)
def test_schedule_remove(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-r", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.doesnt_exist("pool", "8nodes", "cluster4")
def test_unschedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
self.doesnt_exist("queue", "8nodes", "1")
def test_schedule_block(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-b", "8nodes", "false", "@RESOURCE"]),
1,
)
def test_run(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"-p",
"2",
"8nodes",
"echo",
"@RESOURCE",
"first",
]
),
0,
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"-p",
"2",
"8nodes",
"echo",
"@RESOURCE",
"second",
]
),
0,
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.file_exists("queue", "8nodes", "1")
self.doesnt_exist("queue", "8nodes", "2")
self.file_exists("queue", "8nodes", "3")
self.file_exists("available", "8nodes", "cluster4")
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.doesnt_exist("queue", "8nodes", "3")
def test_jobid(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"8nodes",
"--",
"bash",
"-c",
'test "$DCI_QUEUE_JOBID" = "8nodes.1" || exit 1; echo @RESOURCE',
]
),
0,
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.assertEqual(run_cmd.RET_CODE[1], 0)
def test_run_unschedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"8nodes",
"--",
"bash",
"-c",
"sleep 3000; echo @RESOURCE",
]
),
0,
)
os.system("dci-queue run 8nodes &")
time.sleep(5)
self.doesnt_exist("available", "8nodes", "cluster4")
self.file_exists("queue", "8nodes", "1" + run_cmd.EXT)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
time.sleep(5)
self.file_exists("available", "8nodes", "cluster4")
self.doesnt_exist("queue", "8nodes", "1" + run_cmd.EXT)
def test_run_invalid_command(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
["dci-queue", "schedule", "8nodes", "no-such-command", "@RESOURCE"]
),
0,
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.doesnt_exist("queue", "8nodes", "1" + run_cmd.EXT)
self.file_exists("available", "8nodes", "cluster4")
def test_run_no_resource(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.file_exists("queue", "8nodes", "1")
def test_list(self):
import io
from contextlib import redirect_stdout
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
with io.StringIO() as buf, redirect_stdout(buf):
rc = main.main(["dci-queue", "list"])
output = buf.getvalue()
self.assertEqual(rc, 0)
self.assertIn("8nodes", output)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "list", "8nodes"]), 0)
self.assertEqual(main.main(["dci-queue", "remove-pool", "-n", "8nodes"]), 0)
with io.StringIO() as buf, redirect_stdout(buf):
rc = main.main(["dci-queue", "list"])
output = buf.getvalue()
self.assertEqual(rc, 0)
self.assertIn("No pool was found", output)
def test_log_level(self):
self.assertEqual(
main.main(["dci-queue", "-l", "CRITICAL", "add-pool", "-n", "8nodes"]), 0
)
with self.assertRaises(SystemExit):
main.main(["dci-queue", "-l", "TOTO", "add-pool", "-n", "8nodes"])
def test_log(self):
self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
saved = os.execlp
os.execlp = self.fork
main.main(["dci-queue", "log", "8nodes", "1"])
self.assertEqual(self.arg, "tail")
os.execlp = saved
def test_search(self):
self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(["dci-queue", "search", "8nodes", "echo", "@RESOURCE"]), 0
)
def test_searchdir(self):
self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
os.chdir("/tmp")
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "searchdir", "8nodes", "/tmp"]), 0)
def test_add_crontab(self):
crontab_file = os.path.join(self.queue_dir, "crontab")
with open(crontab_file, "w"):
pass
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-crontab", "8nodes", crontab_file]), 0
)
self.assertEqual(
main.main(["dci-queue", "remove-crontab", "8nodes", crontab_file]), 0
)
if __name__ == "__main__":
    # Allow running the suite directly with `python test_queue.py`.
    unittest.main()
# test_queue.py ends here
| 37.643204 | 88 | 0.523631 |
import json
import os
import shutil
import tempfile
import time
import unittest
from dciqueue import lib
from dciqueue import main
from dciqueue import run_cmd
class TestQueue(unittest.TestCase):
def setUp(self):
self.queue_dir = tempfile.mkdtemp()
os.environ["DCI_QUEUE_DIR"] = self.queue_dir
os.environ["DCI_QUEUE_LOG_LEVEL"] = "DEBUG"
os.environ["DCI_QUEUE_CONSOLE_OUTPUT"] = "t"
def tearDown(self):
shutil.rmtree(self.queue_dir)
def call(self, arg, stdout=None, stderr=None, *args, **kwargs):
self.arg = arg
if stdout:
stdout.close()
if stderr:
stderr.close()
return None
def fork(self, arg, *args, **kwargs):
self.arg = arg
def dir_exists(self, directory, subdir):
path = os.path.join(self.queue_dir, directory, subdir)
self.assertTrue(os.path.exists(path) and os.path.isdir(path), path)
def file_exists(self, directory, subdir, filename):
path = os.path.join(self.queue_dir, directory, subdir, filename)
self.assertTrue(os.path.exists(path) and os.path.isfile(path), path)
def link_exists(self, directory, subdir, filename):
path = os.path.join(self.queue_dir, directory, subdir, filename)
self.assertTrue(os.path.exists(path) and os.path.islink(path), path)
def doesnt_exist(self, directory, subdir, filename=None):
if filename:
path = os.path.join(self.queue_dir, directory, subdir, filename)
else:
path = os.path.join(self.queue_dir, directory, subdir)
self.assertFalse(os.path.exists(path), path)
def test_add_pool(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
for key in lib.DIRS:
self.dir_exists(key, "8nodes")
def test_remove_pool(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(main.main(["dci-queue", "remove-pool", "-n", "8nodes"]), 0)
for key in lib.DIRS:
self.doesnt_exist(key, "8nodes")
def test_add_resource(self):
def validate(key, exist):
path = os.path.join(self.queue_dir, key, "8nodes", "cluster4")
if exist:
self.assertTrue(os.path.exists(path) or os.path.islink(path), path)
else:
self.assertFalse(os.path.exists(path) or os.path.islink(path), path)
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
cmd = os.path.join(self.queue_dir, "queue", "8nodes", "1" + run_cmd.EXT)
with open(cmd, "w") as fd:
json.dump({"resource": "cluster4"}, fd)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
for key, exist in (("pool", True), ("available", False)):
validate(key, exist)
os.unlink(cmd)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
for key, exist in (("pool", True), ("available", True)):
validate(key, exist)
def test_remove_resource(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
with self.assertRaises(SystemExit):
main.main(["dci-queue", "remove-resource", "8nodes", "cluster4"])
self.assertEqual(
main.main(
[
"dci-queue",
"remove-resource",
"8nodes",
"cluster4",
"reserved to debug blabla (fred)",
]
),
0,
)
for key in ("pool", "available"):
self.doesnt_exist(key, "8nodes", "cluster4")
self.file_exists("reason", "8nodes", "cluster4")
self.assertEqual(main.main(["dci-queue", "list", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.doesnt_exist("reason", "8nodes", "cluster4")
def test_schedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
["dci-queue", "schedule", "-p", "1", "8nodes", "echo", "@RESOURCE"]
),
0,
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "ls", "/etc/@RESOURCE"]), 0
)
for seq in ("1", "2"):
path = os.path.join(self.queue_dir, "queue", "8nodes", seq)
self.assertTrue(os.path.exists(path) and os.path.isfile(path), path)
data = json.load(open(path))
self.assertIn("priority", data)
self.assertEqual(data["priority"], 1 if seq == "1" else 0)
self.doesnt_exist("queue", "8nodes", "3")
def test_schedule_force(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-f", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-f", "8nodes", "echo", "@RESOURCE"]), 0
)
for seq in ("1", "2"):
self.file_exists("queue", "8nodes", seq)
def test_schedule_remove(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-r", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.doesnt_exist("pool", "8nodes", "cluster4")
def test_unschedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
self.doesnt_exist("queue", "8nodes", "1")
def test_schedule_block(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "-b", "8nodes", "false", "@RESOURCE"]),
1,
)
def test_run(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"-p",
"2",
"8nodes",
"echo",
"@RESOURCE",
"first",
]
),
0,
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"-p",
"2",
"8nodes",
"echo",
"@RESOURCE",
"second",
]
),
0,
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.file_exists("queue", "8nodes", "1")
self.doesnt_exist("queue", "8nodes", "2")
self.file_exists("queue", "8nodes", "3")
self.file_exists("available", "8nodes", "cluster4")
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.doesnt_exist("queue", "8nodes", "3")
def test_jobid(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"8nodes",
"--",
"bash",
"-c",
'test "$DCI_QUEUE_JOBID" = "8nodes.1" || exit 1; echo @RESOURCE',
]
),
0,
)
self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
self.assertEqual(run_cmd.RET_CODE[1], 0)
def test_run_unschedule(self):
self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
self.assertEqual(
main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
)
self.assertEqual(
main.main(
[
"dci-queue",
"schedule",
"8nodes",
"--",
"bash",
"-c",
"sleep 3000; echo @RESOURCE",
]
),
0,
)
os.system("dci-queue run 8nodes &")
time.sleep(5)
self.doesnt_exist("available", "8nodes", "cluster4")
self.file_exists("queue", "8nodes", "1" + run_cmd.EXT)
self.assertEqual(main.main(["dci-queue", "unschedule", "8nodes", "1"]), 0)
time.sleep(5)
self.file_exists("available", "8nodes", "cluster4")
self.doesnt_exist("queue", "8nodes", "1" + run_cmd.EXT)
    def test_run_invalid_command(self):
        """A job whose command cannot run is dropped and its resource freed."""
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
        )
        # Scheduling succeeds even though the command does not exist.
        self.assertEqual(
            main.main(
                ["dci-queue", "schedule", "8nodes", "no-such-command", "@RESOURCE"]
            ),
            0,
        )
        self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
        # The failed job is removed and the resource is available again.
        self.doesnt_exist("queue", "8nodes", "1" + run_cmd.EXT)
        self.file_exists("available", "8nodes", "cluster4")
    def test_run_no_resource(self):
        """Without any resource in the pool, a scheduled job stays queued."""
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
        )
        self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
        # Nothing could run, so the job entry is still present.
        self.file_exists("queue", "8nodes", "1")
    def test_list(self):
        """The list command reports existing pools and a message when none exist."""
        import io
        from contextlib import redirect_stdout

        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        # Capture stdout to check what 'list' prints.
        with io.StringIO() as buf, redirect_stdout(buf):
            rc = main.main(["dci-queue", "list"])
            output = buf.getvalue()
        self.assertEqual(rc, 0)
        self.assertIn("8nodes", output)
        self.assertEqual(
            main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
        )
        self.assertEqual(
            main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
        )
        self.assertEqual(main.main(["dci-queue", "list", "8nodes"]), 0)
        self.assertEqual(main.main(["dci-queue", "remove-pool", "-n", "8nodes"]), 0)
        # After removing the only pool, 'list' reports that none were found.
        with io.StringIO() as buf, redirect_stdout(buf):
            rc = main.main(["dci-queue", "list"])
            output = buf.getvalue()
        self.assertEqual(rc, 0)
        self.assertIn("No pool was found", output)
    def test_log_level(self):
        """-l accepts a valid logging level name and exits on an invalid one."""
        self.assertEqual(
            main.main(["dci-queue", "-l", "CRITICAL", "add-pool", "-n", "8nodes"]), 0
        )
        # An unknown level name makes argument parsing bail out.
        with self.assertRaises(SystemExit):
            main.main(["dci-queue", "-l", "TOTO", "add-pool", "-n", "8nodes"])
    def test_log(self):
        """The log command fails without a pool and execs 'tail' on a job log."""
        # No pool yet: 'log' returns a non-zero status.
        self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
        )
        self.assertEqual(
            main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
        )
        self.assertEqual(main.main(["dci-queue", "run", "8nodes"]), 0)
        # Replace os.execlp with a recording stub so the test process is not
        # replaced; self.fork stores the program name in self.arg.
        saved = os.execlp
        os.execlp = self.fork
        main.main(["dci-queue", "log", "8nodes", "1"])
        self.assertEqual(self.arg, "tail")
        os.execlp = saved  # restore the real exec
    def test_search(self):
        """The search command succeeds for a scheduled command."""
        # Sanity check: 'log' on a non-existent pool fails.
        self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
        )
        self.assertEqual(
            main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
        )
        self.assertEqual(
            main.main(["dci-queue", "search", "8nodes", "echo", "@RESOURCE"]), 0
        )
    def test_searchdir(self):
        """The searchdir command finds jobs scheduled from a given directory."""
        # Sanity check: 'log' on a non-existent pool fails.
        self.assertEqual(main.main(["dci-queue", "log", "8nodes", "1"]), 1)
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "add-resource", "8nodes", "cluster4"]), 0
        )
        # Schedule from /tmp so the job's working directory is known.
        os.chdir("/tmp")
        self.assertEqual(
            main.main(["dci-queue", "schedule", "8nodes", "echo", "@RESOURCE"]), 0
        )
        self.assertEqual(main.main(["dci-queue", "searchdir", "8nodes", "/tmp"]), 0)
    def test_add_crontab(self):
        """Crontab entries can be added to and removed from a pool."""
        crontab_file = os.path.join(self.queue_dir, "crontab")
        # Create an empty crontab file to register.
        with open(crontab_file, "w"):
            pass
        self.assertEqual(main.main(["dci-queue", "add-pool", "-n", "8nodes"]), 0)
        self.assertEqual(
            main.main(["dci-queue", "add-crontab", "8nodes", crontab_file]), 0
        )
        self.assertEqual(
            main.main(["dci-queue", "remove-crontab", "8nodes", crontab_file]), 0
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f78d7eab191ff6a62accff1b243ef9c1aa2ac6 | 1,315 | py | Python | setup.py | eddwhite/KiKit | e538fcfe40f857697fccb0d7693f1dbb058bf226 | [
"MIT"
] | null | null | null | setup.py | eddwhite/KiKit | e538fcfe40f857697fccb0d7693f1dbb058bf226 | [
"MIT"
] | null | null | null | setup.py | eddwhite/KiKit | e538fcfe40f857697fccb0d7693f1dbb058bf226 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import setuptools
import versioneer

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="KiKit",
    python_requires='>=3.7',
    # Version and build commands are derived from git tags via versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="Jan Mrázek",
    author_email="email@honzamrazek.cz",
    description="Automation for KiCAD boards",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/yaqwsx/KiKit",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        "pcbnewTransition==0.2.0",
        "numpy>=1.21",
        "shapely>=1.7",
        "click>=7.1",
        "markdown2>=2.4",
        "pybars3>=0.9",
        "solidpython>=1.1.2",
        "commentjson>=0.9"
    ],
    setup_requires=[
        "versioneer"
    ],
    extras_require={
        "dev": ["pytest"],
    },
    zip_safe=False,
    include_package_data=True,
    # Command line entry points installed with the package.
    entry_points = {
        "console_scripts": [
            "kikit=kikit.ui:cli",
            "kikit-plugin=kikit.plugin:cli",
            "kikit-info=kikit.info:cli"
        ],
    }
)
| 24.351852 | 50 | 0.58327 |
import setuptools
import versioneer
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="KiKit",
python_requires='>=3.7',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="Jan Mrázek",
author_email="email@honzamrazek.cz",
description="Automation for KiCAD boards",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yaqwsx/KiKit",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"pcbnewTransition==0.2.0",
"numpy>=1.21",
"shapely>=1.7",
"click>=7.1",
"markdown2>=2.4",
"pybars3>=0.9",
"solidpython>=1.1.2",
"commentjson>=0.9"
],
setup_requires=[
"versioneer"
],
extras_require={
"dev": ["pytest"],
},
zip_safe=False,
include_package_data=True,
entry_points = {
"console_scripts": [
"kikit=kikit.ui:cli",
"kikit-plugin=kikit.plugin:cli",
"kikit-info=kikit.info:cli"
],
}
)
| true | true |
f7f78d9b50f7e17926d9be2d895307cca5ded3c0 | 605 | py | Python | setup.py | deepak1792/python-jenkinsfile-testing | 1198f7d02921792c3c254e0141007c65e3ef4772 | [
"BSD-3-Clause"
] | 30 | 2017-10-09T09:13:37.000Z | 2021-09-23T04:16:21.000Z | setup.py | deepak1792/python-jenkinsfile-testing | 1198f7d02921792c3c254e0141007c65e3ef4772 | [
"BSD-3-Clause"
] | 1 | 2018-02-24T16:43:13.000Z | 2018-02-24T16:43:13.000Z | setup.py | deepak1792/python-jenkinsfile-testing | 1198f7d02921792c3c254e0141007c65e3ef4772 | [
"BSD-3-Clause"
] | 39 | 2018-02-24T16:43:50.000Z | 2021-11-23T13:39:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for scaffold_test.
This file was generated with PyScaffold 2.5.7, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
    """Run the PyScaffold-based setup, requiring sphinx only for doc builds."""
    # Only pull in sphinx when a documentation command was requested.
    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
    sphinx = ['sphinx'] if needs_sphinx else []
    setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
          use_pyscaffold=True)


if __name__ == "__main__":
    setup_package()
import sys
from setuptools import setup
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
| true | true |
f7f78e222aecca4acd8c7043faea0214b9ecb37f | 1,284 | py | Python | patches/sitecustomize.py | toshihikoyanase/docker-python | 02f8400ee7390e514c03aa313b6a509db72cab23 | [
"Apache-2.0"
] | 38 | 2020-07-13T08:46:39.000Z | 2021-02-08T01:38:44.000Z | patches/sitecustomize.py | toshihikoyanase/docker-python | 02f8400ee7390e514c03aa313b6a509db72cab23 | [
"Apache-2.0"
] | null | null | null | patches/sitecustomize.py | toshihikoyanase/docker-python | 02f8400ee7390e514c03aa313b6a509db72cab23 | [
"Apache-2.0"
] | 20 | 2020-07-14T03:38:50.000Z | 2021-01-08T06:24:17.000Z | import os
kaggle_proxy_token = os.getenv("KAGGLE_DATA_PROXY_TOKEN")
bq_user_jwt = os.getenv("KAGGLE_BQ_USER_JWT")
# Only patch BigQuery when running with Kaggle proxy or user credentials.
if kaggle_proxy_token or bq_user_jwt:
    from google.auth import credentials
    from google.cloud import bigquery
    from google.cloud.bigquery._http import Connection
    # TODO: Update this to the correct kaggle.gcp path once we no longer inject modules
    # from the worker.
    from kaggle_gcp import PublicBigqueryClient

    def monkeypatch_bq(bq_client, *args, **kwargs):
        """Route Client() calls without explicit project/credentials to Kaggle's proxy."""
        data_proxy_project = os.getenv("KAGGLE_DATA_PROXY_PROJECT")
        specified_project = kwargs.get('project')
        specified_credentials = kwargs.get('credentials')
        if specified_project is None and specified_credentials is None:
            print("Using Kaggle's public dataset BigQuery integration.")
            return PublicBigqueryClient(*args, **kwargs)
        else:
            # Caller supplied its own project/credentials: use the real client.
            return bq_client(*args, **kwargs)

    # Monkey patches BigQuery client creation to use proxy or user-connected GCP account.
    # Deprecated in favor of Kaggle.DataProxyClient().
    # TODO: Remove this once uses have migrated to that new interface.
    bq_client = bigquery.Client
    bigquery.Client = lambda *args, **kwargs: monkeypatch_bq(
        bq_client, *args, **kwargs)
kaggle_proxy_token = os.getenv("KAGGLE_DATA_PROXY_TOKEN")
bq_user_jwt = os.getenv("KAGGLE_BQ_USER_JWT")
if kaggle_proxy_token or bq_user_jwt:
from google.auth import credentials
from google.cloud import bigquery
from google.cloud.bigquery._http import Connection
from kaggle_gcp import PublicBigqueryClient
def monkeypatch_bq(bq_client, *args, **kwargs):
data_proxy_project = os.getenv("KAGGLE_DATA_PROXY_PROJECT")
specified_project = kwargs.get('project')
specified_credentials = kwargs.get('credentials')
if specified_project is None and specified_credentials is None:
print("Using Kaggle's public dataset BigQuery integration.")
return PublicBigqueryClient(*args, **kwargs)
else:
return bq_client(*args, **kwargs)
# Monkey patches BigQuery client creation to use proxy or user-connected GCP account.
# Deprecated in favor of Kaggle.DataProxyClient().
# TODO: Remove this once uses have migrated to that new interface.
bq_client = bigquery.Client
bigquery.Client = lambda *args, **kwargs: monkeypatch_bq(
bq_client, *args, **kwargs)
| true | true |
f7f78e9f081df49d789b9e90857ff2903b9b41a6 | 3,820 | py | Python | rotkehlchen/accounting/structures.py | MichaelHettmer/rotki | a4b2dbf4bcbe409a86fa144932c26305b1c449f3 | [
"BSD-3-Clause"
] | null | null | null | rotkehlchen/accounting/structures.py | MichaelHettmer/rotki | a4b2dbf4bcbe409a86fa144932c26305b1c449f3 | [
"BSD-3-Clause"
] | 3 | 2021-01-28T21:30:46.000Z | 2022-03-25T19:17:00.000Z | rotkehlchen/accounting/structures.py | MichaelHettmer/rotki | a4b2dbf4bcbe409a86fa144932c26305b1c449f3 | [
"BSD-3-Clause"
] | null | null | null | from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import InputError
from rotkehlchen.fval import FVal
from rotkehlchen.typing import Timestamp
class DefiEventType(Enum):
    """Kinds of DeFi events tracked for accounting purposes."""
    DSR_LOAN_GAIN = 0
    MAKERDAO_VAULT_LOSS = 1
    AAVE_LOAN_INTEREST = 2
    COMPOUND_LOAN_INTEREST = 3
    COMPOUND_DEBT_REPAY = 4
    COMPOUND_LIQUIDATION_DEBT_REPAID = 5
    COMPOUND_LIQUIDATION_COLLATERAL_LOST = 6
    COMPOUND_REWARDS = 7

    def __str__(self) -> str:
        """Return the human readable label for this event type."""
        # Built locally: a dict class attribute would be turned into an enum
        # member by the Enum machinery.
        labels = {
            DefiEventType.DSR_LOAN_GAIN: "DSR loan gain",
            DefiEventType.MAKERDAO_VAULT_LOSS: "Makerdao vault loss",
            DefiEventType.AAVE_LOAN_INTEREST: "Aave loan interest",
            DefiEventType.COMPOUND_LOAN_INTEREST: "Compound loan interest",
            DefiEventType.COMPOUND_DEBT_REPAY: "Compound debt repayment",
            DefiEventType.COMPOUND_LIQUIDATION_DEBT_REPAID: "Compound liquidation debt repayment",
            DefiEventType.COMPOUND_LIQUIDATION_COLLATERAL_LOST: "Compound liquidation collateral lost",
            DefiEventType.COMPOUND_REWARDS: "Compound rewards",
        }
        try:
            return labels[self]
        except KeyError:
            raise RuntimeError(f'Corrupt value {self} for DefiEventType -- Should never happen')

    def is_profitable(self) -> bool:
        """Return True if this event type represents a gain."""
        return self in {
            DefiEventType.DSR_LOAN_GAIN,
            DefiEventType.AAVE_LOAN_INTEREST,
            DefiEventType.COMPOUND_LOAN_INTEREST,
            DefiEventType.COMPOUND_REWARDS,
            DefiEventType.COMPOUND_DEBT_REPAY,
        }
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class DefiEvent:
    """A single DeFi event of a given type, asset and amount at a timestamp."""
    timestamp: Timestamp
    event_type: DefiEventType
    asset: Asset
    amount: FVal

    def is_profitable(self) -> bool:
        """True if this event counts as profit (delegates to the event type)."""
        return self.event_type.is_profitable()
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Balance:
    """An asset amount together with its USD value."""
    amount: FVal = ZERO
    usd_value: FVal = ZERO

    def serialize(self) -> Dict[str, str]:
        """Return a JSON-friendly dict with both fields as strings."""
        return {'amount': str(self.amount), 'usd_value': str(self.usd_value)}

    def to_dict(self) -> Dict[str, FVal]:
        """Return a dict keeping the FVal values."""
        return {'amount': self.amount, 'usd_value': self.usd_value}

    def __add__(self, other: Any) -> 'Balance':
        # Accepts another Balance or an {'amount': .., 'usd_value': ..} dict.
        other = _evaluate_balance_input(other, 'addition')
        return Balance(
            amount=self.amount + other.amount,
            usd_value=self.usd_value + other.usd_value,
        )

    def __sub__(self, other: Any) -> 'Balance':
        # Accepts another Balance or an {'amount': .., 'usd_value': ..} dict.
        other = _evaluate_balance_input(other, 'subtraction')
        return Balance(
            amount=self.amount - other.amount,
            usd_value=self.usd_value - other.usd_value,
        )
def _evaluate_balance_input(other: Any, operation: str) -> Balance:
    """Coerce the operand of a Balance operation into a Balance.

    Accepts either a Balance instance (returned unchanged) or a dict with
    exactly the keys 'amount' and 'usd_value'. Raises InputError otherwise.
    """
    if isinstance(other, Balance):
        return other

    if not isinstance(other, dict):
        raise InputError(f'Found a {type(other)} object during Balance {operation}')

    if len(other) != 2 or 'amount' not in other or 'usd_value' not in other:
        raise InputError(f'Found invalid dict object during Balance {operation}')

    try:
        return Balance(
            amount=FVal(other['amount']),
            usd_value=FVal(other['usd_value']),
        )
    except ValueError:
        raise InputError(
            f'Found valid dict object but with invalid values during Balance {operation}',
        )
| 35.700935 | 98 | 0.666754 | from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import InputError
from rotkehlchen.fval import FVal
from rotkehlchen.typing import Timestamp
class DefiEventType(Enum):
DSR_LOAN_GAIN = 0
MAKERDAO_VAULT_LOSS = 1
AAVE_LOAN_INTEREST = 2
COMPOUND_LOAN_INTEREST = 3
COMPOUND_DEBT_REPAY = 4
COMPOUND_LIQUIDATION_DEBT_REPAID = 5
COMPOUND_LIQUIDATION_COLLATERAL_LOST = 6
COMPOUND_REWARDS = 7
def __str__(self) -> str:
if self == DefiEventType.DSR_LOAN_GAIN:
return "DSR loan gain"
elif self == DefiEventType.MAKERDAO_VAULT_LOSS:
return "Makerdao vault loss"
elif self == DefiEventType.AAVE_LOAN_INTEREST:
return "Aave loan interest"
elif self == DefiEventType.COMPOUND_LOAN_INTEREST:
return "Compound loan interest"
elif self == DefiEventType.COMPOUND_DEBT_REPAY:
return "Compound debt repayment"
elif self == DefiEventType.COMPOUND_LIQUIDATION_DEBT_REPAID:
return "Compound liquidation debt repayment"
elif self == DefiEventType.COMPOUND_LIQUIDATION_COLLATERAL_LOST:
return "Compound liquidation collateral lost"
elif self == DefiEventType.COMPOUND_REWARDS:
return "Compound rewards"
raise RuntimeError(f'Corrupt value {self} for DefiEventType -- Should never happen')
def is_profitable(self) -> bool:
return self in (
DefiEventType.DSR_LOAN_GAIN,
DefiEventType.AAVE_LOAN_INTEREST,
DefiEventType.COMPOUND_LOAN_INTEREST,
DefiEventType.COMPOUND_REWARDS,
DefiEventType.COMPOUND_DEBT_REPAY,
)
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class DefiEvent:
timestamp: Timestamp
event_type: DefiEventType
asset: Asset
amount: FVal
def is_profitable(self) -> bool:
return self.event_type.is_profitable()
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Balance:
amount: FVal = ZERO
usd_value: FVal = ZERO
def serialize(self) -> Dict[str, str]:
return {'amount': str(self.amount), 'usd_value': str(self.usd_value)}
def to_dict(self) -> Dict[str, FVal]:
return {'amount': self.amount, 'usd_value': self.usd_value}
def __add__(self, other: Any) -> 'Balance':
other = _evaluate_balance_input(other, 'addition')
return Balance(
amount=self.amount + other.amount,
usd_value=self.usd_value + other.usd_value,
)
def __sub__(self, other: Any) -> 'Balance':
other = _evaluate_balance_input(other, 'subtraction')
return Balance(
amount=self.amount - other.amount,
usd_value=self.usd_value - other.usd_value,
)
def _evaluate_balance_input(other: Any, operation: str) -> Balance:
transformed_input = other
if isinstance(other, dict):
if len(other) == 2 and 'amount' in other and 'usd_value' in other:
try:
amount = FVal(other['amount'])
usd_value = FVal(other['usd_value'])
except ValueError:
raise InputError(
f'Found valid dict object but with invalid values during Balance {operation}',
)
transformed_input = Balance(amount=amount, usd_value=usd_value)
else:
raise InputError(f'Found invalid dict object during Balance {operation}')
elif not isinstance(other, Balance):
raise InputError(f'Found a {type(other)} object during Balance {operation}')
return transformed_input
| true | true |
f7f78ee21707bcc69686776c462840457f6eaf68 | 6,307 | py | Python | prepare_vocab.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | prepare_vocab.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | prepare_vocab.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | """
Prepare vocabulary and initial word vectors.
"""
import json
import msgpack
import pickle
import argparse
import numpy as np
from collections import Counter
from utils import vocab, constant, helper
def parse_args():
    """Parse the command line arguments for vocab/embedding preparation."""
    parser = argparse.ArgumentParser(description='Prepare vocab for relation extraction.')
    parser.add_argument('data_dir', help='TACRED directory.')
    parser.add_argument('squad_dir', help='SQuAD directory.')
    parser.add_argument('vocab_dir', help='Output vocab directory.')
    parser.add_argument('--glove_dir', default='dataset/glove', help='GloVe directory.')
    parser.add_argument('--wv_file', default='glove.840B.300d.txt', help='GloVe vector file.')
    parser.add_argument('--wv_dim', type=int, default=300, help='GloVe vector dimension.')
    parser.add_argument('--min_freq', type=int, default=0, help='If > 0, use min_freq as the cutoff.')
    parser.add_argument('--lower', action='store_true', help='If specified, lowercase all words.')

    args = parser.parse_args()
    return args
def process_squad(squad_msgpack):
    """Collect all context and question tokens from the SQuAD train/dev rows.

    Each row stores its context tokens at index 1 and its question tokens at
    index 5. Returns a (train_tokens, dev_tokens) pair of flat lists.
    """
    train_rows, dev_rows = squad_msgpack

    def collect(rows):
        tokens = []
        for row in rows:
            tokens.extend(row[1])  # context tokens
            tokens.extend(row[5])  # question tokens
        return tokens

    return collect(train_rows), collect(dev_rows)
def main():
    """Build the joint TACRED+SQuAD vocab, dump embeddings and id-converted SQuAD."""
    args = parse_args()

    # input files
    train_file = args.data_dir + '/train.json'
    dev_file = args.data_dir + '/dev.json'
    test_file = args.data_dir + '/test.json'
    wv_file = args.glove_dir + '/' + args.wv_file
    wv_dim = args.wv_dim

    # output files
    helper.ensure_dir(args.vocab_dir)
    vocab_file = args.vocab_dir + '/vocab.pkl'
    emb_file = args.vocab_dir + '/embedding.npy'

    # load files
    print("loading files...")
    train_tokens = load_tokens(train_file)
    dev_tokens = load_tokens(dev_file)
    test_tokens = load_tokens(test_file)

    # processing squad intermediate files
    with open(args.squad_dir + '/intermediate.msgpack', 'rb') as squad_file:
        squad_msgpack = msgpack.load(squad_file, encoding='utf-8')
    squad_train, squad_dev = squad_msgpack
    squad_train_tokens, squad_dev_tokens = process_squad(squad_msgpack)

    if args.lower:
        # Lowercase every token list when requested.
        train_tokens, dev_tokens, test_tokens = [[t.lower() for t in tokens] for tokens in \
            (train_tokens, dev_tokens, test_tokens)]
        squad_train_tokens, squad_dev_tokens = [[t.lower() for t in tokens] for tokens in \
            (squad_train_tokens, squad_dev_tokens)]

    # load glove
    print("loading glove...")
    glove_vocab = vocab.load_glove_vocab(wv_file, wv_dim)
    print("{} words loaded from glove.".format(len(glove_vocab)))

    # Vocab is built from TACRED train + SQuAD train tokens only.
    print("building vocab...")
    v = build_vocab(train_tokens + squad_train_tokens, glove_vocab, args.min_freq)

    print("calculating oov...")
    datasets = {'train': train_tokens, 'dev': dev_tokens, 'test': test_tokens}
    for dname, d in datasets.items():
        total, oov = count_oov(d, v)
        print("{} oov: {}/{} ({:.2f}%)".format(dname, oov, total, oov * 100.0 / total))

    print("building embeddings...")
    embedding = vocab.build_embedding(wv_file, v, wv_dim)
    print("embedding size: {} x {}".format(*embedding.shape))

    print("dumping to files...")
    with open(vocab_file, 'wb') as outfile:
        pickle.dump(v, outfile)
    np.save(emb_file, embedding)
    print("all done.")

    print('converting SQuAD dataset to ids')
    id2word = v
    word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))])

    def to_id(row, unk_id=constant.UNK_ID):
        # Map tokens/tags/entities of one SQuAD row to integer ids,
        # falling back to unk_id for anything out of vocabulary.
        context_tokens = row[1]
        context_features = row[2]
        context_tags = row[3]
        context_ents = row[4]
        question_tokens = row[5]
        question_ids = [word2id[w] if w in word2id else unk_id for w in question_tokens]
        context_ids = [word2id[w] if w in word2id else unk_id for w in context_tokens]
        tag_ids = [constant.POS_TO_ID[w] if w in constant.POS_TO_ID else unk_id for w in context_tags]
        ent_ids = [constant.NER_TO_ID[w] if w in constant.NER_TO_ID else unk_id for w in context_ents]
        return [row[0], context_ids, context_features, tag_ids, ent_ids, question_ids] + row[6:]

    squad_train = list(map(to_id, squad_train))
    squad_dev = list(map(to_id, squad_dev))

    result = {
        'train': squad_train,
        'dev': squad_dev
    }
    # train: id, context_id, context_features, tag_id, ent_id,
    #        question_id, context, context_token_span, answer_start, answer_end
    # dev:   id, context_id, context_features, tag_id, ent_id,
    #        question_id, context, context_token_span, answer
    with open('dataset/SQuAD/data.msgpack', 'wb') as f:
        msgpack.dump(result, f)
def load_tokens(filename):
    """Read a TACRED-style JSON file and return all 'tokens' lists concatenated."""
    with open(filename) as infile:
        data = json.load(infile)

    tokens = []
    for example in data:
        tokens.extend(example['tokens'])

    print("{} tokens from {} examples loaded from {}.".format(len(tokens), len(data), filename))
    return tokens
def build_vocab(tokens, glove_vocab, min_freq):
    """Build the vocab list from tokens, filtered by frequency or GloVe coverage.

    With min_freq > 0 words below the cutoff are dropped; otherwise only words
    present in glove_vocab are kept. Words are ordered most-frequent first and
    prefixed with the special tokens and entity masks.
    """
    counter = Counter(tokens)

    # Choose the filtering rule, then sort survivors by descending frequency.
    if min_freq > 0:
        kept = (word for word in counter if counter.get(word) >= min_freq)
    else:
        kept = (word for word in counter if word in glove_vocab)
    v = sorted(kept, key=counter.get, reverse=True)

    # add special tokens and entity mask tokens
    v = constant.VOCAB_PREFIX + entity_masks() + v
    print("vocab built with {}/{} words.".format(len(v), len(counter)))
    return v
def count_oov(tokens, vocab):
    """Return (total token count, number of tokens not covered by vocab)."""
    counts = Counter(tokens)
    total = sum(counts.values())
    covered = sum(counts[word] for word in vocab)
    return total, total - covered
def entity_masks():
    """ Get all entity mask tokens as a list. """
    # Skip the first two entries of each NER mapping (special tokens).
    subj_entities = list(constant.SUBJ_NER_TO_ID.keys())[2:]
    obj_entities = list(constant.OBJ_NER_TO_ID.keys())[2:]
    return ["SUBJ-" + name for name in subj_entities] + [
        "OBJ-" + name for name in obj_entities
    ]
# Script entry point.
if __name__ == '__main__':
    main()
| 36.883041 | 102 | 0.654352 | import json
import msgpack
import pickle
import argparse
import numpy as np
from collections import Counter
from utils import vocab, constant, helper
def parse_args():
parser = argparse.ArgumentParser(description='Prepare vocab for relation extraction.')
parser.add_argument('data_dir', help='TACRED directory.')
parser.add_argument('squad_dir', help='SQuAD directory.')
parser.add_argument('vocab_dir', help='Output vocab directory.')
parser.add_argument('--glove_dir', default='dataset/glove', help='GloVe directory.')
parser.add_argument('--wv_file', default='glove.840B.300d.txt', help='GloVe vector file.')
parser.add_argument('--wv_dim', type=int, default=300, help='GloVe vector dimension.')
parser.add_argument('--min_freq', type=int, default=0, help='If > 0, use min_freq as the cutoff.')
parser.add_argument('--lower', action='store_true', help='If specified, lowercase all words.')
args = parser.parse_args()
return args
def process_squad(squad_msgpack):
train, dev = squad_msgpack
train_tokens = []
dev_tokens = []
for row in train:
train_tokens += row[1]
train_tokens += row[5]
for row in dev:
dev_tokens += row[1]
dev_tokens += row[5]
return train_tokens, dev_tokens
def main():
args = parse_args()
train_file = args.data_dir + '/train.json'
dev_file = args.data_dir + '/dev.json'
test_file = args.data_dir + '/test.json'
wv_file = args.glove_dir + '/' + args.wv_file
wv_dim = args.wv_dim
helper.ensure_dir(args.vocab_dir)
vocab_file = args.vocab_dir + '/vocab.pkl'
emb_file = args.vocab_dir + '/embedding.npy'
print("loading files...")
train_tokens = load_tokens(train_file)
dev_tokens = load_tokens(dev_file)
test_tokens = load_tokens(test_file)
with open(args.squad_dir + '/intermediate.msgpack', 'rb') as squad_file:
squad_msgpack = msgpack.load(squad_file, encoding='utf-8')
squad_train, squad_dev = squad_msgpack
squad_train_tokens, squad_dev_tokens = process_squad(squad_msgpack)
if args.lower:
train_tokens, dev_tokens, test_tokens = [[t.lower() for t in tokens] for tokens in \
(train_tokens, dev_tokens, test_tokens)]
squad_train_tokens, squad_dev_tokens = [[t.lower() for t in tokens] for tokens in \
(squad_train_tokens, squad_dev_tokens)]
print("loading glove...")
glove_vocab = vocab.load_glove_vocab(wv_file, wv_dim)
print("{} words loaded from glove.".format(len(glove_vocab)))
print("building vocab...")
v = build_vocab(train_tokens + squad_train_tokens, glove_vocab, args.min_freq)
print("calculating oov...")
datasets = {'train': train_tokens, 'dev': dev_tokens, 'test': test_tokens}
for dname, d in datasets.items():
total, oov = count_oov(d, v)
print("{} oov: {}/{} ({:.2f}%)".format(dname, oov, total, oov * 100.0 / total))
print("building embeddings...")
embedding = vocab.build_embedding(wv_file, v, wv_dim)
print("embedding size: {} x {}".format(*embedding.shape))
print("dumping to files...")
with open(vocab_file, 'wb') as outfile:
pickle.dump(v, outfile)
np.save(emb_file, embedding)
print("all done.")
print('converting SQuAD dataset to ids')
id2word = v
word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))])
def to_id(row, unk_id=constant.UNK_ID):
context_tokens = row[1]
context_features = row[2]
context_tags = row[3]
context_ents = row[4]
question_tokens = row[5]
question_ids = [word2id[w] if w in word2id else unk_id for w in question_tokens]
context_ids = [word2id[w] if w in word2id else unk_id for w in context_tokens]
tag_ids = [constant.POS_TO_ID[w] if w in constant.POS_TO_ID else unk_id for w in context_tags]
ent_ids = [constant.NER_TO_ID[w] if w in constant.NER_TO_ID else unk_id for w in context_ents]
return [row[0], context_ids, context_features, tag_ids, ent_ids, question_ids] + row[6:]
squad_train = list(map(to_id, squad_train))
squad_dev = list(map(to_id, squad_dev))
result = {
'train': squad_train,
'dev': squad_dev
}
with open('dataset/SQuAD/data.msgpack', 'wb') as f:
msgpack.dump(result, f)
def load_tokens(filename):
with open(filename) as infile:
data = json.load(infile)
tokens = []
for d in data:
tokens += d['tokens']
print("{} tokens from {} examples loaded from {}.".format(len(tokens), len(data), filename))
return tokens
def build_vocab(tokens, glove_vocab, min_freq):
counter = Counter(t for t in tokens)
if min_freq > 0:
v = sorted([t for t in counter if counter.get(t) >= min_freq], key=counter.get, reverse=True)
else:
v = sorted([t for t in counter if t in glove_vocab], key=counter.get, reverse=True)
v = constant.VOCAB_PREFIX + entity_masks() + v
print("vocab built with {}/{} words.".format(len(v), len(counter)))
return v
def count_oov(tokens, vocab):
c = Counter(t for t in tokens)
total = sum(c.values())
matched = sum(c[t] for t in vocab)
return total, total - matched
def entity_masks():
masks = []
subj_entities = list(constant.SUBJ_NER_TO_ID.keys())[2:]
obj_entities = list(constant.OBJ_NER_TO_ID.keys())[2:]
masks += ["SUBJ-" + e for e in subj_entities]
masks += ["OBJ-" + e for e in obj_entities]
return masks
if __name__ == '__main__':
main()
| true | true |
f7f78fcfcf856c5765cbe994b8dd7e978c64aaa8 | 229,936 | py | Python | test/draw_test.py | Sowmyasree101/pygame | 264c74a288fa36d8a90c9e2115bd8610cd6b3fd4 | [
"Python-2.0",
"OLDAP-2.3"
] | 1 | 2021-04-26T09:21:12.000Z | 2021-04-26T09:21:12.000Z | test/draw_test.py | Sowmyasree101/pygame | 264c74a288fa36d8a90c9e2115bd8610cd6b3fd4 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | test/draw_test.py | Sowmyasree101/pygame | 264c74a288fa36d8a90c9e2115bd8610cd6b3fd4 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | import math
import unittest
import sys
import pygame
from pygame import draw
from pygame import draw_py
from pygame.locals import SRCALPHA
from pygame.tests import test_utils
from pygame.math import Vector2
PY3 = sys.version_info >= (3, 0, 0)
RED = BG_RED = pygame.Color("red")
GREEN = FG_GREEN = pygame.Color("green")
# Clockwise from the top left corner and ending with the center point.
RECT_POSITION_ATTRIBUTES = (
"topleft",
"midtop",
"topright",
"midright",
"bottomright",
"midbottom",
"bottomleft",
"midleft",
"center",
)
def get_border_values(surface, width, height):
    """Return the pixel values along each border of the surface.

    The result is a list of four lists in the order:
    top row, left column, right column, bottom row.
    """
    get_at = surface.get_at
    last_col, last_row = width - 1, height - 1

    top = [get_at((x, 0)) for x in range(width)]
    left = [get_at((0, y)) for y in range(height)]
    right = [get_at((last_col, y)) for y in range(height)]
    bottom = [get_at((x, last_row)) for x in range(width)]

    return [top, left, right, bottom]
def corners(surface):
    """Return the four corner positions of the surface as a tuple.

    Ordered clockwise starting from the top left corner.
    """
    last_col, last_row = (dim - 1 for dim in surface.get_size())
    return ((0, 0), (last_col, 0), (last_col, last_row), (0, last_row))
def rect_corners_mids_and_center(rect):
    """Return each corner, mid, and the center of a rect as a tuple.

    Clockwise from the top left corner and ending with the center point.
    """
    attributes = (
        "topleft",
        "midtop",
        "topright",
        "midright",
        "bottomright",
        "midbottom",
        "bottomleft",
        "midleft",
        "center",
    )
    return tuple(getattr(rect, name) for name in attributes)
def border_pos_and_color(surface):
    """Yield each border position and its color for a given surface.

    Clockwise from the top left corner. Each corner is emitted exactly once.
    """
    width, height = surface.get_size()
    right, bottom = width - 1, height - 1

    # Walk the border clockwise: top row left-to-right, right column
    # top-to-bottom (top-right already emitted), bottom row right-to-left
    # (bottom-right already emitted), left column bottom-to-top (both
    # left corners already emitted).
    top_edge = ((x, 0) for x in range(width))
    right_edge = ((right, y) for y in range(1, height))
    bottom_edge = ((x, bottom) for x in range(right - 1, -1, -1))
    left_edge = ((0, y) for y in range(bottom - 1, 0, -1))

    for edge in (top_edge, right_edge, bottom_edge, left_edge):
        for pos in edge:
            yield pos, surface.get_at(pos)
def get_color_points(surface, color, bounds_rect=None, match_color=True):
    """Get all points on the surface that match (or don't match) a color.

    If bounds_rect is None the full surface is checked, otherwise only the
    area it covers. With match_color True the points equal to the color are
    returned; with match_color False the points differing from it are.
    """
    if bounds_rect is None:
        x_range = range(surface.get_width())
        y_range = range(surface.get_height())
    else:
        x_range = range(bounds_rect.left, bounds_rect.right)
        y_range = range(bounds_rect.top, bounds_rect.bottom)

    get_at = surface.get_at  # Local alias for possible speed up.

    if match_color:
        keep = lambda value: value == color
    else:
        keep = lambda value: value != color

    surface.lock()  # Locking once is faster than per-pixel access.
    points = [(x, y) for x in x_range for y in y_range if keep(get_at((x, y)))]
    surface.unlock()

    return points
def create_bounding_rect(surface, surf_color, default_pos):
    """Create a rect to bound all the pixels that don't match surf_color.

    The default_pos parameter is used to position the bounding rect for the
    case where all pixels match the surf_color.
    """
    width, height = surface.get_clip().size
    get_at = surface.get_at  # Local binding for possible speed up.

    surface.lock()  # Locking once up front can speed up the pixel reads.
    points = [
        (x, y)
        for y in range(height)
        for x in range(width)
        if get_at((x, y)) != surf_color
    ]
    surface.unlock()

    if not points:
        # No differing points means a 0 sized rect positioned at default_pos.
        return pygame.Rect(default_pos, (0, 0))

    xs = [pt[0] for pt in points]
    ys = [pt[1] for pt in points]
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    return pygame.Rect((xmin, ymin), (xmax - xmin + 1, ymax - ymin + 1))
class InvalidBool(object):
    """To help test invalid bool values."""

    # With both truth-conversion hooks set to None, any truth test on an
    # instance raises TypeError (__nonzero__ is the Python 2 hook,
    # __bool__ the Python 3 hook).
    __nonzero__ = None
    __bool__ = None
class DrawTestCase(unittest.TestCase):
    """Base class to test draw module functions."""

    # Bind the draw module functions as static methods so the shared mixin
    # tests can call them uniformly via self.draw_*.
    draw_rect = staticmethod(draw.rect)
    draw_polygon = staticmethod(draw.polygon)
    draw_circle = staticmethod(draw.circle)
    draw_ellipse = staticmethod(draw.ellipse)
    draw_arc = staticmethod(draw.arc)
    draw_line = staticmethod(draw.line)
    draw_lines = staticmethod(draw.lines)
    draw_aaline = staticmethod(draw.aaline)
    draw_aalines = staticmethod(draw.aalines)
class PythonDrawTestCase(unittest.TestCase):
    """Base class to test draw_py module functions."""

    # Bind the available draw_py functions as static methods so the shared
    # mixin tests can call them uniformly via self.draw_*.
    # draw_py is currently missing some functions.
    # draw_rect = staticmethod(draw_py.draw_rect)
    draw_polygon = staticmethod(draw_py.draw_polygon)
    # draw_circle = staticmethod(draw_py.draw_circle)
    # draw_ellipse = staticmethod(draw_py.draw_ellipse)
    # draw_arc = staticmethod(draw_py.draw_arc)
    draw_line = staticmethod(draw_py.draw_line)
    draw_lines = staticmethod(draw_py.draw_lines)
    draw_aaline = staticmethod(draw_py.draw_aaline)
    draw_aalines = staticmethod(draw_py.draw_aalines)
### Ellipse Testing ###########################################################
class DrawEllipseMixin(object):
"""Mixin tests for drawing ellipses.
This class contains all the general ellipse drawing tests.
"""
def test_ellipse(self) :
surf = pygame.Surface((320,200))
pygame.draw.ellipse(surf,(255,0,0),(10,10,25,20))
def test_ellipse__args(self):
"""Ensures draw ellipse accepts the correct args."""
bounds_rect = self.draw_ellipse(
pygame.Surface((3, 3)), (0, 10, 0, 50), pygame.Rect((0, 0), (3, 2)), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__args_without_width(self):
"""Ensures draw ellipse accepts the args without a width."""
bounds_rect = self.draw_ellipse(
pygame.Surface((2, 2)), (1, 1, 1, 99), pygame.Rect((1, 1), (1, 1))
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__args_with_negative_width(self):
"""Ensures draw ellipse accepts the args with negative width."""
bounds_rect = self.draw_ellipse(
pygame.Surface((3, 3)), (0, 10, 0, 50), pygame.Rect((2, 3), (3, 2)), -1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
self.assertEqual(bounds_rect, pygame.Rect(2, 3, 0, 0))
def test_ellipse__args_with_width_gt_radius(self):
"""Ensures draw ellipse accepts the args with
width > rect.w // 2 and width > rect.h // 2.
"""
rect = pygame.Rect((0, 0), (4, 4))
bounds_rect = self.draw_ellipse(
pygame.Surface((3, 3)), (0, 10, 0, 50), rect, rect.w // 2 + 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
bounds_rect = self.draw_ellipse(
pygame.Surface((3, 3)), (0, 10, 0, 50), rect, rect.h // 2 + 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__kwargs(self):
"""Ensures draw ellipse accepts the correct kwargs
with and without a width arg.
"""
kwargs_list = [
{
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("yellow"),
"rect": pygame.Rect((0, 0), (3, 2)),
"width": 1,
},
{
"surface": pygame.Surface((2, 1)),
"color": (0, 10, 20),
"rect": (0, 0, 1, 1),
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_ellipse(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__kwargs_order_independent(self):
"""Ensures draw ellipse's kwargs are not order dependent."""
bounds_rect = self.draw_ellipse(
color=(1, 2, 3),
surface=pygame.Surface((3, 2)),
width=0,
rect=pygame.Rect((1, 0), (1, 1)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__args_missing(self):
"""Ensures draw ellipse detects any missing required args."""
surface = pygame.Surface((1, 1))
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(surface, pygame.Color("red"))
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse()
def test_ellipse__kwargs_missing(self):
"""Ensures draw ellipse detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((1, 2)),
"color": pygame.Color("red"),
"rect": pygame.Rect((1, 0), (2, 2)),
"width": 2,
}
for name in ("rect", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**invalid_kwargs)
def test_ellipse__arg_invalid_types(self):
"""Ensures draw ellipse detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
rect = pygame.Rect((1, 1), (1, 1))
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_ellipse(surface, color, rect, "1")
with self.assertRaises(TypeError):
# Invalid rect.
bounds_rect = self.draw_ellipse(surface, color, (1, 2, 3, 4, 5), 1)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_ellipse(surface, 2.3, rect, 0)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_ellipse(rect, color, rect, 2)
def test_ellipse__kwarg_invalid_types(self):
"""Ensures draw ellipse detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
rect = pygame.Rect((0, 1), (1, 1))
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"rect": rect,
"width": 1,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"rect": rect,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": (0, 0, 0), # Invalid rect.
"width": 1,
},
{"surface": surface, "color": color, "rect": rect, "width": 1.1},
] # Invalid width.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
def test_ellipse__kwarg_invalid_name(self):
"""Ensures draw ellipse detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
rect = pygame.Rect((0, 1), (2, 2))
kwargs_list = [
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"invalid": 1,
},
{"surface": surface, "color": color, "rect": rect, "invalid": 1},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
    def test_ellipse__args_and_kwargs(self):
        """Ensures draw ellipse accepts a combination of args/kwargs"""
        surface = pygame.Surface((3, 1))
        color = (255, 255, 0, 0)
        rect = pygame.Rect((1, 0), (2, 1))
        width = 0
        kwargs = {"surface": surface, "color": color, "rect": rect, "width": width}

        # Progressively convert leading kwargs into positional args so every
        # valid positional/keyword split of the argument list is exercised.
        for name in ("surface", "color", "rect", "width"):
            kwargs.pop(name)

            if "surface" == name:
                bounds_rect = self.draw_ellipse(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_ellipse(surface, color, **kwargs)
            elif "rect" == name:
                bounds_rect = self.draw_ellipse(surface, color, rect, **kwargs)
            else:
                bounds_rect = self.draw_ellipse(surface, color, rect, width, **kwargs)

            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_width_values(self):
"""Ensures draw ellipse accepts different width values."""
pos = (1, 1)
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
color = (10, 20, 30, 255)
kwargs = {
"surface": surface,
"color": color,
"rect": pygame.Rect(pos, (3, 2)),
"width": None,
}
for width in (-1000, -10, -1, 0, 1, 10, 1000):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = color if width >= 0 else surface_color
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_rect_formats(self):
"""Ensures draw ellipse accepts different rect formats."""
pos = (1, 1)
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((4, 4))
kwargs = {"surface": surface, "color": expected_color, "rect": None, "width": 0}
rects = (pygame.Rect(pos, (1, 3)), (pos, (2, 1)), (pos[0], pos[1], 1, 1))
for rect in rects:
surface.fill(surface_color) # Clear for each test.
kwargs["rect"] = rect
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_color_formats(self):
"""Ensures draw ellipse accepts different color formats."""
pos = (1, 1)
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (1, 2)),
"width": 0,
}
reds = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in reds:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__invalid_color_formats(self):
"""Ensures draw ellipse handles invalid color formats correctly."""
pos = (1, 1)
surface = pygame.Surface((4, 3))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (2, 2)),
"width": 1,
}
for expected_color in (2.3, surface):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
    def test_ellipse(self):
        """Tests ellipses of differing sizes on surfaces of differing sizes.

        Checks if the number of sides touching the border of the surface is
        correct.
        """
        left_top = [(0, 0), (1, 0), (0, 1), (1, 1)]
        sizes = [(4, 4), (5, 4), (4, 5), (5, 5)]
        color = (1, 13, 24, 255)

        def same_size(width, height, border_width):
            """Test for ellipses with the same size as the surface."""
            surface = pygame.Surface((width, height))

            self.draw_ellipse(surface, color, (0, 0, width, height), border_width)

            # For each of the four borders check if it contains the color.
            # An ellipse filling the surface must touch all four edges.
            borders = get_border_values(surface, width, height)
            for border in borders:
                self.assertTrue(color in border)

        def not_same_size(width, height, border_width, left, top):
            """Test for ellipses that aren't the same size as the surface."""
            surface = pygame.Surface((width, height))

            self.draw_ellipse(
                surface, color, (left, top, width - 1, height - 1), border_width
            )

            borders = get_border_values(surface, width, height)

            # Check if two sides of the ellipse are touching the border.
            # An ellipse one pixel smaller than the surface, shifted into a
            # corner, touches exactly the two nearest edges.
            sides_touching = [color in border for border in borders].count(True)
            self.assertEqual(sides_touching, 2)

        for width, height in sizes:
            # border_width 0 is filled, 1 is a 1 pixel outline.
            for border_width in (0, 1):
                same_size(width, height, border_width)
                for left, top in left_top:
                    not_same_size(width, height, border_width, left, top)
    def test_ellipse__thick_line(self):
        """Ensures a thick lined ellipse is drawn correctly.

        Checks the band of ellipse_color pixels at the top/bottom/left/right
        extremes of the ellipse is exactly `thickness` pixels deep, bounded on
        both sides by the surface_color.
        """
        ellipse_color = pygame.Color("yellow")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((40, 40))
        rect = pygame.Rect((0, 0), (31, 23))
        rect.center = surface.get_rect().center

        # As the lines get thicker the internals of the ellipse are not
        # cleanly defined. So only test up to a few thicknesses before the
        # maximum thickness.
        for thickness in range(1, min(*rect.size) // 2 - 2):
            surface.fill(surface_color)  # Clear for each test.

            self.draw_ellipse(surface, ellipse_color, rect, thickness)

            surface.lock()  # For possible speed up.

            # Check vertical thickness on the ellipse's top.
            # NOTE: thickness is passed as the assertEqual msg so failures
            # identify which thickness was being tested.
            x = rect.centerx
            y_start = rect.top
            y_end = rect.top + thickness - 1

            for y in range(y_start, y_end + 1):
                self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)

            # Check pixels above and below this line.
            self.assertEqual(surface.get_at((x, y_start - 1)), surface_color, thickness)
            self.assertEqual(surface.get_at((x, y_end + 1)), surface_color, thickness)

            # Check vertical thickness on the ellipse's bottom.
            x = rect.centerx
            y_start = rect.bottom - thickness
            y_end = rect.bottom - 1

            for y in range(y_start, y_end + 1):
                self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)

            # Check pixels above and below this line.
            self.assertEqual(surface.get_at((x, y_start - 1)), surface_color, thickness)
            self.assertEqual(surface.get_at((x, y_end + 1)), surface_color, thickness)

            # Check horizontal thickness on the ellipse's left.
            x_start = rect.left
            x_end = rect.left + thickness - 1
            y = rect.centery

            for x in range(x_start, x_end + 1):
                self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)

            # Check pixels to the left and right of this line.
            self.assertEqual(surface.get_at((x_start - 1, y)), surface_color, thickness)
            self.assertEqual(surface.get_at((x_end + 1, y)), surface_color, thickness)

            # Check horizontal thickness on the ellipse's right.
            x_start = rect.right - thickness
            x_end = rect.right - 1
            y = rect.centery

            for x in range(x_start, x_end + 1):
                self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)

            # Check pixels to the left and right of this line.
            self.assertEqual(surface.get_at((x_start - 1, y)), surface_color, thickness)
            self.assertEqual(surface.get_at((x_end + 1, y)), surface_color, thickness)

            surface.unlock()
    def test_ellipse__max_width(self):
        """Ensures an ellipse with max width (and greater) is drawn correctly.

        A width of at least half the smaller rect dimension fills the whole
        ellipse; the cross-section through the center must be solid and the
        pixels just outside the rect untouched.
        """
        ellipse_color = pygame.Color("yellow")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((40, 40))
        rect = pygame.Rect((0, 0), (31, 21))
        rect.center = surface.get_rect().center
        max_thickness = (min(*rect.size) + 1) // 2

        # Widths at and beyond the maximum must all draw a filled ellipse.
        for thickness in range(max_thickness, max_thickness + 3):
            surface.fill(surface_color)  # Clear for each test.

            self.draw_ellipse(surface, ellipse_color, rect, thickness)

            surface.lock()  # For possible speed up.

            # Check vertical thickness.
            for y in range(rect.top, rect.bottom):
                self.assertEqual(surface.get_at((rect.centerx, y)), ellipse_color)

            # Check horizontal thickness.
            for x in range(rect.left, rect.right):
                self.assertEqual(surface.get_at((x, rect.centery)), ellipse_color)

            # Check pixels above and below ellipse.
            self.assertEqual(
                surface.get_at((rect.centerx, rect.top - 1)), surface_color
            )
            self.assertEqual(
                surface.get_at((rect.centerx, rect.bottom + 1)), surface_color
            )

            # Check pixels to the left and right of the ellipse.
            self.assertEqual(
                surface.get_at((rect.left - 1, rect.centery)), surface_color
            )
            self.assertEqual(
                surface.get_at((rect.right + 1, rect.centery)), surface_color
            )

            surface.unlock()
    def _check_1_pixel_sized_ellipse(
        self, surface, collide_rect, surface_color, ellipse_color
    ):
        """Check the surface for a 1 pixel wide and/or high ellipse.

        Asserts every pixel inside collide_rect is ellipse_color and every
        other pixel on the surface is surface_color.
        """
        surf_w, surf_h = surface.get_size()

        surface.lock()  # For possible speed up.

        for pos in ((x, y) for y in range(surf_h) for x in range(surf_w)):
            # Since the ellipse is just a line we can use a rect to help find
            # where it is expected to be drawn.
            if collide_rect.collidepoint(pos):
                expected_color = ellipse_color
            else:
                expected_color = surface_color

            self.assertEqual(
                surface.get_at(pos),
                expected_color,
                "collide_rect={}, pos={}".format(collide_rect, pos),
            )

        surface.unlock()
    def test_ellipse__1_pixel_width(self):
        """Ensures an ellipse with a width of 1 is drawn correctly.

        An ellipse with a width of 1 pixel is a vertical line.
        """
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surf_w, surf_h = 10, 20

        surface = pygame.Surface((surf_w, surf_h))
        rect = pygame.Rect((0, 0), (1, 0))
        collide_rect = rect.copy()

        # Calculate some positions.
        off_left = -1
        off_right = surf_w
        off_bottom = surf_h
        center_x = surf_w // 2
        center_y = surf_h // 2

        # Test some even and odd heights.
        for ellipse_h in range(6, 10):
            # The ellipse is drawn on the edge of the rect so collide_rect
            # needs +1 height to track where it's drawn.
            collide_rect.h = ellipse_h + 1
            rect.h = ellipse_h

            # Calculate some variable positions.
            off_top = -(ellipse_h + 1)
            half_off_top = -(ellipse_h // 2)
            half_off_bottom = surf_h - (ellipse_h // 2)

            # Draw the ellipse in different positions: fully on-surface,
            # partially off-surface, and fully off-surface.
            positions = (
                (off_left, off_top),
                (off_left, half_off_top),
                (off_left, center_y),
                (off_left, half_off_bottom),
                (off_left, off_bottom),
                (center_x, off_top),
                (center_x, half_off_top),
                (center_x, center_y),
                (center_x, half_off_bottom),
                (center_x, off_bottom),
                (off_right, off_top),
                (off_right, half_off_top),
                (off_right, center_y),
                (off_right, half_off_bottom),
                (off_right, off_bottom),
            )

            for rect_pos in positions:
                surface.fill(surface_color)  # Clear before each draw.
                rect.topleft = rect_pos
                collide_rect.topleft = rect_pos

                self.draw_ellipse(surface, ellipse_color, rect)

                self._check_1_pixel_sized_ellipse(
                    surface, collide_rect, surface_color, ellipse_color
                )
    def test_ellipse__1_pixel_width_spanning_surface(self):
        """Ensures an ellipse with a width of 1 is drawn correctly
        when spanning the height of the surface.

        An ellipse with a width of 1 pixel is a vertical line.
        """
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surf_w, surf_h = 10, 20

        surface = pygame.Surface((surf_w, surf_h))
        rect = pygame.Rect((0, 0), (1, surf_h + 2))  # Longer than the surface.

        # Draw the ellipse in different positions: on-surface and off-surface.
        positions = (
            (-1, -1),  # (off_left, off_top)
            (0, -1),  # (left_edge, off_top)
            (surf_w // 2, -1),  # (center_x, off_top)
            (surf_w - 1, -1),  # (right_edge, off_top)
            (surf_w, -1),  # (off_right, off_top)
        )

        for rect_pos in positions:
            surface.fill(surface_color)  # Clear before each draw.
            rect.topleft = rect_pos

            self.draw_ellipse(surface, ellipse_color, rect)

            self._check_1_pixel_sized_ellipse(
                surface, rect, surface_color, ellipse_color
            )
    def test_ellipse__1_pixel_height(self):
        """Ensures an ellipse with a height of 1 is drawn correctly.

        An ellipse with a height of 1 pixel is a horizontal line.
        """
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surf_w, surf_h = 20, 10

        surface = pygame.Surface((surf_w, surf_h))
        rect = pygame.Rect((0, 0), (0, 1))
        collide_rect = rect.copy()

        # Calculate some positions.
        off_right = surf_w
        off_top = -1
        off_bottom = surf_h
        center_x = surf_w // 2
        center_y = surf_h // 2

        # Test some even and odd widths.
        for ellipse_w in range(6, 10):
            # The ellipse is drawn on the edge of the rect so collide_rect
            # needs +1 width to track where it's drawn.
            collide_rect.w = ellipse_w + 1
            rect.w = ellipse_w

            # Calculate some variable positions.
            off_left = -(ellipse_w + 1)
            half_off_left = -(ellipse_w // 2)
            half_off_right = surf_w - (ellipse_w // 2)

            # Draw the ellipse in different positions: fully on-surface,
            # partially off-surface, and fully off-surface.
            positions = (
                (off_left, off_top),
                (half_off_left, off_top),
                (center_x, off_top),
                (half_off_right, off_top),
                (off_right, off_top),
                (off_left, center_y),
                (half_off_left, center_y),
                (center_x, center_y),
                (half_off_right, center_y),
                (off_right, center_y),
                (off_left, off_bottom),
                (half_off_left, off_bottom),
                (center_x, off_bottom),
                (half_off_right, off_bottom),
                (off_right, off_bottom),
            )

            for rect_pos in positions:
                surface.fill(surface_color)  # Clear before each draw.
                rect.topleft = rect_pos
                collide_rect.topleft = rect_pos

                self.draw_ellipse(surface, ellipse_color, rect)

                self._check_1_pixel_sized_ellipse(
                    surface, collide_rect, surface_color, ellipse_color
                )
    def test_ellipse__1_pixel_height_spanning_surface(self):
        """Ensures an ellipse with a height of 1 is drawn correctly
        when spanning the width of the surface.

        An ellipse with a height of 1 pixel is a horizontal line.
        """
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surf_w, surf_h = 20, 10

        surface = pygame.Surface((surf_w, surf_h))
        rect = pygame.Rect((0, 0), (surf_w + 2, 1))  # Wider than the surface.

        # Draw the ellipse in different positions: on-surface and off-surface.
        positions = (
            (-1, -1),  # (off_left, off_top)
            (-1, 0),  # (off_left, top_edge)
            (-1, surf_h // 2),  # (off_left, center_y)
            (-1, surf_h - 1),  # (off_left, bottom_edge)
            (-1, surf_h),  # (off_left, off_bottom)
        )

        for rect_pos in positions:
            surface.fill(surface_color)  # Clear before each draw.
            rect.topleft = rect_pos

            self.draw_ellipse(surface, ellipse_color, rect)

            self._check_1_pixel_sized_ellipse(
                surface, rect, surface_color, ellipse_color
            )
    def test_ellipse__1_pixel_width_and_height(self):
        """Ensures an ellipse with a width and height of 1 is drawn correctly.

        An ellipse with a width and height of 1 pixel is a single pixel.
        """
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surf_w, surf_h = 10, 10

        surface = pygame.Surface((surf_w, surf_h))
        rect = pygame.Rect((0, 0), (1, 1))

        # Calculate some positions.
        off_left = -1
        off_right = surf_w
        off_top = -1
        off_bottom = surf_h
        left_edge = 0
        right_edge = surf_w - 1
        top_edge = 0
        bottom_edge = surf_h - 1
        center_x = surf_w // 2
        center_y = surf_h // 2

        # Draw the ellipse in different positions: center surface,
        # top/bottom/left/right edges, and off-surface.
        positions = (
            (off_left, off_top),
            (off_left, top_edge),
            (off_left, center_y),
            (off_left, bottom_edge),
            (off_left, off_bottom),
            (left_edge, off_top),
            (left_edge, top_edge),
            (left_edge, center_y),
            (left_edge, bottom_edge),
            (left_edge, off_bottom),
            (center_x, off_top),
            (center_x, top_edge),
            (center_x, center_y),
            (center_x, bottom_edge),
            (center_x, off_bottom),
            (right_edge, off_top),
            (right_edge, top_edge),
            (right_edge, center_y),
            (right_edge, bottom_edge),
            (right_edge, off_bottom),
            (off_right, off_top),
            (off_right, top_edge),
            (off_right, center_y),
            (off_right, bottom_edge),
            (off_right, off_bottom),
        )

        for rect_pos in positions:
            surface.fill(surface_color)  # Clear before each draw.
            rect.topleft = rect_pos

            self.draw_ellipse(surface, ellipse_color, rect)

            self._check_1_pixel_sized_ellipse(
                surface, rect, surface_color, ellipse_color
            )
    def test_ellipse__bounding_rect(self):
        """Ensures draw ellipse returns the correct bounding rect.

        Tests ellipses on and off the surface and a range of width/thickness
        values.
        """
        ellipse_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        min_width = min_height = 5
        max_width = max_height = 7
        sizes = ((min_width, min_height), (max_width, max_height))
        surface = pygame.Surface((20, 20), 0, 32)
        surf_rect = surface.get_rect()

        # Make a rect that is bigger than the surface to help test drawing
        # ellipses off and partially off the surface.
        big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)

        for pos in rect_corners_mids_and_center(
            surf_rect
        ) + rect_corners_mids_and_center(big_rect):
            # Each of the ellipse's rect position attributes will be set to
            # the pos value.
            for attr in RECT_POSITION_ATTRIBUTES:
                # Test using different rect sizes and thickness values.
                for width, height in sizes:
                    ellipse_rect = pygame.Rect((0, 0), (width, height))
                    setattr(ellipse_rect, attr, pos)

                    for thickness in (0, 1, 2, 3, min(width, height)):
                        surface.fill(surf_color)  # Clear for each test.

                        bounding_rect = self.draw_ellipse(
                            surface, ellipse_color, ellipse_rect, thickness
                        )

                        # Calculating the expected_rect after the ellipse
                        # is drawn (it uses what is actually drawn).
                        expected_rect = create_bounding_rect(
                            surface, surf_color, ellipse_rect.topleft
                        )

                        self.assertEqual(bounding_rect, expected_rect)
    def test_ellipse__surface_clip(self):
        """Ensures draw ellipse respects a surface's clip area.

        Tests drawing the ellipse filled and unfilled.
        """
        surfw = surfh = 30
        ellipse_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)

        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the ellipse's pos.

        for width in (0, 1):  # Filled and unfilled.
            # Test centering the ellipse along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                # Get the expected points by drawing the ellipse without the
                # clip area set.
                pos_rect.center = center
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_ellipse(surface, ellipse_color, pos_rect, width)
                expected_pts = get_color_points(surface, ellipse_color, clip_rect)

                # Clear the surface and set the clip area. Redraw the ellipse
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)

                self.draw_ellipse(surface, ellipse_color, pos_rect, width)

                surface.lock()  # For possible speed up.

                # Check all the surface points to ensure only the expected_pts
                # are the ellipse_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = ellipse_color
                    else:
                        expected_color = surface_color

                    self.assertEqual(surface.get_at(pt), expected_color, pt)

                surface.unlock()
class DrawEllipseTest(DrawEllipseMixin, DrawTestCase):
    """Test draw module function ellipse.

    This class inherits the general tests from DrawEllipseMixin. It is also
    the class to add any draw.ellipse specific tests to.
    """

    # All test methods come from DrawEllipseMixin; DrawTestCase supplies
    # the draw_* static methods they call.
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing ellipses.
# @unittest.skip('draw_py.draw_ellipse not supported yet')
# class PythonDrawEllipseTest(DrawEllipseMixin, PythonDrawTestCase):
# """Test draw_py module function draw_ellipse.
#
# This class inherits the general tests from DrawEllipseMixin. It is also
# the class to add any draw_py.draw_ellipse specific tests to.
# """
### Line/Lines/AALine/AALines Testing #########################################
class BaseLineMixin(object):
    """Mixin base for drawing various lines.

    This class contains general helper methods and setup for testing the
    different types of lines.
    """

    # A small palette of RGB colors used by the line drawing tests.
    COLORS = (
        (0, 0, 0),
        (255, 0, 0),
        (0, 255, 0),
        (0, 0, 255),
        (255, 255, 0),
        (255, 0, 255),
        (0, 255, 255),
        (255, 255, 255),
    )

    @staticmethod
    def _create_surfaces():
        # Create some surfaces with different sizes, depths, and flags.
        # NOTE(review): each iteration re-creates the display surface via
        # pygame.display.set_mode rather than building an off-screen
        # pygame.Surface — confirm this is intended.
        surfaces = []
        for size in ((49, 49), (50, 50)):
            for depth in (8, 16, 24, 32):
                for flags in (0, SRCALPHA):
                    surface = pygame.display.set_mode(size, flags, depth)
                    surfaces.append(surface)
                    surfaces.append(surface.convert_alpha())
        return surfaces

    @staticmethod
    def _rect_lines(rect):
        # Yields pairs of end points and their reverse (to test symmetry).
        # Uses a rect with the points radiating from its midleft.
        for pt in rect_corners_mids_and_center(rect):
            if pt == rect.midleft or pt == rect.center:
                # Don't bother with these points.
                continue
            yield (rect.midleft, pt)
            yield (pt, rect.midleft)
### Line Testing ##############################################################
class LineMixin(BaseLineMixin):
"""Mixin test for drawing a single line.
This class contains all the general single line drawing tests.
"""
def test_line__args(self):
"""Ensures draw line accepts the correct args."""
bounds_rect = self.draw_line(
pygame.Surface((3, 3)), (0, 10, 0, 50), (0, 0), (1, 1), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__args_without_width(self):
"""Ensures draw line accepts the args without a width."""
bounds_rect = self.draw_line(
pygame.Surface((2, 2)), (0, 0, 0, 50), (0, 0), (2, 2)
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__kwargs(self):
"""Ensures draw line accepts the correct kwargs
with and without a width arg.
"""
surface = pygame.Surface((4, 4))
color = pygame.Color("yellow")
start_pos = (1, 1)
end_pos = (2, 2)
kwargs_list = [
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"width": 1,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_line(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__kwargs_order_independent(self):
"""Ensures draw line's kwargs are not order dependent."""
bounds_rect = self.draw_line(
start_pos=(1, 2),
end_pos=(2, 1),
width=2,
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__args_missing(self):
"""Ensures draw line detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(surface, color, (0, 0))
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_line()
def test_line__kwargs_missing(self):
"""Ensures draw line detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((3, 2)),
"color": pygame.Color("red"),
"start_pos": (2, 1),
"end_pos": (2, 2),
"width": 1,
}
for name in ("end_pos", "start_pos", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**invalid_kwargs)
def test_line__arg_invalid_types(self):
"""Ensures draw line detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
start_pos = (0, 1)
end_pos = (1, 2)
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_line(surface, color, start_pos, end_pos, "1")
with self.assertRaises(TypeError):
# Invalid end_pos.
bounds_rect = self.draw_line(surface, color, start_pos, (1, 2, 3))
with self.assertRaises(TypeError):
# Invalid start_pos.
bounds_rect = self.draw_line(surface, color, (1,), end_pos)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_line(surface, 2.3, start_pos, end_pos)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_line((1, 2, 3, 4), color, start_pos, end_pos)
def test_line__kwarg_invalid_types(self):
"""Ensures draw line detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
start_pos = (1, 0)
end_pos = (2, 0)
width = 1
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"width": width,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"start_pos": start_pos,
"end_pos": end_pos,
"width": width,
},
{
"surface": surface,
"color": color,
"start_pos": (0, 0, 0), # Invalid start_pos.
"end_pos": end_pos,
"width": width,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": (0,), # Invalid end_pos.
"width": width,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"width": 1.2,
},
] # Invalid width.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__kwarg_invalid_name(self):
"""Ensures draw line detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
start_pos = (1, 1)
end_pos = (2, 0)
kwargs_list = [
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"width": 1,
"invalid": 1,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
    def test_line__args_and_kwargs(self):
        """Ensures draw line accepts a combination of args/kwargs"""
        surface = pygame.Surface((3, 2))
        color = (255, 255, 0, 0)
        start_pos = (0, 1)
        end_pos = (1, 2)
        width = 0
        kwargs = {
            "surface": surface,
            "color": color,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "width": width,
        }

        # Progressively convert leading kwargs into positional args so every
        # valid positional/keyword split of the argument list is exercised.
        for name in ("surface", "color", "start_pos", "end_pos", "width"):
            kwargs.pop(name)

            if "surface" == name:
                bounds_rect = self.draw_line(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_line(surface, color, **kwargs)
            elif "start_pos" == name:
                bounds_rect = self.draw_line(surface, color, start_pos, **kwargs)
            elif "end_pos" == name:
                bounds_rect = self.draw_line(
                    surface, color, start_pos, end_pos, **kwargs
                )
            else:
                bounds_rect = self.draw_line(
                    surface, color, start_pos, end_pos, width, **kwargs
                )

            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__valid_width_values(self):
"""Ensures draw line accepts different width values."""
line_color = pygame.Color("yellow")
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
pos = (2, 1)
kwargs = {
"surface": surface,
"color": line_color,
"start_pos": pos,
"end_pos": (2, 2),
"width": None,
}
for width in (-100, -10, -1, 0, 1, 10, 100):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = line_color if width > 0 else surface_color
bounds_rect = self.draw_line(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__valid_start_pos_formats(self):
"""Ensures draw line accepts different start_pos formats."""
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((4, 4))
kwargs = {
"surface": surface,
"color": expected_color,
"start_pos": None,
"end_pos": (2, 2),
"width": 2,
}
x, y = 2, 1 # start position
# The point values can be ints or floats.
for start_pos in ((x, y), (x + 0.1, y), (x, y + 0.1), (x + 0.1, y + 0.1)):
# The point type can be a tuple/list/Vector2.
for seq_type in (tuple, list, Vector2):
surface.fill(surface_color) # Clear for each test.
kwargs["start_pos"] = seq_type(start_pos)
bounds_rect = self.draw_line(**kwargs)
self.assertEqual(surface.get_at((x, y)), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__valid_end_pos_formats(self):
"""Ensures draw line accepts different end_pos formats."""
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((4, 4))
kwargs = {
"surface": surface,
"color": expected_color,
"start_pos": (2, 1),
"end_pos": None,
"width": 2,
}
x, y = 2, 2 # end position
# The point values can be ints or floats.
for end_pos in ((x, y), (x + 0.2, y), (x, y + 0.2), (x + 0.2, y + 0.2)):
# The point type can be a tuple/list/Vector2.
for seq_type in (tuple, list, Vector2):
surface.fill(surface_color) # Clear for each test.
kwargs["end_pos"] = seq_type(end_pos)
bounds_rect = self.draw_line(**kwargs)
self.assertEqual(surface.get_at((x, y)), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__invalid_start_pos_formats(self):
"""Ensures draw line handles invalid start_pos formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": None,
"end_pos": (2, 2),
"width": 1,
}
start_pos_fmts = (
(2,), # Too few coords.
(2, 1, 0), # Too many coords.
(2, "1"), # Wrong type.
set([2, 1]), # Wrong type.
dict(((2, 1),)),
) # Wrong type.
for start_pos in start_pos_fmts:
kwargs["start_pos"] = start_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__invalid_end_pos_formats(self):
"""Ensures draw line handles invalid end_pos formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": (2, 2),
"end_pos": None,
"width": 1,
}
end_pos_fmts = (
(2,), # Too few coords.
(2, 1, 0), # Too many coords.
(2, "1"), # Wrong type.
set([2, 1]), # Wrong type.
dict(((2, 1),)),
) # Wrong type.
for end_pos in end_pos_fmts:
kwargs["end_pos"] = end_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__valid_color_formats(self):
"""Ensures draw line accepts different color formats."""
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
pos = (1, 1)
kwargs = {
"surface": surface,
"color": None,
"start_pos": pos,
"end_pos": (2, 1),
"width": 3,
}
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_line(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__invalid_color_formats(self):
"""Ensures draw line handles invalid color formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"start_pos": (1, 1),
"end_pos": (2, 1),
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__color(self):
"""Tests if the line drawn is the correct color."""
pos = (0, 0)
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_line(surface, expected_color, pos, (1, 0))
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def todo_test_line__color_with_thickness(self):
        """Ensures a thick line is drawn using the correct color."""
        # Placeholder: the todo_ prefix (not test_) keeps unittest from
        # collecting this until it is implemented.
        self.fail()
def test_line__gaps(self):
"""Tests if the line drawn contains any gaps."""
expected_color = (255, 255, 255)
for surface in self._create_surfaces():
width = surface.get_width()
self.draw_line(surface, expected_color, (0, 0), (width - 1, 0))
for x in range(width):
pos = (x, 0)
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def todo_test_line__gaps_with_thickness(self):
        """Ensures a thick line is drawn without any gaps."""
        # Placeholder: the todo_ prefix (not test_) keeps unittest from
        # collecting this until it is implemented.
        self.fail()
    def test_line__bounding_rect(self):
        """Ensures draw line returns the correct bounding rect.

        Tests lines with endpoints on and off the surface and a range of
        width/thickness values.
        """
        if isinstance(self, PythonDrawTestCase):
            self.skipTest("bounding rects not supported in draw_py.draw_line")

        line_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        helper_rect = pygame.Rect((0, 0), (width, height))

        # Testing surfaces of different sizes. One larger than the helper_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()

            # Move the helper rect to different positions to test line
            # endpoints on and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                helper_rect.center = pos

                # Draw using different thicknesses.
                for thickness in range(-1, 5):
                    for start, end in self._rect_lines(helper_rect):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_line(
                            surface, line_color, start, end, thickness
                        )

                        if 0 < thickness:
                            # Calculating the expected_rect after the line is
                            # drawn (it uses what is actually drawn).
                            expected_rect = create_bounding_rect(
                                surface, surf_color, start
                            )
                        else:
                            # Nothing drawn for zero/negative thickness: the
                            # bounding rect collapses to the start point.
                            expected_rect = pygame.Rect(start, (0, 0))

                        self.assertEqual(
                            bounding_rect,
                            expected_rect,
                            "start={}, end={}, size={}, thickness={}".format(
                                start, end, size, thickness
                            ),
                        )
    def test_line__surface_clip(self):
        """Ensures draw line respects a surface's clip area.

        Draws each line twice - once unclipped to record which pixels fall
        inside the clip area, then clipped - and verifies only those pixels
        were modified.
        """
        surfw = surfh = 30
        line_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)

        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the line's pos.

        for thickness in (1, 3):  # Test different line widths.
            # Test centering the line along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                # Get the expected points by drawing the line without the
                # clip area set.
                pos_rect.center = center
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_line(
                    surface, line_color, pos_rect.midtop, pos_rect.midbottom, thickness
                )
                expected_pts = get_color_points(surface, line_color, clip_rect)

                # Clear the surface and set the clip area. Redraw the line
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_line(
                    surface, line_color, pos_rect.midtop, pos_rect.midbottom, thickness
                )
                surface.lock()  # For possible speed up.

                # Check all the surface points to ensure only the expected_pts
                # are the line_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = line_color
                    else:
                        expected_color = surface_color

                    self.assertEqual(surface.get_at(pt), expected_color, pt)

                surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing single lines.
# @unittest.skip('draw_py.draw_line not fully supported yet')
# class PythonDrawLineTest(LineMixin, PythonDrawTestCase):
# """Test draw_py module function line.
#
# This class inherits the general tests from LineMixin. It is also the class
# to add any draw_py.draw_line specific tests to.
# """
class DrawLineTest(LineMixin, DrawTestCase):
"""Test draw module function line.
This class inherits the general tests from LineMixin. It is also the class
to add any draw.line specific tests to.
"""
def test_line_endianness(self):
""" test color component order """
for depth in (24, 32):
surface = pygame.Surface((5, 3), 0, depth)
surface.fill(pygame.Color(0, 0, 0))
self.draw_line(surface, pygame.Color(255, 0, 0), (0, 1), (2, 1), 1)
self.assertGreater(surface.get_at((1, 1)).r, 0, "there should be red here")
surface.fill(pygame.Color(0, 0, 0))
self.draw_line(surface, pygame.Color(0, 0, 255), (0, 1), (2, 1), 1)
self.assertGreater(surface.get_at((1, 1)).b, 0, "there should be blue here")
    def test_line(self):
        """Checks drawn pixels and the returned rect of draw.line.

        Covers a 1-wide horizontal line, then a set of 2-wide lines in
        several directions, verifying drawn/undrawn pixels and the exact
        returned bounding rect.
        """
        # (l, t), (l, t)
        self.surf_size = (320, 200)
        self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
        self.color = (1, 13, 24, 205)

        drawn = draw.line(self.surf, self.color, (1, 0), (200, 0))
        self.assertEqual(
            drawn.right, 201, "end point arg should be (or at least was) inclusive"
        )

        # Should be colored where it's supposed to be
        for pt in test_utils.rect_area_pts(drawn):
            self.assertEqual(self.surf.get_at(pt), self.color)

        # And not where it shouldn't
        for pt in test_utils.rect_outer_bounds(drawn):
            self.assertNotEqual(self.surf.get_at(pt), self.color)

        # Line width greater that 1
        line_width = 2
        offset = 5
        a = (offset, offset)
        b = (self.surf_size[0] - offset, a[1])
        c = (a[0], self.surf_size[1] - offset)
        d = (b[0], c[1])
        e = (a[0] + offset, c[1])
        f = (b[0], c[0] + 5)
        lines = [
            (a, d),
            (b, c),
            (c, b),
            (d, a),
            (a, b),
            (b, a),
            (a, c),
            (c, a),
            (a, e),
            (e, a),
            (a, f),
            (f, a),
            (a, a),
        ]

        for p1, p2 in lines:
            msg = "%s - %s" % (p1, p2)
            # plow/phigh order endpoints by x for the out-of-bounds checks.
            if p1[0] <= p2[0]:
                plow = p1
                phigh = p2
            else:
                plow = p2
                phigh = p1

            self.surf.fill((0, 0, 0))
            rec = draw.line(self.surf, (255, 255, 255), p1, p2, line_width)
            # A thick line is widened along its minor axis: yinc for
            # mostly-horizontal lines, xinc for mostly-vertical ones.
            xinc = yinc = 0

            if abs(p1[0] - p2[0]) > abs(p1[1] - p2[1]):
                yinc = 1
            else:
                xinc = 1

            # Both endpoints must be drawn across the full line width.
            for i in range(line_width):
                p = (p1[0] + xinc * i, p1[1] + yinc * i)
                self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg)

                p = (p2[0] + xinc * i, p2[1] + yinc * i)
                self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg)

            # Pixels just outside the thick line must stay black.
            p = (plow[0] - 1, plow[1])
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)

            p = (plow[0] + xinc * line_width, plow[1] + yinc * line_width)
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)

            p = (phigh[0] + xinc * line_width, phigh[1] + yinc * line_width)
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)

            # Expected bounding rect: top-left of the endpoints, spanning
            # the line plus its extra width along the minor axis.
            if p1[0] < p2[0]:
                rx = p1[0]
            else:
                rx = p2[0]

            if p1[1] < p2[1]:
                ry = p1[1]
            else:
                ry = p2[1]

            w = abs(p2[0] - p1[0]) + 1 + xinc * (line_width - 1)
            h = abs(p2[1] - p1[1]) + 1 + yinc * (line_width - 1)
            msg += ", %s" % (rec,)
            self.assertEqual(rec, (rx, ry, w, h), msg)
    def test_line_for_gaps(self):
        """Ensures thick lines are drawn without gaps."""
        # This checks bug Thick Line Bug #448
        width = 200
        height = 200
        surf = pygame.Surface((width, height), pygame.SRCALPHA)

        def white_surrounded_pixels(x, y):
            # Count the 4-connected neighbors of (x, y) that are white.
            offsets = [(1, 0), (0, 1), (-1, 0), (0, -1)]
            WHITE = (255, 255, 255, 255)
            return len(
                [1 for dx, dy in offsets if surf.get_at((x + dx, y + dy)) == WHITE]
            )

        def check_white_line(start, end):
            # Draw a 30-wide white line, then verify no black pixel has 3+
            # white neighbors - that would mean a gap inside the line.
            surf.fill((0, 0, 0))
            pygame.draw.line(surf, (255, 255, 255), start, end, 30)

            BLACK = (0, 0, 0, 255)
            for x in range(1, width - 1):
                for y in range(1, height - 1):
                    if surf.get_at((x, y)) == BLACK:
                        self.assertTrue(white_surrounded_pixels(x, y) < 3)

        check_white_line((50, 50), (140, 0))
        check_white_line((50, 50), (0, 120))
        check_white_line((50, 50), (199, 198))
### Lines Testing #############################################################
class LinesMixin(BaseLineMixin):
"""Mixin test for drawing lines.
This class contains all the general lines drawing tests.
"""
def test_lines__args(self):
"""Ensures draw lines accepts the correct args."""
bounds_rect = self.draw_lines(
pygame.Surface((3, 3)), (0, 10, 0, 50), False, ((0, 0), (1, 1)), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__args_without_width(self):
"""Ensures draw lines accepts the args without a width."""
bounds_rect = self.draw_lines(
pygame.Surface((2, 2)), (0, 0, 0, 50), False, ((0, 0), (1, 1))
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__kwargs(self):
"""Ensures draw lines accepts the correct kwargs
with and without a width arg.
"""
surface = pygame.Surface((4, 4))
color = pygame.Color("yellow")
points = ((0, 0), (1, 1), (2, 2))
kwargs_list = [
{
"surface": surface,
"color": color,
"closed": False,
"points": points,
"width": 1,
},
{"surface": surface, "color": color, "closed": False, "points": points},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_lines(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__kwargs_order_independent(self):
"""Ensures draw lines's kwargs are not order dependent."""
bounds_rect = self.draw_lines(
closed=1,
points=((0, 0), (1, 1), (2, 2)),
width=2,
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__args_missing(self):
"""Ensures draw lines detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(surface, color, 0)
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines()
def test_lines__kwargs_missing(self):
"""Ensures draw lines detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((3, 2)),
"color": pygame.Color("red"),
"closed": 1,
"points": ((2, 2), (1, 1)),
"width": 1,
}
for name in ("points", "closed", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**invalid_kwargs)
def test_lines__arg_invalid_types(self):
"""Ensures draw lines detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
closed = 0
points = ((1, 2), (2, 1))
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_lines(surface, color, closed, points, "1")
with self.assertRaises(TypeError):
# Invalid points.
bounds_rect = self.draw_lines(surface, color, closed, (1, 2, 3))
with self.assertRaises(TypeError):
# Invalid closed.
bounds_rect = self.draw_lines(surface, color, InvalidBool(), points)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_lines(surface, 2.3, closed, points)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_lines((1, 2, 3, 4), color, closed, points)
def test_lines__kwarg_invalid_types(self):
"""Ensures draw lines detects invalid kwarg types."""
valid_kwargs = {
"surface": pygame.Surface((3, 3)),
"color": pygame.Color("green"),
"closed": False,
"points": ((1, 2), (2, 1)),
"width": 1,
}
invalid_kwargs = {
"surface": pygame.Surface,
"color": 2.3,
"closed": InvalidBool(),
"points": (0, 0, 0),
"width": 1.2,
}
for kwarg in ("surface", "color", "closed", "points", "width"):
kwargs = dict(valid_kwargs)
kwargs[kwarg] = invalid_kwargs[kwarg]
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
def test_lines__kwarg_invalid_name(self):
"""Ensures draw lines detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
closed = 1
points = ((1, 2), (2, 1))
kwargs_list = [
{
"surface": surface,
"color": color,
"closed": closed,
"points": points,
"width": 1,
"invalid": 1,
},
{
"surface": surface,
"color": color,
"closed": closed,
"points": points,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
def test_lines__args_and_kwargs(self):
"""Ensures draw lines accepts a combination of args/kwargs"""
surface = pygame.Surface((3, 2))
color = (255, 255, 0, 0)
closed = 0
points = ((1, 2), (2, 1))
width = 1
kwargs = {
"surface": surface,
"color": color,
"closed": closed,
"points": points,
"width": width,
}
for name in ("surface", "color", "closed", "points", "width"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_lines(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_lines(surface, color, **kwargs)
elif "closed" == name:
bounds_rect = self.draw_lines(surface, color, closed, **kwargs)
elif "points" == name:
bounds_rect = self.draw_lines(surface, color, closed, points, **kwargs)
else:
bounds_rect = self.draw_lines(
surface, color, closed, points, width, **kwargs
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__valid_width_values(self):
"""Ensures draw lines accepts different width values."""
line_color = pygame.Color("yellow")
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
pos = (1, 1)
kwargs = {
"surface": surface,
"color": line_color,
"closed": False,
"points": (pos, (2, 1)),
"width": None,
}
for width in (-100, -10, -1, 0, 1, 10, 100):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = line_color if width > 0 else surface_color
bounds_rect = self.draw_lines(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_lines__valid_points_format(self):
        """Ensures draw lines accepts different points formats."""
        expected_color = (10, 20, 30, 255)
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": None,
            "width": 1,
        }

        # The point type can be a tuple/list/Vector2.
        point_types = (
            (tuple, tuple, tuple, tuple),  # all tuples
            (list, list, list, list),  # all lists
            (Vector2, Vector2, Vector2, Vector2),  # all Vector2s
            (list, Vector2, tuple, Vector2),  # mix
        )

        # The point values can be ints or floats.
        point_values = (
            ((1, 1), (2, 1), (2, 2), (1, 2)),
            ((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
        )

        # Each sequence of points can be a tuple or a list.
        seq_types = (tuple, list)

        for point_type in point_types:
            for values in point_values:
                # The first point is always an int pair, so it can be used
                # directly as the pixel position to verify.
                check_pos = values[0]
                points = [point_type[i](pt) for i, pt in enumerate(values)]

                for seq_type in seq_types:
                    surface.fill(surface_color)  # Clear for each test.
                    kwargs["points"] = seq_type(points)

                    bounds_rect = self.draw_lines(**kwargs)

                    self.assertEqual(surface.get_at(check_pos), expected_color)
                    self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__invalid_points_formats(self):
"""Ensures draw lines handles invalid points formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"closed": False,
"points": None,
"width": 1,
}
points_fmts = (
((1, 1), (2,)), # Too few coords.
((1, 1), (2, 2, 2)), # Too many coords.
((1, 1), (2, "2")), # Wrong type.
((1, 1), set([2, 3])), # Wrong type.
((1, 1), dict(((2, 2), (3, 3)))), # Wrong type.
set(((1, 1), (1, 2))), # Wrong type.
dict(((1, 1), (4, 4))),
) # Wrong type.
for points in points_fmts:
kwargs["points"] = points
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
def test_lines__invalid_points_values(self):
"""Ensures draw lines handles invalid points values correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"closed": False,
"points": None,
"width": 1,
}
for points in ([], ((1, 1),)): # Too few points.
for seq_type in (tuple, list): # Test as tuples and lists.
kwargs["points"] = seq_type(points)
with self.assertRaises(ValueError):
bounds_rect = self.draw_lines(**kwargs)
def test_lines__valid_closed_values(self):
"""Ensures draw lines accepts different closed values."""
line_color = pygame.Color("blue")
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
pos = (1, 2)
kwargs = {
"surface": surface,
"color": line_color,
"closed": None,
"points": ((1, 1), (3, 1), (3, 3), (1, 3)),
"width": 1,
}
true_values = (-7, 1, 10, "2", 3.1, (4,), [5], True)
false_values = (None, "", 0, (), [], False)
for closed in true_values + false_values:
surface.fill(surface_color) # Clear for each test.
kwargs["closed"] = closed
expected_color = line_color if closed else surface_color
bounds_rect = self.draw_lines(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__valid_color_formats(self):
"""Ensures draw lines accepts different color formats."""
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
pos = (1, 1)
kwargs = {
"surface": surface,
"color": None,
"closed": False,
"points": (pos, (2, 1)),
"width": 3,
}
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_lines(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__invalid_color_formats(self):
"""Ensures draw lines handles invalid color formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"closed": False,
"points": ((1, 1), (1, 2)),
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
def test_lines__color(self):
"""Tests if the lines drawn are the correct color.
Draws lines around the border of the given surface and checks if all
borders of the surface only contain the given color.
"""
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_lines(surface, expected_color, True, corners(surface))
for pos, color in border_pos_and_color(surface):
self.assertEqual(color, expected_color, "pos={}".format(pos))
    def todo_test_lines__color_with_thickness(self):
        """Ensures thick lines are drawn using the correct color."""
        # Placeholder: the todo_ prefix (not test_) keeps unittest from
        # collecting this until it is implemented.
        self.fail()
def test_lines__gaps(self):
"""Tests if the lines drawn contain any gaps.
Draws lines around the border of the given surface and checks if
all borders of the surface contain any gaps.
"""
expected_color = (255, 255, 255)
for surface in self._create_surfaces():
self.draw_lines(surface, expected_color, True, corners(surface))
for pos, color in border_pos_and_color(surface):
self.assertEqual(color, expected_color, "pos={}".format(pos))
    def todo_test_lines__gaps_with_thickness(self):
        """Ensures thick lines are drawn without any gaps."""
        # Placeholder: the todo_ prefix (not test_) keeps unittest from
        # collecting this until it is implemented.
        self.fail()
    def test_lines__bounding_rect(self):
        """Ensures draw lines returns the correct bounding rect.

        Tests lines with endpoints on and off the surface and a range of
        width/thickness values.
        """
        line_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        pos_rect = pygame.Rect((0, 0), (width, height))

        # Testing surfaces of different sizes. One larger than the pos_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()

            # Move pos_rect to different positions to test line endpoints on
            # and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                pos_rect.center = pos
                # Shape: Triangle (if closed), ^ caret (if not closed).
                pts = (pos_rect.midleft, pos_rect.midtop, pos_rect.midright)
                pos = pts[0]  # Rect position if nothing drawn.

                # Draw using different thickness and closed values.
                for thickness in range(-1, 5):
                    for closed in (True, False):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_lines(
                            surface, line_color, closed, pts, thickness
                        )

                        if 0 < thickness:
                            # Calculating the expected_rect after the lines are
                            # drawn (it uses what is actually drawn).
                            expected_rect = create_bounding_rect(
                                surface, surf_color, pos
                            )
                        else:
                            # Nothing drawn for zero/negative thickness: the
                            # bounding rect collapses to the first point.
                            expected_rect = pygame.Rect(pos, (0, 0))

                        self.assertEqual(bounding_rect, expected_rect)
    def test_lines__surface_clip(self):
        """Ensures draw lines respects a surface's clip area.

        Draws the lines twice - once unclipped to record which pixels fall
        inside the clip area, then clipped - and verifies only those pixels
        were modified.
        """
        surfw = surfh = 30
        line_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)

        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the lines' pos.

        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing the lines over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            pts = (pos_rect.midtop, pos_rect.center, pos_rect.midbottom)
            for closed in (True, False):  # Test closed and not closed.
                for thickness in (1, 3):  # Test different line widths.
                    # Get the expected points by drawing the lines without the
                    # clip area set.
                    surface.set_clip(None)
                    surface.fill(surface_color)
                    self.draw_lines(surface, line_color, closed, pts, thickness)
                    expected_pts = get_color_points(surface, line_color, clip_rect)

                    # Clear the surface and set the clip area. Redraw the lines
                    # and check that only the clip area is modified.
                    surface.fill(surface_color)
                    surface.set_clip(clip_rect)
                    self.draw_lines(surface, line_color, closed, pts, thickness)
                    surface.lock()  # For possible speed up.

                    # Check all the surface points to ensure only the
                    # expected_pts are the line_color.
                    for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                        if pt in expected_pts:
                            expected_color = line_color
                        else:
                            expected_color = surface_color

                        self.assertEqual(surface.get_at(pt), expected_color, pt)

                    surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing lines.
# class PythonDrawLinesTest(LinesMixin, PythonDrawTestCase):
# """Test draw_py module function lines.
#
# This class inherits the general tests from LinesMixin. It is also the
# class to add any draw_py.draw_lines specific tests to.
# """
class DrawLinesTest(LinesMixin, DrawTestCase):
    """Test draw module function lines.

    This class inherits the general tests from LinesMixin. It is also the class
    to add any draw.lines specific tests to.
    """

    # Currently relies entirely on the inherited LinesMixin tests.
### AALine Testing ############################################################
class AALineMixin(BaseLineMixin):
"""Mixin test for drawing a single aaline.
This class contains all the general single aaline drawing tests.
"""
def test_aaline__args(self):
"""Ensures draw aaline accepts the correct args."""
bounds_rect = self.draw_aaline(
pygame.Surface((3, 3)), (0, 10, 0, 50), (0, 0), (1, 1), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__args_without_blend(self):
"""Ensures draw aaline accepts the args without a blend."""
bounds_rect = self.draw_aaline(
pygame.Surface((2, 2)), (0, 0, 0, 50), (0, 0), (2, 2)
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__kwargs(self):
"""Ensures draw aaline accepts the correct kwargs
with and without a blend arg.
"""
surface = pygame.Surface((4, 4))
color = pygame.Color("yellow")
start_pos = (1, 1)
end_pos = (2, 2)
kwargs_list = [
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"blend": 1,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_aaline(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__kwargs_order_independent(self):
"""Ensures draw aaline's kwargs are not order dependent."""
bounds_rect = self.draw_aaline(
start_pos=(1, 2),
end_pos=(2, 1),
blend=1,
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__args_missing(self):
"""Ensures draw aaline detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(surface, color, (0, 0))
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline()
def test_aaline__kwargs_missing(self):
"""Ensures draw aaline detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((3, 2)),
"color": pygame.Color("red"),
"start_pos": (2, 1),
"end_pos": (2, 2),
"blend": 1,
}
for name in ("end_pos", "start_pos", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**invalid_kwargs)
def test_aaline__arg_invalid_types(self):
"""Ensures draw aaline detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
start_pos = (0, 1)
end_pos = (1, 2)
with self.assertRaises(TypeError):
# Invalid blend.
bounds_rect = self.draw_aaline(surface, color, start_pos, end_pos, "1")
with self.assertRaises(TypeError):
# Invalid end_pos.
bounds_rect = self.draw_aaline(surface, color, start_pos, (1, 2, 3))
with self.assertRaises(TypeError):
# Invalid start_pos.
bounds_rect = self.draw_aaline(surface, color, (1,), end_pos)
with self.assertRaises(ValueError):
# Invalid color.
bounds_rect = self.draw_aaline(surface, "invalid-color", start_pos, end_pos)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_aaline((1, 2, 3, 4), color, start_pos, end_pos)
def test_aaline__kwarg_invalid_types(self):
"""Ensures draw aaline detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
start_pos = (1, 0)
end_pos = (2, 0)
blend = 1
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"blend": blend,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"start_pos": start_pos,
"end_pos": end_pos,
"blend": blend,
},
{
"surface": surface,
"color": color,
"start_pos": (0, 0, 0), # Invalid start_pos.
"end_pos": end_pos,
"blend": blend,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": (0,), # Invalid end_pos.
"blend": blend,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"blend": 1.2,
},
] # Invalid blend.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__kwarg_invalid_name(self):
"""Ensures draw aaline detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
start_pos = (1, 1)
end_pos = (2, 0)
kwargs_list = [
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"blend": 1,
"invalid": 1,
},
{
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__args_and_kwargs(self):
"""Ensures draw aaline accepts a combination of args/kwargs"""
surface = pygame.Surface((3, 2))
color = (255, 255, 0, 0)
start_pos = (0, 1)
end_pos = (1, 2)
blend = 0
kwargs = {
"surface": surface,
"color": color,
"start_pos": start_pos,
"end_pos": end_pos,
"blend": blend,
}
for name in ("surface", "color", "start_pos", "end_pos", "blend"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_aaline(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_aaline(surface, color, **kwargs)
elif "start_pos" == name:
bounds_rect = self.draw_aaline(surface, color, start_pos, **kwargs)
elif "end_pos" == name:
bounds_rect = self.draw_aaline(
surface, color, start_pos, end_pos, **kwargs
)
else:
bounds_rect = self.draw_aaline(
surface, color, start_pos, end_pos, blend, **kwargs
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__valid_blend_values(self):
"""Ensures draw aaline accepts different blend values."""
expected_color = pygame.Color("yellow")
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
pos = (2, 1)
kwargs = {
"surface": surface,
"color": expected_color,
"start_pos": pos,
"end_pos": (2, 2),
"blend": None,
}
for blend in (-10, -2, -1, 0, 1, 2, 10):
surface.fill(surface_color) # Clear for each test.
kwargs["blend"] = blend
bounds_rect = self.draw_aaline(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aaline__valid_start_pos_formats(self):
        """Ensures draw aaline accepts different start_pos formats.

        Checks tuple/list/Vector2 positions with int and fractional coords.
        """
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": None,  # Filled in per iteration below.
            "end_pos": (2, 2),
            "blend": 0,
        }
        x, y = 2, 1  # start position
        # Int position plus slight fractional offsets of each coordinate.
        positions = ((x, y), (x + 0.01, y), (x, y + 0.01), (x + 0.01, y + 0.01))
        for start_pos in positions:
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["start_pos"] = seq_type(start_pos)
                bounds_rect = self.draw_aaline(**kwargs)
                color = surface.get_at((x, y))
                for i, sub_color in enumerate(expected_color):
                    # The color could be slightly off the expected color due to
                    # any fractional position arguments.
                    self.assertGreaterEqual(color[i] + 5, sub_color, start_pos)
                self.assertIsInstance(bounds_rect, pygame.Rect, start_pos)
    def test_aaline__valid_end_pos_formats(self):
        """Ensures draw aaline accepts different end_pos formats.

        Checks tuple/list/Vector2 positions with int and fractional coords.
        """
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": (2, 1),
            "end_pos": None,  # Filled in per iteration below.
            "blend": 0,
        }
        x, y = 2, 2  # end position
        # Int position plus slight fractional offsets of each coordinate.
        positions = ((x, y), (x + 0.02, y), (x, y + 0.02), (x + 0.02, y + 0.02))
        for end_pos in positions:
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["end_pos"] = seq_type(end_pos)
                bounds_rect = self.draw_aaline(**kwargs)
                color = surface.get_at((x, y))
                for i, sub_color in enumerate(expected_color):
                    # The color could be slightly off the expected color due to
                    # any fractional position arguments.
                    self.assertGreaterEqual(color[i] + 15, sub_color, end_pos)
                self.assertIsInstance(bounds_rect, pygame.Rect, end_pos)
def test_aaline__invalid_start_pos_formats(self):
"""Ensures draw aaline handles invalid start_pos formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": None,
"end_pos": (2, 2),
"blend": 0,
}
start_pos_fmts = (
(2,), # Too few coords.
(2, 1, 0), # Too many coords.
(2, "1"), # Wrong type.
set([2, 1]), # Wrong type.
dict(((2, 1),)),
) # Wrong type.
for start_pos in start_pos_fmts:
kwargs["start_pos"] = start_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__invalid_end_pos_formats(self):
"""Ensures draw aaline handles invalid end_pos formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": (2, 2),
"end_pos": None,
"blend": 0,
}
end_pos_fmts = (
(2,), # Too few coords.
(2, 1, 0), # Too many coords.
(2, "1"), # Wrong type.
set([2, 1]), # Wrong type.
dict(((2, 1),)),
) # Wrong type.
for end_pos in end_pos_fmts:
kwargs["end_pos"] = end_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__valid_color_formats(self):
"""Ensures draw aaline accepts different color formats."""
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
pos = (1, 1)
kwargs = {
"surface": surface,
"color": None,
"start_pos": pos,
"end_pos": (2, 1),
"blend": 0,
}
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_aaline(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__invalid_color_formats(self):
"""Ensures draw aaline handles invalid color formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"start_pos": (1, 1),
"end_pos": (2, 1),
"blend": 0,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__color(self):
"""Tests if the aaline drawn is the correct color."""
pos = (0, 0)
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_aaline(surface, expected_color, pos, (1, 0))
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
def test_aaline__gaps(self):
"""Tests if the aaline drawn contains any gaps.
See: #512
"""
expected_color = (255, 255, 255)
for surface in self._create_surfaces():
width = surface.get_width()
self.draw_aaline(surface, expected_color, (0, 0), (width - 1, 0))
for x in range(width):
pos = (x, 0)
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def test_aaline__bounding_rect(self):
        """Ensures draw aaline returns the correct bounding rect.
        Tests lines with endpoints on and off the surface and blending
        enabled and disabled.
        """
        line_color = pygame.Color("red")
        surf_color = pygame.Color("blue")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        helper_rect = pygame.Rect((0, 0), (width, height))
        # Testing surfaces of different sizes. One larger than the helper_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            # Move the helper rect to different positions to test line
            # endpoints on and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                helper_rect.center = pos
                for blend in (False, True):  # Test non-blending and blending.
                    for start, end in self._rect_lines(helper_rect):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_aaline(
                            surface, line_color, start, end, blend
                        )
                        # Calculating the expected_rect after the line is
                        # drawn (it uses what is actually drawn), so the check
                        # is independent of the drawing algorithm's details.
                        expected_rect = create_bounding_rect(surface, surf_color, start)
                        self.assertEqual(bounding_rect, expected_rect)
    def test_aaline__surface_clip(self):
        """Ensures draw aaline respects a surface's clip area.

        Draws each line twice: once unclipped to learn which pixels it
        touches, then clipped, verifying nothing outside the clip changed.
        """
        surfw = surfh = 30
        aaline_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the aaline's pos.
        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing the aaline over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            for blend in (0, 1):  # Test non-blending and blending.
                # Get the expected points by drawing the aaline without the
                # clip area set.
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_aaline(
                    surface, aaline_color, pos_rect.midtop, pos_rect.midbottom, blend
                )
                # Need to get the points that are NOT surface_color due to the
                # way blend=0 uses the color black to antialias.
                expected_pts = get_color_points(
                    surface, surface_color, clip_rect, False
                )
                # Clear the surface and set the clip area. Redraw the aaline
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_aaline(
                    surface, aaline_color, pos_rect.midtop, pos_rect.midbottom, blend
                )
                surface.lock()  # For possible speed up.
                # Check all the surface points to ensure the expected_pts
                # are not surface_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        self.assertNotEqual(surface.get_at(pt), surface_color, pt)
                    else:
                        self.assertEqual(surface.get_at(pt), surface_color, pt)
                surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing single aalines.
# class PythonDrawAALineTest(AALineMixin, PythonDrawTestCase):
# """Test draw_py module function aaline.
#
# This class inherits the general tests from AALineMixin. It is also the
# class to add any draw_py.draw_aaline specific tests to.
# """
class DrawAALineTest(AALineMixin, DrawTestCase):
    """Test draw module function aaline.
    This class inherits the general tests from AALineMixin. It is also the
    class to add any draw.aaline specific tests to.
    """
    def test_aaline_endianness(self):
        """Ensures color components land in the correct channels (red stays
        red and blue stays blue) for both 24 and 32 bit surfaces."""
        for depth in (24, 32):
            surface = pygame.Surface((5, 3), 0, depth)
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_aaline(surface, pygame.Color(255, 0, 0), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).r, 0, "there should be red here")
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_aaline(surface, pygame.Color(0, 0, 255), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).b, 0, "there should be blue here")
    def _check_antialiasing(
        self, from_point, to_point, should, check_points, set_endpoints=True
    ):
        """Draw a line between two points and check colors of check_points.

        `should` maps point -> expected color; any check point missing from it
        is expected to stay the BG_RED background. NOTE: `should` is mutated
        in place when set_endpoints is True.
        """
        if set_endpoints:
            should[from_point] = should[to_point] = FG_GREEN
        def check_one_direction(from_point, to_point, should):
            self.draw_aaline(self.surface, FG_GREEN, from_point, to_point, True)
            for pt in check_points:
                color = should.get(pt, BG_RED)
                if PY3:  # "subTest" is sooo helpful, but does not exist in PY2
                    with self.subTest(from_pt=from_point, pt=pt, to=to_point):
                        self.assertEqual(self.surface.get_at(pt), color)
                else:
                    self.assertEqual(self.surface.get_at(pt), color)
            # reset
            draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        # it is important to test also opposite direction, the algorithm
        # is (#512) or was not symmetric
        check_one_direction(from_point, to_point, should)
        if from_point != to_point:
            check_one_direction(to_point, from_point, should)
    def test_short_non_antialiased_lines(self):
        """test very short not anti aliased lines in all directions."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        # Horizontal, vertical and diagonal lines should not be anti-aliased,
        # even with draw.aaline ...
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)]
        def check_both_directions(from_pt, to_pt, other_points):
            should = {pt: FG_GREEN for pt in other_points}
            self._check_antialiasing(from_pt, to_pt, should, check_points)
        # 0. one point
        check_both_directions((5, 5), (5, 5), [])
        # 1. horizontal
        check_both_directions((4, 7), (5, 7), [])
        check_both_directions((5, 4), (7, 4), [(6, 4)])
        # 2. vertical
        check_both_directions((5, 5), (5, 6), [])
        check_both_directions((6, 4), (6, 6), [(6, 5)])
        # 3. diagonals
        check_both_directions((5, 5), (6, 6), [])
        check_both_directions((5, 5), (7, 7), [(6, 6)])
        check_both_directions((5, 6), (6, 5), [])
        check_both_directions((6, 4), (4, 6), [(5, 5)])
    def test_short_line_anti_aliasing(self):
        """Checks the blended colors of short lines at non-axis slopes."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)]
        def check_both_directions(from_pt, to_pt, should):
            self._check_antialiasing(from_pt, to_pt, should, check_points)
        # lets say dx = abs(x0 - x1) ; dy = abs(y0 - y1)
        brown = (127, 127, 0)
        # dy / dx = 0.5
        check_both_directions((4, 4), (6, 5), {(5, 4): brown, (5, 5): brown})
        check_both_directions((4, 5), (6, 4), {(5, 4): brown, (5, 5): brown})
        # dy / dx = 2
        check_both_directions((4, 4), (5, 6), {(4, 5): brown, (5, 5): brown})
        check_both_directions((5, 4), (4, 6), {(4, 5): brown, (5, 5): brown})
        # some little longer lines; so we need to check more points:
        check_points = [(i, j) for i in range(2, 9) for j in range(2, 9)]
        # dy / dx = 0.25
        reddish = (191, 63, 0)
        greenish = (63, 191, 0)
        should = {
            (4, 3): greenish,
            (5, 3): brown,
            (6, 3): reddish,
            (4, 4): reddish,
            (5, 4): brown,
            (6, 4): greenish,
        }
        check_both_directions((3, 3), (7, 4), should)
        should = {
            (4, 3): reddish,
            (5, 3): brown,
            (6, 3): greenish,
            (4, 4): greenish,
            (5, 4): brown,
            (6, 4): reddish,
        }
        check_both_directions((3, 4), (7, 3), should)
        # dy / dx = 4
        should = {
            (4, 4): greenish,
            (4, 5): brown,
            (4, 6): reddish,
            (5, 4): reddish,
            (5, 5): brown,
            (5, 6): greenish,
        }
        check_both_directions((4, 3), (5, 7), should)
        should = {
            (4, 4): reddish,
            (4, 5): brown,
            (4, 6): greenish,
            (5, 4): greenish,
            (5, 5): brown,
            (5, 6): reddish,
        }
        check_both_directions((5, 3), (4, 7), should)
    def test_anti_aliasing_float_coordinates(self):
        """Float coordinates should be blended smoothly."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(5) for j in range(5)]
        brown = (127, 127, 0)
        # 0. identical point : current implementation does no smoothing...
        expected = {(1, 2): FG_GREEN}
        self._check_antialiasing(
            (1.5, 2), (1.5, 2), expected, check_points, set_endpoints=False
        )
        expected = {(2, 2): FG_GREEN}
        self._check_antialiasing(
            (2.5, 2.7), (2.5, 2.7), expected, check_points, set_endpoints=False
        )
        # 1. horizontal lines
        # a) blend endpoints
        expected = {(1, 2): brown, (2, 2): FG_GREEN}
        self._check_antialiasing(
            (1.5, 2), (2, 2), expected, check_points, set_endpoints=False
        )
        expected = {(1, 2): brown, (2, 2): FG_GREEN, (3, 2): brown}
        self._check_antialiasing(
            (1.5, 2), (2.5, 2), expected, check_points, set_endpoints=False
        )
        expected = {(2, 2): brown, (1, 2): FG_GREEN}
        self._check_antialiasing(
            (1, 2), (1.5, 2), expected, check_points, set_endpoints=False
        )
        expected = {(1, 2): brown, (2, 2): (63, 191, 0)}
        self._check_antialiasing(
            (1.5, 2), (1.75, 2), expected, check_points, set_endpoints=False
        )
        # b) blend y-coordinate
        expected = {(x, y): brown for x in range(2, 5) for y in (1, 2)}
        self._check_antialiasing(
            (2, 1.5), (4, 1.5), expected, check_points, set_endpoints=False
        )
        # 2. vertical lines
        # a) blend endpoints
        expected = {(2, 1): brown, (2, 2): FG_GREEN, (2, 3): brown}
        self._check_antialiasing(
            (2, 1.5), (2, 2.5), expected, check_points, set_endpoints=False
        )
        expected = {(2, 1): brown, (2, 2): (63, 191, 0)}
        self._check_antialiasing(
            (2, 1.5), (2, 1.75), expected, check_points, set_endpoints=False
        )
        # b) blend x-coordinate
        expected = {(x, y): brown for x in (1, 2) for y in range(2, 5)}
        self._check_antialiasing(
            (1.5, 2), (1.5, 4), expected, check_points, set_endpoints=False
        )
        # 3. diagonal lines
        # a) blend endpoints
        expected = {(1, 1): brown, (2, 2): FG_GREEN, (3, 3): brown}
        self._check_antialiasing(
            (1.5, 1.5), (2.5, 2.5), expected, check_points, set_endpoints=False
        )
        expected = {(3, 1): brown, (2, 2): FG_GREEN, (1, 3): brown}
        self._check_antialiasing(
            (2.5, 1.5), (1.5, 2.5), expected, check_points, set_endpoints=False
        )
        # b) blend sidewards
        expected = {(2, 1): brown, (2, 2): brown, (3, 2): brown, (3, 3): brown}
        self._check_antialiasing(
            (2, 1.5), (3, 2.5), expected, check_points, set_endpoints=False
        )
        reddish = (191, 63, 0)
        greenish = (63, 191, 0)
        expected = {
            (2, 1): greenish,
            (2, 2): reddish,
            (3, 2): greenish,
            (3, 3): reddish,
            (4, 3): greenish,
            (4, 4): reddish,
        }
        self._check_antialiasing(
            (2, 1.25), (4, 3.25), expected, check_points, set_endpoints=False
        )
    def test_anti_aliasing_at_and_outside_the_border(self):
        """Ensures antialiasing works correct at a surface's borders."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(10) for j in range(10)]
        reddish = (191, 63, 0)
        brown = (127, 127, 0)
        greenish = (63, 191, 0)
        from_point, to_point = (3, 3), (7, 4)
        should = {
            (4, 3): greenish,
            (5, 3): brown,
            (6, 3): reddish,
            (4, 4): reddish,
            (5, 4): brown,
            (6, 4): greenish,
        }
        # Shift the same reference line so it touches/crosses each border;
        # the expected colors shift with it (off-surface points are simply
        # never in check_points, so they are not checked).
        for dx, dy in (
            (-4, 0),
            (4, 0),  # moved to left and right borders
            (0, -5),
            (0, -4),
            (0, -3),  # upper border
            (0, 5),
            (0, 6),
            (0, 7),  # lower border
            (-4, -4),
            (-4, -3),
            (-3, -4),  # upper left corner
        ):
            first = from_point[0] + dx, from_point[1] + dy
            second = to_point[0] + dx, to_point[1] + dy
            expected = {(x + dx, y + dy): color for (x, y), color in should.items()}
            self._check_antialiasing(first, second, expected, check_points)
### AALines Testing ###########################################################
class AALinesMixin(BaseLineMixin):
    """Mixin test for drawing aalines.
    This class contains all the general aalines drawing tests.
    """
    def test_aalines__args(self):
        """Ensures draw aalines accepts the correct args."""
        bounds_rect = self.draw_aalines(
            pygame.Surface((3, 3)), (0, 10, 0, 50), False, ((0, 0), (1, 1)), 1
        )
        self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__args_without_blend(self):
        """Ensures draw aalines accepts the args without a blend."""
        bounds_rect = self.draw_aalines(
            pygame.Surface((2, 2)), (0, 0, 0, 50), False, ((0, 0), (1, 1))
        )
        self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__kwargs(self):
        """Ensures draw aalines accepts the correct kwargs
        with and without a blend arg.
        """
        surface = pygame.Surface((4, 4))
        color = pygame.Color("yellow")
        points = ((0, 0), (1, 1), (2, 2))
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "closed": False,
                "points": points,
                "blend": 1,
            },
            {"surface": surface, "color": color, "closed": False, "points": points},
        ]
        for kwargs in kwargs_list:
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__kwargs_order_independent(self):
        """Ensures draw aalines's kwargs are not order dependent."""
        bounds_rect = self.draw_aalines(
            closed=1,
            points=((0, 0), (1, 1), (2, 2)),
            blend=1,
            color=(10, 20, 30),
            surface=pygame.Surface((3, 2)),
        )
        self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__args_missing(self):
        """Ensures draw aalines detects any missing required args."""
        surface = pygame.Surface((1, 1))
        color = pygame.Color("blue")
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aalines(surface, color, 0)
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aalines(surface, color)
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aalines(surface)
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aalines()
    def test_aalines__kwargs_missing(self):
        """Ensures draw aalines detects any missing required kwargs."""
        kwargs = {
            "surface": pygame.Surface((3, 2)),
            "color": pygame.Color("red"),
            "closed": 1,
            "points": ((2, 2), (1, 1)),
            "blend": 1,
        }
        for name in ("points", "closed", "color", "surface"):
            invalid_kwargs = dict(kwargs)
            invalid_kwargs.pop(name)  # Pop from a copy.
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**invalid_kwargs)
    def test_aalines__arg_invalid_types(self):
        """Ensures draw aalines detects invalid arg types."""
        surface = pygame.Surface((2, 2))
        color = pygame.Color("blue")
        closed = 0
        points = ((1, 2), (2, 1))
        with self.assertRaises(TypeError):
            # Invalid blend.
            bounds_rect = self.draw_aalines(surface, color, closed, points, "1")
        with self.assertRaises(TypeError):
            # Invalid points.
            bounds_rect = self.draw_aalines(surface, color, closed, (1, 2, 3))
        with self.assertRaises(TypeError):
            # Invalid closed.
            bounds_rect = self.draw_aalines(surface, color, InvalidBool(), points)
        with self.assertRaises(TypeError):
            # Invalid color.
            bounds_rect = self.draw_aalines(surface, 2.3, closed, points)
        with self.assertRaises(TypeError):
            # Invalid surface.
            bounds_rect = self.draw_aalines((1, 2, 3, 4), color, closed, points)
    def test_aalines__kwarg_invalid_types(self):
        """Ensures draw aalines detects invalid kwarg types."""
        valid_kwargs = {
            "surface": pygame.Surface((3, 3)),
            "color": pygame.Color("green"),
            "closed": False,
            "points": ((1, 2), (2, 1)),
            "blend": 1,
        }
        invalid_kwargs = {
            "surface": pygame.Surface,
            "color": 2.3,
            "closed": InvalidBool(),
            "points": (0, 0, 0),
            "blend": 1.2,
        }
        # Swap in one invalid kwarg at a time, keeping the rest valid.
        for kwarg in ("surface", "color", "closed", "points", "blend"):
            kwargs = dict(valid_kwargs)
            kwargs[kwarg] = invalid_kwargs[kwarg]
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__kwarg_invalid_name(self):
        """Ensures draw aalines detects invalid kwarg names."""
        surface = pygame.Surface((2, 3))
        color = pygame.Color("cyan")
        closed = 1
        points = ((1, 2), (2, 1))
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "blend": 1,
                "invalid": 1,
            },
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "invalid": 1,
            },
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__args_and_kwargs(self):
        """Ensures draw aalines accepts a combination of args/kwargs"""
        surface = pygame.Surface((3, 2))
        color = (255, 255, 0, 0)
        closed = 0
        points = ((1, 2), (2, 1))
        blend = 1
        kwargs = {
            "surface": surface,
            "color": color,
            "closed": closed,
            "points": points,
            "blend": blend,
        }
        # The pops are cumulative: each pass moves one more argument from
        # kwargs to the positional args.
        for name in ("surface", "color", "closed", "points", "blend"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_aalines(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_aalines(surface, color, **kwargs)
            elif "closed" == name:
                bounds_rect = self.draw_aalines(surface, color, closed, **kwargs)
            elif "points" == name:
                bounds_rect = self.draw_aalines(
                    surface, color, closed, points, **kwargs
                )
            else:
                bounds_rect = self.draw_aalines(
                    surface, color, closed, points, blend, **kwargs
                )
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_blend_values(self):
        """Ensures draw aalines accepts different blend values."""
        expected_color = pygame.Color("yellow")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": (pos, (1, 3)),
            "blend": None,
        }
        for blend in (-10, -2, -1, 0, 1, 2, 10):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["blend"] = blend
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color, blend)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_points_format(self):
        """Ensures draw aalines accepts different points formats."""
        expected_color = (10, 20, 30, 255)
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": None,
            "blend": 0,
        }
        # The point type can be a tuple/list/Vector2.
        point_types = (
            (tuple, tuple, tuple, tuple),  # all tuples
            (list, list, list, list),  # all lists
            (Vector2, Vector2, Vector2, Vector2),  # all Vector2s
            (list, Vector2, tuple, Vector2),  # mix
        )
        # The point values can be ints or floats.
        point_values = (
            ((1, 1), (2, 1), (2, 2), (1, 2)),
            ((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
        )
        # Each sequence of points can be a tuple or a list.
        seq_types = (tuple, list)
        for point_type in point_types:
            for values in point_values:
                check_pos = values[0]
                points = [point_type[i](pt) for i, pt in enumerate(values)]
                for seq_type in seq_types:
                    surface.fill(surface_color)  # Clear for each test.
                    kwargs["points"] = seq_type(points)
                    bounds_rect = self.draw_aalines(**kwargs)
                    self.assertEqual(surface.get_at(check_pos), expected_color)
                    self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__invalid_points_formats(self):
        """Ensures draw aalines handles invalid points formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "closed": False,
            "points": None,
            "blend": 1,
        }
        points_fmts = (
            ((1, 1), (2,)),  # Too few coords.
            ((1, 1), (2, 2, 2)),  # Too many coords.
            ((1, 1), (2, "2")),  # Wrong type.
            ((1, 1), set([2, 3])),  # Wrong type.
            ((1, 1), dict(((2, 2), (3, 3)))),  # Wrong type.
            set(((1, 1), (1, 2))),  # Wrong type.
            dict(((1, 1), (4, 4))),  # Wrong type.
        )
        for points in points_fmts:
            kwargs["points"] = points
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__invalid_points_values(self):
        """Ensures draw aalines handles invalid points values correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "closed": False,
            "points": None,
            "blend": 1,
        }
        for points in ([], ((1, 1),)):  # Too few points.
            for seq_type in (tuple, list):  # Test as tuples and lists.
                kwargs["points"] = seq_type(points)
                with self.assertRaises(ValueError):
                    bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__valid_closed_values(self):
        """Ensures draw aalines accepts different closed values."""
        line_color = pygame.Color("blue")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((5, 5))
        pos = (1, 3)
        kwargs = {
            "surface": surface,
            "color": line_color,
            "closed": None,
            "points": ((1, 1), (4, 1), (4, 4), (1, 4)),
            "blend": 0,
        }
        # `closed` is evaluated for truthiness; pos lies on the closing
        # segment, so it only gets drawn when closed is truthy.
        true_values = (-7, 1, 10, "2", 3.1, (4,), [5], True)
        false_values = (None, "", 0, (), [], False)
        for closed in true_values + false_values:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["closed"] = closed
            expected_color = line_color if closed else surface_color
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_color_formats(self):
        """Ensures draw aalines accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": None,
            "closed": False,
            "points": (pos, (2, 1)),
            "blend": 0,
        }
        greens = (
            (0, 255, 0),
            (0, 255, 0, 255),
            surface.map_rgb(green_color),
            green_color,
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__invalid_color_formats(self):
        """Ensures draw aalines handles invalid color formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 3)),
            "color": None,
            "closed": False,
            "points": ((1, 1), (1, 2)),
            "blend": 0,
        }
        for expected_color in (2.3, self):
            kwargs["color"] = expected_color
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__color(self):
        """Tests if the aalines drawn are the correct color.
        Draws aalines around the border of the given surface and checks if all
        borders of the surface only contain the given color.
        """
        for surface in self._create_surfaces():
            for expected_color in self.COLORS:
                self.draw_aalines(surface, expected_color, True, corners(surface))
                for pos, color in border_pos_and_color(surface):
                    self.assertEqual(color, expected_color, "pos={}".format(pos))
    def test_aalines__gaps(self):
        """Tests if the aalines drawn contain any gaps.
        Draws aalines around the border of the given surface and checks if
        all borders of the surface contain any gaps.
        See: #512
        """
        expected_color = (255, 255, 255)
        for surface in self._create_surfaces():
            self.draw_aalines(surface, expected_color, True, corners(surface))
            for pos, color in border_pos_and_color(surface):
                self.assertEqual(color, expected_color, "pos={}".format(pos))
    def test_aalines__bounding_rect(self):
        """Ensures draw aalines returns the correct bounding rect.
        Tests lines with endpoints on and off the surface and blending
        enabled and disabled.
        """
        line_color = pygame.Color("red")
        surf_color = pygame.Color("blue")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        pos_rect = pygame.Rect((0, 0), (width, height))
        # Testing surfaces of different sizes. One larger than the pos_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            # Move pos_rect to different positions to test line endpoints on
            # and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                pos_rect.center = pos
                # Shape: Triangle (if closed), ^ caret (if not closed).
                pts = (pos_rect.midleft, pos_rect.midtop, pos_rect.midright)
                pos = pts[0]  # Rect position if nothing drawn.
                for blend in (False, True):  # Test non-blending and blending.
                    for closed in (True, False):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_aalines(
                            surface, line_color, closed, pts, blend
                        )
                        # Calculating the expected_rect after the lines are
                        # drawn (it uses what is actually drawn).
                        expected_rect = create_bounding_rect(surface, surf_color, pos)
                        self.assertEqual(bounding_rect, expected_rect)
    def test_aalines__surface_clip(self):
        """Ensures draw aalines respects a surface's clip area."""
        surfw = surfh = 30
        aaline_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the aalines's pos.
        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing the aalines over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            pts = (pos_rect.midtop, pos_rect.center, pos_rect.midbottom)
            for closed in (True, False):  # Test closed and not closed.
                for blend in (0, 1):  # Test non-blending and blending.
                    # Get the expected points by drawing the aalines without
                    # the clip area set.
                    surface.set_clip(None)
                    surface.fill(surface_color)
                    self.draw_aalines(surface, aaline_color, closed, pts, blend)
                    # Need to get the points that are NOT surface_color due to
                    # the way blend=0 uses the color black to antialias.
                    expected_pts = get_color_points(
                        surface, surface_color, clip_rect, False
                    )
                    # Clear the surface and set the clip area. Redraw the
                    # aalines and check that only the clip area is modified.
                    surface.fill(surface_color)
                    surface.set_clip(clip_rect)
                    self.draw_aalines(surface, aaline_color, closed, pts, blend)
                    surface.lock()  # For possible speed up.
                    # Check all the surface points to ensure the expected_pts
                    # are not surface_color.
                    for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                        if pt in expected_pts:
                            self.assertNotEqual(surface.get_at(pt), surface_color, pt)
                        else:
                            self.assertEqual(surface.get_at(pt), surface_color, pt)
                    surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing aalines.
# class PythonDrawAALinesTest(AALinesMixin, PythonDrawTestCase):
# """Test draw_py module function aalines.
#
# This class inherits the general tests from AALinesMixin. It is also the
# class to add any draw_py.draw_aalines specific tests to.
# """
class DrawAALinesTest(AALinesMixin, DrawTestCase):
    """Test draw module function aalines.
    This class inherits the general tests from AALinesMixin. It is also the
    class to add any draw.aalines specific tests to.
    """
    # No draw.aalines-specific tests yet; all coverage comes from the mixin.
### Polygon Testing ###########################################################
# Shared polygon fixtures (vertex sequences) for the polygon tests.
SQUARE = ([0, 0], [3, 0], [3, 3], [0, 3])  # Axis-aligned 3x3 square.
DIAMOND = [(1, 3), (3, 5), (5, 3), (3, 1)]  # Diamond centered on (3, 3).
# Plus/cross shape traced clockwise starting at the top arm.
CROSS = (
    [2, 0],
    [4, 0],
    [4, 2],
    [6, 2],
    [6, 4],
    [4, 4],
    [4, 6],
    [2, 6],
    [2, 4],
    [0, 4],
    [0, 2],
    [2, 2],
)
class DrawPolygonMixin(object):
"""Mixin tests for drawing polygons.
This class contains all the general polygon drawing tests.
"""
    def setUp(self):
        # Fresh 20x20 surface for each polygon test.
        self.surface = pygame.Surface((20, 20))
def test_polygon__args(self):
"""Ensures draw polygon accepts the correct args."""
bounds_rect = self.draw_polygon(
pygame.Surface((3, 3)), (0, 10, 0, 50), ((0, 0), (1, 1), (2, 2)), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__args_without_width(self):
"""Ensures draw polygon accepts the args without a width."""
bounds_rect = self.draw_polygon(
pygame.Surface((2, 2)), (0, 0, 0, 50), ((0, 0), (1, 1), (2, 2))
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__kwargs(self):
"""Ensures draw polygon accepts the correct kwargs
with and without a width arg.
"""
surface = pygame.Surface((4, 4))
color = pygame.Color("yellow")
points = ((0, 0), (1, 1), (2, 2))
kwargs_list = [
{"surface": surface, "color": color, "points": points, "width": 1},
{"surface": surface, "color": color, "points": points},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_polygon(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__kwargs_order_independent(self):
"""Ensures draw polygon's kwargs are not order dependent."""
bounds_rect = self.draw_polygon(
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
width=0,
points=((0, 1), (1, 2), (2, 3)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__args_missing(self):
"""Ensures draw polygon detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon()
def test_polygon__kwargs_missing(self):
"""Ensures draw polygon detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((1, 2)),
"color": pygame.Color("red"),
"points": ((2, 1), (2, 2), (2, 3)),
"width": 1,
}
for name in ("points", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**invalid_kwargs)
def test_polygon__arg_invalid_types(self):
"""Ensures draw polygon detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
points = ((0, 1), (1, 2), (1, 3))
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_polygon(surface, color, points, "1")
with self.assertRaises(TypeError):
# Invalid points.
bounds_rect = self.draw_polygon(surface, color, (1, 2, 3))
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_polygon(surface, 2.3, points)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_polygon((1, 2, 3, 4), color, points)
def test_polygon__kwarg_invalid_types(self):
"""Ensures draw polygon detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
points = ((0, 0), (1, 0), (2, 0))
width = 1
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"points": points,
"width": width,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"points": points,
"width": width,
},
{
"surface": surface,
"color": color,
"points": ((1,), (1,), (1,)), # Invalid points.
"width": width,
},
{"surface": surface, "color": color, "points": points, "width": 1.2},
] # Invalid width.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**kwargs)
def test_polygon__kwarg_invalid_name(self):
"""Ensures draw polygon detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
points = ((1, 1), (1, 2), (1, 3))
kwargs_list = [
{
"surface": surface,
"color": color,
"points": points,
"width": 1,
"invalid": 1,
},
{"surface": surface, "color": color, "points": points, "invalid": 1},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**kwargs)
def test_polygon__args_and_kwargs(self):
"""Ensures draw polygon accepts a combination of args/kwargs"""
surface = pygame.Surface((3, 1))
color = (255, 255, 0, 0)
points = ((0, 1), (1, 2), (2, 3))
width = 0
kwargs = {"surface": surface, "color": color, "points": points, "width": width}
for name in ("surface", "color", "points", "width"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_polygon(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_polygon(surface, color, **kwargs)
elif "points" == name:
bounds_rect = self.draw_polygon(surface, color, points, **kwargs)
else:
bounds_rect = self.draw_polygon(surface, color, points, width, **kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__valid_width_values(self):
"""Ensures draw polygon accepts different width values."""
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
color = (10, 20, 30, 255)
kwargs = {
"surface": surface,
"color": color,
"points": ((1, 1), (2, 1), (2, 2), (1, 2)),
"width": None,
}
pos = kwargs["points"][0]
for width in (-100, -10, -1, 0, 1, 10, 100):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = color if width >= 0 else surface_color
bounds_rect = self.draw_polygon(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__valid_points_format(self):
"""Ensures draw polygon accepts different points formats."""
expected_color = (10, 20, 30, 255)
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": expected_color,
"points": None,
"width": 0,
}
# The point type can be a tuple/list/Vector2.
point_types = (
(tuple, tuple, tuple, tuple), # all tuples
(list, list, list, list), # all lists
(Vector2, Vector2, Vector2, Vector2), # all Vector2s
(list, Vector2, tuple, Vector2),
) # mix
# The point values can be ints or floats.
point_values = (
((1, 1), (2, 1), (2, 2), (1, 2)),
((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
)
# Each sequence of points can be a tuple or a list.
seq_types = (tuple, list)
for point_type in point_types:
for values in point_values:
check_pos = values[0]
points = [point_type[i](pt) for i, pt in enumerate(values)]
for seq_type in seq_types:
surface.fill(surface_color) # Clear for each test.
kwargs["points"] = seq_type(points)
bounds_rect = self.draw_polygon(**kwargs)
self.assertEqual(surface.get_at(check_pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__invalid_points_formats(self):
"""Ensures draw polygon handles invalid points formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"points": None,
"width": 0,
}
points_fmts = (
((1, 1), (2, 1), (2,)), # Too few coords.
((1, 1), (2, 1), (2, 2, 2)), # Too many coords.
((1, 1), (2, 1), (2, "2")), # Wrong type.
((1, 1), (2, 1), set([2, 3])), # Wrong type.
((1, 1), (2, 1), dict(((2, 2), (3, 3)))), # Wrong type.
set(((1, 1), (2, 1), (2, 2), (1, 2))), # Wrong type.
dict(((1, 1), (2, 2), (3, 3), (4, 4))),
) # Wrong type.
for points in points_fmts:
kwargs["points"] = points
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**kwargs)
def test_polygon__invalid_points_values(self):
"""Ensures draw polygon handles invalid points values correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"points": None,
"width": 0,
}
points_fmts = (
tuple(), # Too few points.
((1, 1),), # Too few points.
((1, 1), (2, 1)),
) # Too few points.
for points in points_fmts:
for seq_type in (tuple, list): # Test as tuples and lists.
kwargs["points"] = seq_type(points)
with self.assertRaises(ValueError):
bounds_rect = self.draw_polygon(**kwargs)
def test_polygon__valid_color_formats(self):
"""Ensures draw polygon accepts different color formats."""
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"points": ((1, 1), (2, 1), (2, 2), (1, 2)),
"width": 0,
}
pos = kwargs["points"][0]
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_polygon(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__invalid_color_formats(self):
"""Ensures draw polygon handles invalid color formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"points": ((1, 1), (2, 1), (2, 2), (1, 2)),
"width": 0,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**kwargs)
def test_draw_square(self):
self.draw_polygon(self.surface, RED, SQUARE, 0)
# note : there is a discussion (#234) if draw.polygon should include or
# not the right or lower border; here we stick with current behavior,
# eg include those borders ...
for x in range(4):
for y in range(4):
self.assertEqual(self.surface.get_at((x, y)), RED)
def test_draw_diamond(self):
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
self.draw_polygon(self.surface, GREEN, DIAMOND, 0)
# this diamond shape is equivalent to its four corners, plus inner square
for x, y in DIAMOND:
self.assertEqual(self.surface.get_at((x, y)), GREEN, msg=str((x, y)))
for x in range(2, 5):
for y in range(2, 5):
self.assertEqual(self.surface.get_at((x, y)), GREEN)
def test_1_pixel_high_or_wide_shapes(self):
# 1. one-pixel-high, filled
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
self.draw_polygon(self.surface, GREEN, [(x, 2) for x, _y in CROSS], 0)
cross_size = 6 # the maximum x or y coordinate of the cross
for x in range(cross_size + 1):
self.assertEqual(self.surface.get_at((x, 1)), RED)
self.assertEqual(self.surface.get_at((x, 2)), GREEN)
self.assertEqual(self.surface.get_at((x, 3)), RED)
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
# 2. one-pixel-high, not filled
self.draw_polygon(self.surface, GREEN, [(x, 5) for x, _y in CROSS], 1)
for x in range(cross_size + 1):
self.assertEqual(self.surface.get_at((x, 4)), RED)
self.assertEqual(self.surface.get_at((x, 5)), GREEN)
self.assertEqual(self.surface.get_at((x, 6)), RED)
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
# 3. one-pixel-wide, filled
self.draw_polygon(self.surface, GREEN, [(3, y) for _x, y in CROSS], 0)
for y in range(cross_size + 1):
self.assertEqual(self.surface.get_at((2, y)), RED)
self.assertEqual(self.surface.get_at((3, y)), GREEN)
self.assertEqual(self.surface.get_at((4, y)), RED)
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
# 4. one-pixel-wide, not filled
self.draw_polygon(self.surface, GREEN, [(4, y) for _x, y in CROSS], 1)
for y in range(cross_size + 1):
self.assertEqual(self.surface.get_at((3, y)), RED)
self.assertEqual(self.surface.get_at((4, y)), GREEN)
self.assertEqual(self.surface.get_at((5, y)), RED)
def test_draw_symetric_cross(self):
"""non-regression on issue #234 : x and y where handled inconsistently.
Also, the result is/was different whether we fill or not the polygon.
"""
# 1. case width = 1 (not filled: `polygon` calls internally the `lines` function)
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
self.draw_polygon(self.surface, GREEN, CROSS, 1)
inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)]
for x in range(10):
for y in range(10):
if (x, y) in inside:
self.assertEqual(self.surface.get_at((x, y)), RED)
elif (x in range(2, 5) and y < 7) or (y in range(2, 5) and x < 7):
# we are on the border of the cross:
self.assertEqual(self.surface.get_at((x, y)), GREEN)
else:
# we are outside
self.assertEqual(self.surface.get_at((x, y)), RED)
# 2. case width = 0 (filled; this is the example from #234)
pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
self.draw_polygon(self.surface, GREEN, CROSS, 0)
inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)]
for x in range(10):
for y in range(10):
if (x in range(2, 5) and y < 7) or (y in range(2, 5) and x < 7):
# we are on the border of the cross:
self.assertEqual(
self.surface.get_at((x, y)), GREEN, msg=str((x, y))
)
else:
# we are outside
self.assertEqual(self.surface.get_at((x, y)), RED)
def test_illumine_shape(self):
"""non-regression on issue #313"""
rect = pygame.Rect((0, 0, 20, 20))
path_data = [
(0, 0),
(rect.width - 1, 0), # upper border
(rect.width - 5, 5 - 1),
(5 - 1, 5 - 1), # upper inner
(5 - 1, rect.height - 5),
(0, rect.height - 1),
] # lower diagonal
# The shape looks like this (the numbers are the indices of path_data)
# 0**********************1 <-- upper border
# ***********************
# **********************
# *********************
# ****3**************2 <-- upper inner border
# *****
# ***** (more lines here)
# *****
# ****4
# ****
# ***
# **
# 5
#
# the current bug is that the "upper inner" line is not drawn, but only
# if 4 or some lower corner exists
pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0)
# 1. First without the corners 4 & 5
self.draw_polygon(self.surface, GREEN, path_data[:4], 0)
for x in range(20):
self.assertEqual(self.surface.get_at((x, 0)), GREEN) # upper border
for x in range(4, rect.width - 5 + 1):
self.assertEqual(self.surface.get_at((x, 4)), GREEN) # upper inner
# 2. with the corners 4 & 5
pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0)
self.draw_polygon(self.surface, GREEN, path_data, 0)
for x in range(4, rect.width - 5 + 1):
self.assertEqual(self.surface.get_at((x, 4)), GREEN) # upper inner
def test_invalid_points(self):
self.assertRaises(
TypeError,
lambda: self.draw_polygon(
self.surface, RED, ((0, 0), (0, 20), (20, 20), 20), 0
),
)
def test_polygon__bounding_rect(self):
"""Ensures draw polygon returns the correct bounding rect.
Tests polygons on and off the surface and a range of width/thickness
values.
"""
polygon_color = pygame.Color("red")
surf_color = pygame.Color("black")
min_width = min_height = 5
max_width = max_height = 7
sizes = ((min_width, min_height), (max_width, max_height))
surface = pygame.Surface((20, 20), 0, 32)
surf_rect = surface.get_rect()
# Make a rect that is bigger than the surface to help test drawing
# polygons off and partially off the surface.
big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
for pos in rect_corners_mids_and_center(
surf_rect
) + rect_corners_mids_and_center(big_rect):
# A rect (pos_rect) is used to help create and position the
# polygon. Each of this rect's position attributes will be set to
# the pos value.
for attr in RECT_POSITION_ATTRIBUTES:
# Test using different rect sizes and thickness values.
for width, height in sizes:
pos_rect = pygame.Rect((0, 0), (width, height))
setattr(pos_rect, attr, pos)
# Points form a triangle with no fully
# horizontal/vertical lines.
vertices = (
pos_rect.midleft,
pos_rect.midtop,
pos_rect.bottomright,
)
for thickness in range(4):
surface.fill(surf_color) # Clear for each test.
bounding_rect = self.draw_polygon(
surface, polygon_color, vertices, thickness
)
# Calculating the expected_rect after the polygon
# is drawn (it uses what is actually drawn).
expected_rect = create_bounding_rect(
surface, surf_color, vertices[0]
)
self.assertEqual(
bounding_rect,
expected_rect,
"thickness={}".format(thickness),
)
def test_polygon__surface_clip(self):
"""Ensures draw polygon respects a surface's clip area.
Tests drawing the polygon filled and unfilled.
"""
surfw = surfh = 30
polygon_color = pygame.Color("red")
surface_color = pygame.Color("green")
surface = pygame.Surface((surfw, surfh))
surface.fill(surface_color)
clip_rect = pygame.Rect((0, 0), (8, 10))
clip_rect.center = surface.get_rect().center
pos_rect = clip_rect.copy() # Manages the polygon's pos.
for width in (0, 1): # Filled and unfilled.
# Test centering the polygon along the clip rect's edge.
for center in rect_corners_mids_and_center(clip_rect):
# Get the expected points by drawing the polygon without the
# clip area set.
pos_rect.center = center
vertices = (
pos_rect.topleft,
pos_rect.topright,
pos_rect.bottomright,
pos_rect.bottomleft,
)
surface.set_clip(None)
surface.fill(surface_color)
self.draw_polygon(surface, polygon_color, vertices, width)
expected_pts = get_color_points(surface, polygon_color, clip_rect)
# Clear the surface and set the clip area. Redraw the polygon
# and check that only the clip area is modified.
surface.fill(surface_color)
surface.set_clip(clip_rect)
self.draw_polygon(surface, polygon_color, vertices, width)
surface.lock() # For possible speed up.
# Check all the surface points to ensure only the expected_pts
# are the polygon_color.
for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
if pt in expected_pts:
expected_color = polygon_color
else:
expected_color = surface_color
self.assertEqual(surface.get_at(pt), expected_color, pt)
surface.unlock()
class DrawPolygonTest(DrawPolygonMixin, DrawTestCase):
    """Test draw module function polygon.
    This class inherits the general tests from DrawPolygonMixin. It is also
    the class to add any draw.polygon specific tests to.
    """
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing polygons.
# @unittest.skip('draw_py.draw_polygon not fully supported yet')
# class PythonDrawPolygonTest(DrawPolygonMixin, PythonDrawTestCase):
# """Test draw_py module function draw_polygon.
#
# This class inherits the general tests from DrawPolygonMixin. It is also
# the class to add any draw_py.draw_polygon specific tests to.
# """
### Rect Testing ##############################################################
class DrawRectMixin(object):
"""Mixin tests for drawing rects.
This class contains all the general rect drawing tests.
"""
def test_rect__args(self):
"""Ensures draw rect accepts the correct args."""
bounds_rect = self.draw_rect(
pygame.Surface((2, 2)), (20, 10, 20, 150), pygame.Rect((0, 0), (1, 1)), 2, 1, 2, 3, 4, 5
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__args_without_width(self):
"""Ensures draw rect accepts the args without a width and borders."""
bounds_rect = self.draw_rect(
pygame.Surface((3, 5)), (0, 0, 0, 255), pygame.Rect((0, 0), (1, 1))
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__kwargs(self):
"""Ensures draw rect accepts the correct kwargs
with and without a width and border_radius arg.
"""
kwargs_list = [
{
"surface": pygame.Surface((5, 5)),
"color": pygame.Color("red"),
"rect": pygame.Rect((0, 0), (1, 2)),
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": pygame.Surface((1, 2)),
"color": (0, 100, 200),
"rect": (0, 0, 1, 1),
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_rect(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__kwargs_order_independent(self):
"""Ensures draw rect's kwargs are not order dependent."""
bounds_rect = self.draw_rect(
color=(0, 1, 2),
border_radius=10,
surface=pygame.Surface((2, 3)),
border_top_left_radius=5,
width=-2,
border_top_right_radius=20,
border_bottom_right_radius=0,
rect=pygame.Rect((0, 0), (0, 0)),
border_bottom_left_radius=15,
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__args_missing(self):
"""Ensures draw rect detects any missing required args."""
surface = pygame.Surface((1, 1))
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(surface, pygame.Color("white"))
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect()
def test_rect__kwargs_missing(self):
"""Ensures draw rect detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((1, 3)),
"color": pygame.Color("red"),
"rect": pygame.Rect((0, 0), (2, 2)),
"width": 5,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
}
for name in ("rect", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**invalid_kwargs)
def test_rect__arg_invalid_types(self):
"""Ensures draw rect detects invalid arg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("white")
rect = pygame.Rect((1, 1), (1, 1))
with self.assertRaises(TypeError):
# Invalid border_bottom_right_radius.
bounds_rect = self.draw_rect(surface, color, rect, 2,
border_bottom_right_radius="rad")
with self.assertRaises(TypeError):
# Invalid border_bottom_left_radius.
bounds_rect = self.draw_rect(surface, color, rect, 2,
border_bottom_left_radius="rad")
with self.assertRaises(TypeError):
# Invalid border_top_right_radius.
bounds_rect = self.draw_rect(surface, color, rect, 2,
border_top_right_radius="rad")
with self.assertRaises(TypeError):
# Invalid border_top_left_radius.
bounds_rect = self.draw_rect(surface, color, rect, 2,
border_top_left_radius="draw")
with self.assertRaises(TypeError):
# Invalid border_radius.
bounds_rect = self.draw_rect(surface, color, rect, 2, "rad")
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_rect(surface, color, rect, "2", 4)
with self.assertRaises(TypeError):
# Invalid rect.
bounds_rect = self.draw_rect(surface, color, (1, 2, 3), 2, 6)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_rect(surface, 2.3, rect, 3, 8)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_rect(rect, color, rect, 4, 10)
def test_rect__kwarg_invalid_types(self):
"""Ensures draw rect detects invalid kwarg types."""
surface = pygame.Surface((2, 3))
color = pygame.Color("red")
rect = pygame.Rect((0, 0), (1, 1))
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": (1, 1, 2), # Invalid rect.
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1.1, # Invalid width.
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10.5, # Invalid border_radius.
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5.5, # Invalid top_left_radius.
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": "a", # Invalid top_right_radius.
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": "c", # Invalid bottom_left_radius
"border_bottom_right_radius": 0
},
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": "d" # Invalid bottom_right.
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**kwargs)
def test_rect__kwarg_invalid_name(self):
"""Ensures draw rect detects invalid kwarg names."""
surface = pygame.Surface((2, 1))
color = pygame.Color("green")
rect = pygame.Rect((0, 0), (3, 3))
kwargs_list = [
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0,
"invalid": 1,
},
{"surface": surface, "color": color, "rect": rect, "invalid": 1},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**kwargs)
def test_rect__args_and_kwargs(self):
"""Ensures draw rect accepts a combination of args/kwargs"""
surface = pygame.Surface((3, 1))
color = (255, 255, 255, 0)
rect = pygame.Rect((1, 0), (2, 5))
width = 0
kwargs = {"surface": surface, "color": color, "rect": rect,
"width": width}
for name in ("surface", "color", "rect", "width"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_rect(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_rect(surface, color, **kwargs)
elif "rect" == name:
bounds_rect = self.draw_rect(surface, color, rect, **kwargs)
else:
bounds_rect = self.draw_rect(surface, color, rect, width, **kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__valid_width_values(self):
"""Ensures draw rect accepts different width values."""
pos = (1, 1)
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
color = (1, 2, 3, 255)
kwargs = {
"surface": surface,
"color": color,
"rect": pygame.Rect(pos, (2, 2)),
"width": None,
}
for width in (-1000, -10, -1, 0, 1, 10, 1000):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = color if width >= 0 else surface_color
bounds_rect = self.draw_rect(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__valid_rect_formats(self):
"""Ensures draw rect accepts different rect formats."""
pos = (1, 1)
expected_color = pygame.Color("yellow")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {"surface": surface, "color": expected_color, "rect": None, "width": 0}
rects = (
pygame.Rect(pos, (1, 1)),
(pos, (2, 2)),
(pos[0], pos[1], 3, 3),
[pos, (2.1, 2.2)],
)
for rect in rects:
surface.fill(surface_color) # Clear for each test.
kwargs["rect"] = rect
bounds_rect = self.draw_rect(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__invalid_rect_formats(self):
"""Ensures draw rect handles invalid rect formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"rect": None,
"width": 0,
}
invalid_fmts = (
[],
[1],
[1, 2],
[1, 2, 3],
[1, 2, 3, 4, 5],
set([1, 2, 3, 4]),
[1, 2, 3, "4"],
)
for rect in invalid_fmts:
kwargs["rect"] = rect
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**kwargs)
def test_rect__valid_color_formats(self):
"""Ensures draw rect accepts different color formats."""
pos = (1, 1)
red_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (1, 1)),
"width": 3,
}
reds = ((255, 0, 0), (255, 0, 0, 255), surface.map_rgb(red_color), red_color)
for color in reds:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = red_color
bounds_rect = self.draw_rect(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__invalid_color_formats(self):
"""Ensures draw rect handles invalid color formats correctly."""
pos = (1, 1)
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (1, 1)),
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**kwargs)
def test_rect__fill(self):
self.surf_w, self.surf_h = self.surf_size = (320, 200)
self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
self.color = (1, 13, 24, 205)
rect = pygame.Rect(10, 10, 25, 20)
drawn = self.draw_rect(self.surf, self.color, rect, 0)
self.assertEqual(drawn, rect)
# Should be colored where it's supposed to be
for pt in test_utils.rect_area_pts(rect):
color_at_pt = self.surf.get_at(pt)
self.assertEqual(color_at_pt, self.color)
# And not where it shouldn't
for pt in test_utils.rect_outer_bounds(rect):
color_at_pt = self.surf.get_at(pt)
self.assertNotEqual(color_at_pt, self.color)
# Issue #310: Cannot draw rectangles that are 1 pixel high
bgcolor = pygame.Color("black")
self.surf.fill(bgcolor)
hrect = pygame.Rect(1, 1, self.surf_w - 2, 1)
vrect = pygame.Rect(1, 3, 1, self.surf_h - 4)
drawn = self.draw_rect(self.surf, self.color, hrect, 0)
self.assertEqual(drawn, hrect)
x, y = hrect.topleft
w, h = hrect.size
self.assertEqual(self.surf.get_at((x - 1, y)), bgcolor)
self.assertEqual(self.surf.get_at((x + w, y)), bgcolor)
for i in range(x, x + w):
self.assertEqual(self.surf.get_at((i, y)), self.color)
drawn = self.draw_rect(self.surf, self.color, vrect, 0)
self.assertEqual(drawn, vrect)
x, y = vrect.topleft
w, h = vrect.size
self.assertEqual(self.surf.get_at((x, y - 1)), bgcolor)
self.assertEqual(self.surf.get_at((x, y + h)), bgcolor)
for i in range(y, y + h):
self.assertEqual(self.surf.get_at((x, i)), self.color)
def test_rect__one_pixel_lines(self):
self.surf = pygame.Surface((320, 200), pygame.SRCALPHA)
self.color = (1, 13, 24, 205)
rect = pygame.Rect(10, 10, 56, 20)
drawn = self.draw_rect(self.surf, self.color, rect, 1)
self.assertEqual(drawn, rect)
# Should be colored where it's supposed to be
for pt in test_utils.rect_perimeter_pts(drawn):
color_at_pt = self.surf.get_at(pt)
self.assertEqual(color_at_pt, self.color)
# And not where it shouldn't
for pt in test_utils.rect_outer_bounds(drawn):
color_at_pt = self.surf.get_at(pt)
self.assertNotEqual(color_at_pt, self.color)
def test_rect__bounding_rect(self):
"""Ensures draw rect returns the correct bounding rect.
Tests rects on and off the surface and a range of width/thickness
values.
"""
rect_color = pygame.Color("red")
surf_color = pygame.Color("black")
min_width = min_height = 5
max_width = max_height = 7
sizes = ((min_width, min_height), (max_width, max_height))
surface = pygame.Surface((20, 20), 0, 32)
surf_rect = surface.get_rect()
# Make a rect that is bigger than the surface to help test drawing
# rects off and partially off the surface.
big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
for pos in rect_corners_mids_and_center(
surf_rect
) + rect_corners_mids_and_center(big_rect):
# Each of the rect's position attributes will be set to the pos
# value.
for attr in RECT_POSITION_ATTRIBUTES:
# Test using different rect sizes and thickness values.
for width, height in sizes:
rect = pygame.Rect((0, 0), (width, height))
setattr(rect, attr, pos)
for thickness in range(4):
surface.fill(surf_color) # Clear for each test.
bounding_rect = self.draw_rect(
surface, rect_color, rect, thickness
)
# Calculating the expected_rect after the rect is
# drawn (it uses what is actually drawn).
expected_rect = create_bounding_rect(
surface, surf_color, rect.topleft
)
self.assertEqual(
bounding_rect,
expected_rect,
"thickness={}".format(thickness),
)
    def test_rect__surface_clip(self):
        """Ensures draw rect respects a surface's clip area.

        Tests drawing the rect filled and unfilled.
        """
        surfw = surfh = 30
        rect_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (8, 10))
        clip_rect.center = surface.get_rect().center
        test_rect = clip_rect.copy()  # Manages the rect's pos.
        for width in (0, 1):  # Filled and unfilled.
            # Test centering the rect along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                # Get the expected points by drawing the rect without the
                # clip area set.
                test_rect.center = center
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_rect(surface, rect_color, test_rect, width)
                expected_pts = get_color_points(surface, rect_color, clip_rect)
                # Clear the surface and set the clip area. Redraw the rect
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_rect(surface, rect_color, test_rect, width)
                surface.lock()  # For possible speed up.
                # Check all the surface points to ensure only the expected_pts
                # are the rect_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = rect_color
                    else:
                        expected_color = surface_color
                    self.assertEqual(surface.get_at(pt), expected_color, pt)
                surface.unlock()
class DrawRectTest(DrawRectMixin, DrawTestCase):
    """Tests for the draw module's rect function.

    The general rect tests come from DrawRectMixin; tests specific to
    draw.rect belong here.
    """
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing rects.
# @unittest.skip('draw_py.draw_rect not supported yet')
# class PythonDrawRectTest(DrawRectMixin, PythonDrawTestCase):
# """Test draw_py module function draw_rect.
#
# This class inherits the general tests from DrawRectMixin. It is also the
# class to add any draw_py.draw_rect specific tests to.
# """
### Circle Testing ############################################################
class DrawCircleMixin(object):
"""Mixin tests for drawing circles.
This class contains all the general circle drawing tests.
"""
def test_circle__args(self):
"""Ensures draw circle accepts the correct args."""
bounds_rect = self.draw_circle(
pygame.Surface((3, 3)), (0, 10, 0, 50), (0, 0), 3, 1, 1, 0, 1, 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__args_without_width(self):
"""Ensures draw circle accepts the args without a width and
quadrants. """
bounds_rect = self.draw_circle(pygame.Surface((2, 2)), (0, 0, 0, 50),
(1, 1), 1)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__args_with_negative_width(self):
"""Ensures draw circle accepts the args with negative width."""
bounds_rect = self.draw_circle(
pygame.Surface((2, 2)), (0, 0, 0, 50), (1, 1), 1, -1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
self.assertEqual(bounds_rect, pygame.Rect(1, 1, 0, 0))
def test_circle__args_with_width_gt_radius(self):
"""Ensures draw circle accepts the args with width > radius."""
bounds_rect = self.draw_circle(
pygame.Surface((2, 2)), (0, 0, 0, 50), (1, 1), 2, 3, 0, 0, 0, 0
)
self.assertIsInstance(bounds_rect, pygame.Rect)
self.assertEqual(bounds_rect, pygame.Rect(0, 0, 2, 2))
def test_circle__kwargs(self):
"""Ensures draw circle accepts the correct kwargs
with and without a width and quadrant arguments.
"""
kwargs_list = [
{
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("yellow"),
"center": (2, 2),
"radius": 2,
"width": 1,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": False,
"draw_bottom_right": True
},
{
"surface": pygame.Surface((2, 1)),
"color": (0, 10, 20),
"center": (1, 1),
"radius": 1,
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_circle(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__kwargs_order_independent(self):
"""Ensures draw circle's kwargs are not order dependent."""
bounds_rect = self.draw_circle(
draw_top_right=False,
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
width=0,
draw_bottom_left=False,
center=(1, 0),
draw_bottom_right=False,
radius=2,
draw_top_left=True,
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__args_missing(self):
"""Ensures draw circle detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(surface, color, (0, 0))
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle()
def test_circle__kwargs_missing(self):
"""Ensures draw circle detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((1, 2)),
"color": pygame.Color("red"),
"center": (1, 0),
"radius": 2,
"width": 1,
"draw_top_right": False,
"draw_top_left": False,
"draw_bottom_left": False,
"draw_bottom_right": True
}
for name in ("radius", "center", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(**invalid_kwargs)
def test_circle__arg_invalid_types(self):
"""Ensures draw circle detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
center = (1, 1)
radius = 1
with self.assertRaises(TypeError):
# Invalid draw_top_right.
bounds_rect = self.draw_circle(surface, color, center, radius, 1,
"a", 1, 1, 1)
with self.assertRaises(TypeError):
# Invalid draw_top_left.
bounds_rect = self.draw_circle(surface, color, center, radius, 1,
1, "b", 1, 1)
with self.assertRaises(TypeError):
# Invalid draw_bottom_left.
bounds_rect = self.draw_circle(surface, color, center, radius, 1,
1, 1, "c", 1)
with self.assertRaises(TypeError):
# Invalid draw_bottom_right.
bounds_rect = self.draw_circle(surface, color, center, radius, 1,
1, 1, 1, "d")
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_circle(surface, color, center, radius, "1")
with self.assertRaises(TypeError):
# Invalid radius.
bounds_rect = self.draw_circle(surface, color, center, "2")
with self.assertRaises(TypeError):
# Invalid center.
bounds_rect = self.draw_circle(surface, color, (1, 2, 3), radius)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_circle(surface, 2.3, center, radius)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_circle((1, 2, 3, 4), color, center, radius)
def test_circle__kwarg_invalid_types(self):
"""Ensures draw circle detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
center = (0, 1)
radius = 1
width = 1
quadrant = 1
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"center": center,
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"center": center,
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": (1, 1, 1), # Invalid center.
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": "1", # Invalid radius.
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": 1.2, # Invalid width.
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": width,
"draw_top_right": "True", # Invalid draw_top_right
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": 'True', # Invalid draw_top_left
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": 3.14, # Invalid draw_bottom_left
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": width,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": "quadrant" # Invalid draw_bottom_right
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(**kwargs)
def test_circle__kwarg_invalid_name(self):
"""Ensures draw circle detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
center = (0, 0)
radius = 2
kwargs_list = [
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": 1,
"quadrant": 1,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
},
{
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(**kwargs)
    def test_circle__args_and_kwargs(self):
        """Ensures draw circle accepts a combination of args/kwargs"""
        surface = pygame.Surface((3, 1))
        color = (255, 255, 0, 0)
        center = (1, 0)
        radius = 2
        width = 0
        # NOTE(review): the positional draw_* locals below (False for
        # top_left/bottom_left) differ from the popped kwarg values (True);
        # both are valid bools so every call variant still succeeds.
        draw_top_right = True
        draw_top_left = False
        draw_bottom_left = False
        draw_bottom_right = True
        kwargs = {
            "surface": surface,
            "color": color,
            "center": center,
            "radius": radius,
            "width": width,
            "draw_top_right": True,
            "draw_top_left": True,
            "draw_bottom_left": True,
            "draw_bottom_right": True
        }
        # Pop one more name off the kwargs dict each iteration, passing an
        # ever longer positional prefix in its place.
        for name in ("surface", "color", "center", "radius", "width",
                     "draw_top_right", "draw_top_left", "draw_bottom_left",
                     "draw_bottom_right"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_circle(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_circle(surface, color, **kwargs)
            elif "center" == name:
                bounds_rect = self.draw_circle(surface, color, center, **kwargs)
            elif "radius" == name:
                bounds_rect = self.draw_circle(surface, color, center, radius, **kwargs)
            elif "width" == name:
                bounds_rect = self.draw_circle(
                    surface, color, center, radius, width, **kwargs
                )
            elif "draw_top_right" == name:
                bounds_rect = self.draw_circle(
                    surface, color, center, radius, width, draw_top_right, **kwargs
                )
            elif "draw_top_left" == name:
                bounds_rect = self.draw_circle(
                    surface, color, center, radius, width, draw_top_right, draw_top_left, **kwargs
                )
            elif "draw_bottom_left" == name:
                bounds_rect = self.draw_circle(
                    surface, color, center, radius, width, draw_top_right, draw_top_left, draw_bottom_left, **kwargs
                )
            else:
                bounds_rect = self.draw_circle(
                    surface, color, center, radius, width, draw_top_right, draw_top_left, draw_bottom_left, draw_bottom_right, **kwargs
                )
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__valid_width_values(self):
"""Ensures draw circle accepts different width values."""
center = (2, 2)
radius = 1
pos = (center[0] - radius, center[1])
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
color = (10, 20, 30, 255)
kwargs = {
"surface": surface,
"color": color,
"center": center,
"radius": radius,
"width": None,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
}
for width in (-100, -10, -1, 0, 1, 10, 100):
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = color if width >= 0 else surface_color
bounds_rect = self.draw_circle(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__valid_radius_values(self):
"""Ensures draw circle accepts different radius values."""
pos = center = (2, 2)
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
color = (10, 20, 30, 255)
kwargs = {
"surface": surface,
"color": color,
"center": center,
"radius": None,
"width": 0,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
}
for radius in (-10, -1, 0, 1, 10):
surface.fill(surface_color) # Clear for each test.
kwargs["radius"] = radius
expected_color = color if radius > 0 else surface_color
bounds_rect = self.draw_circle(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__valid_center_formats(self):
"""Ensures draw circle accepts different center formats."""
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((4, 4))
kwargs = {
"surface": surface,
"color": expected_color,
"center": None,
"radius": 1,
"width": 0,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
}
x, y = 2, 2 # center position
# The center values can be ints or floats.
for center in ((x, y), (x + 0.1, y), (x, y + 0.1), (x + 0.1, y + 0.1)):
# The center type can be a tuple/list/Vector2.
for seq_type in (tuple, list, Vector2):
surface.fill(surface_color) # Clear for each test.
kwargs["center"] = seq_type(center)
bounds_rect = self.draw_circle(**kwargs)
self.assertEqual(surface.get_at((x, y)), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__valid_color_formats(self):
"""Ensures draw circle accepts different color formats."""
center = (2, 2)
radius = 1
pos = (center[0] - radius, center[1])
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"center": center,
"radius": radius,
"width": 0,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
}
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_circle(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__invalid_color_formats(self):
"""Ensures draw circle handles invalid color formats correctly."""
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"center": (1, 2),
"radius": 1,
"width": 0,
"draw_top_right": True,
"draw_top_left": True,
"draw_bottom_left": True,
"draw_bottom_right": True
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_circle(**kwargs)
def test_circle__floats(self):
"""Ensure that floats are accepted."""
draw.circle(
surface=pygame.Surface((4, 4)),
color=(255, 255, 127),
center=(1.5, 1.5),
radius=1.3,
width=0,
draw_top_right=True,
draw_top_left=True,
draw_bottom_left=True,
draw_bottom_right=True
)
draw.circle(
surface=pygame.Surface((4, 4)),
color=(255, 255, 127),
center=Vector2(1.5, 1.5),
radius=1.3,
width=0,
draw_top_right=True,
draw_top_left=True,
draw_bottom_left=True,
draw_bottom_right=True
)
draw.circle(pygame.Surface((2, 2)), (0, 0, 0, 50), (1.3, 1.3), 1.2)
# def test_circle_clip(self):
# """ maybe useful to help work out circle clip algorithm."""
# MAX = max
# MIN = min
# posx=30
# posy=15
# radius=1
# l=29
# t=14
# r=30
# b=16
# clip_rect_x=0
# clip_rect_y=0
# clip_rect_w=30
# clip_rect_h=30
# l = MAX(posx - radius, clip_rect_x)
# t = MAX(posy - radius, clip_rect_y)
# r = MIN(posx + radius, clip_rect_x + clip_rect_w)
# b = MIN(posy + radius, clip_rect_y + clip_rect_h)
# l, t, MAX(r - l, 0), MAX(b - t, 0)
    def test_circle__bounding_rect(self):
        """Ensures draw circle returns the correct bounding rect.

        Tests circles on and off the surface and a range of width/thickness
        values.
        """
        circle_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        max_radius = 3
        surface = pygame.Surface((30, 30), 0, 32)
        surf_rect = surface.get_rect()
        # Make a rect that is bigger than the surface to help test drawing
        # circles off and partially off the surface. Make this rect such that
        # when centering the test circle on one of its corners, the circle is
        # drawn fully off the test surface, but a rect bounding the circle
        # would still overlap with the test surface.
        big_rect = surf_rect.inflate(max_radius * 2 - 1, max_radius * 2 - 1)
        for pos in rect_corners_mids_and_center(
            surf_rect
        ) + rect_corners_mids_and_center(big_rect):
            # Test using different radius and thickness values.
            for radius in range(max_radius + 1):
                for thickness in range(radius + 1):
                    surface.fill(surf_color)  # Clear for each test.
                    bounding_rect = self.draw_circle(
                        surface, circle_color, pos, radius, thickness
                    )
                    # Calculating the expected_rect after the circle is
                    # drawn (it uses what is actually drawn).
                    expected_rect = create_bounding_rect(surface, surf_color, pos)
                    # print("pos:%s:, radius:%s:, thickness:%s:" % (pos, radius, thickness))
                    self.assertEqual(bounding_rect, expected_rect)
def test_circle_negative_radius(self):
""" Ensures negative radius circles return zero sized bounding rect.
"""
surf = pygame.Surface((200, 200))
color = (0, 0, 0, 50)
center = surf.get_height() // 2, surf.get_height() // 2
bounding_rect = self.draw_circle(surf, color, center, radius=-1, width=1)
self.assertEqual(bounding_rect.size, (0, 0))
def test_circle_zero_radius(self):
""" Ensures zero radius circles does not draw a center pixel.
NOTE: This is backwards incompatible behaviour with 1.9.x.
"""
surf = pygame.Surface((200, 200))
circle_color = pygame.Color("red")
surf_color = pygame.Color("black")
surf.fill((0, 0, 0))
center = (100, 100)
radius = 0
width = 1
bounding_rect = self.draw_circle(surf, circle_color, center, radius, width)
expected_rect = create_bounding_rect(surf, surf_color, center)
self.assertEqual(bounding_rect, expected_rect)
self.assertEqual(bounding_rect, pygame.Rect(100, 100, 0, 0))
    def test_circle__surface_clip(self):
        """Ensures draw circle respects a surface's clip area.

        Tests drawing the circle filled and unfilled.
        """
        surfw = surfh = 25
        circle_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (10, 10))
        clip_rect.center = surface.get_rect().center
        # Radius chosen so the circle always overlaps the clip rect's edges.
        radius = clip_rect.w // 2 + 1
        for width in (0, 1):  # Filled and unfilled.
            # Test centering the circle along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                # Get the expected points by drawing the circle without the
                # clip area set.
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_circle(surface, circle_color, center, radius, width)
                expected_pts = get_color_points(surface, circle_color, clip_rect)
                # Clear the surface and set the clip area. Redraw the circle
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_circle(surface, circle_color, center, radius, width)
                surface.lock()  # For possible speed up.
                # Check all the surface points to ensure only the expected_pts
                # are the circle_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = circle_color
                    else:
                        expected_color = surface_color
                    self.assertEqual(surface.get_at(pt), expected_color, pt)
                surface.unlock()
    def test_circle_shape(self):
        """Ensures there are no holes in the circle, and no overdrawing.

        Tests drawing a thick circle.
        Measures the distance of the drawn pixels from the circle center.
        """
        surfw = surfh = 100
        circle_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        (cx, cy) = center = (50, 50)
        radius = 45
        width = 25
        dest_rect = self.draw_circle(surface, circle_color, center, radius, width)
        for pt in test_utils.rect_area_pts(dest_rect):
            x, y = pt
            # Compare squared distances to avoid sqrt; a 1-pixel tolerance
            # band is left at both ring edges for rasterization rounding.
            sqr_distance = (x - cx) ** 2 + (y - cy) ** 2
            # Pixels safely inside the drawn ring must be the circle color.
            if (radius - width + 1) ** 2 < sqr_distance < (radius - 1) ** 2:
                self.assertEqual(surface.get_at(pt), circle_color)
            # Pixels safely outside the ring must keep the fill color.
            if (
                sqr_distance < (radius - width - 1) ** 2
                or sqr_distance > (radius + 1) ** 2
            ):
                self.assertEqual(surface.get_at(pt), surface_color)
def test_circle__diameter(self):
""" Ensures draw circle is twice size of radius high and wide."""
surf = pygame.Surface((200, 200))
color = (0, 0, 0, 50)
center = surf.get_height() // 2, surf.get_height() // 2
width = 1
radius = 6
for radius in range(1, 65):
bounding_rect = self.draw_circle(surf, color, center, radius, width)
self.assertEqual(bounding_rect.width, radius * 2)
self.assertEqual(bounding_rect.height, radius * 2)
class DrawCircleTest(DrawCircleMixin, DrawTestCase):
    """Tests for the draw module's circle function.

    The general circle tests come from DrawCircleMixin; tests specific to
    draw.circle belong here.
    """
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing circles.
# @unittest.skip('draw_py.draw_circle not supported yet')
# class PythonDrawCircleTest(DrawCircleMixin, PythonDrawTestCase):
# """Test draw_py module function draw_circle."
#
# This class inherits the general tests from DrawCircleMixin. It is also
# the class to add any draw_py.draw_circle specific tests to.
# """
### Arc Testing ###############################################################
class DrawArcMixin(object):
"""Mixin tests for drawing arcs.
This class contains all the general arc drawing tests.
"""
def test_arc__args(self):
"""Ensures draw arc accepts the correct args."""
bounds_rect = self.draw_arc(
pygame.Surface((3, 3)), (0, 10, 0, 50), (1, 1, 2, 2), 0, 1, 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__args_without_width(self):
"""Ensures draw arc accepts the args without a width."""
bounds_rect = self.draw_arc(
pygame.Surface((2, 2)), (1, 1, 1, 99), pygame.Rect((0, 0), (2, 2)), 1.1, 2.1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__args_with_negative_width(self):
"""Ensures draw arc accepts the args with negative width."""
bounds_rect = self.draw_arc(
pygame.Surface((3, 3)), (10, 10, 50, 50), (1, 1, 2, 2), 0, 1, -1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
self.assertEqual(bounds_rect, pygame.Rect(1, 1, 0, 0))
def test_arc__args_with_width_gt_radius(self):
"""Ensures draw arc accepts the args with
width > rect.w // 2 and width > rect.h // 2.
"""
rect = pygame.Rect((0, 0), (4, 4))
bounds_rect = self.draw_arc(
pygame.Surface((3, 3)), (10, 10, 50, 50), rect, 0, 45, rect.w // 2 + 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
bounds_rect = self.draw_arc(
pygame.Surface((3, 3)), (10, 10, 50, 50), rect, 0, 45, rect.h // 2 + 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__kwargs(self):
"""Ensures draw arc accepts the correct kwargs
with and without a width arg.
"""
kwargs_list = [
{
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("yellow"),
"rect": pygame.Rect((0, 0), (3, 2)),
"start_angle": 0.5,
"stop_angle": 3,
"width": 1,
},
{
"surface": pygame.Surface((2, 1)),
"color": (0, 10, 20),
"rect": (0, 0, 2, 2),
"start_angle": 1,
"stop_angle": 3.1,
},
]
for kwargs in kwargs_list:
bounds_rect = self.draw_arc(**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__kwargs_order_independent(self):
"""Ensures draw arc's kwargs are not order dependent."""
bounds_rect = self.draw_arc(
stop_angle=1,
start_angle=2.2,
color=(1, 2, 3),
surface=pygame.Surface((3, 2)),
width=1,
rect=pygame.Rect((1, 0), (2, 3)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__args_missing(self):
"""Ensures draw arc detects any missing required args."""
surface = pygame.Surface((1, 1))
color = pygame.Color("red")
rect = pygame.Rect((0, 0), (2, 2))
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(surface, color, rect, 0.1)
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(surface, color, rect)
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc()
def test_arc__kwargs_missing(self):
"""Ensures draw arc detects any missing required kwargs."""
kwargs = {
"surface": pygame.Surface((1, 2)),
"color": pygame.Color("red"),
"rect": pygame.Rect((1, 0), (2, 2)),
"start_angle": 0.1,
"stop_angle": 2,
"width": 1,
}
for name in ("stop_angle", "start_angle", "rect", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name) # Pop from a copy.
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**invalid_kwargs)
def test_arc__arg_invalid_types(self):
"""Ensures draw arc detects invalid arg types."""
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
rect = pygame.Rect((1, 1), (3, 3))
with self.assertRaises(TypeError):
# Invalid width.
bounds_rect = self.draw_arc(surface, color, rect, 0, 1, "1")
with self.assertRaises(TypeError):
# Invalid stop_angle.
bounds_rect = self.draw_arc(surface, color, rect, 0, "1", 1)
with self.assertRaises(TypeError):
# Invalid start_angle.
bounds_rect = self.draw_arc(surface, color, rect, "1", 0, 1)
with self.assertRaises(TypeError):
# Invalid rect.
bounds_rect = self.draw_arc(surface, color, (1, 2, 3, 4, 5), 0, 1, 1)
with self.assertRaises(TypeError):
# Invalid color.
bounds_rect = self.draw_arc(surface, 2.3, rect, 0, 1, 1)
with self.assertRaises(TypeError):
# Invalid surface.
bounds_rect = self.draw_arc(rect, color, rect, 0, 1, 1)
def test_arc__kwarg_invalid_types(self):
"""Ensures draw arc detects invalid kwarg types."""
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
rect = pygame.Rect((0, 1), (4, 2))
start = 3
stop = 4
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": (0, 0, 0), # Invalid rect.
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": "1", # Invalid start_angle.
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": "1", # Invalid stop_angle.
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1.1,
},
] # Invalid width.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**kwargs)
def test_arc__kwarg_invalid_name(self):
"""Ensures draw arc detects invalid kwarg names."""
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
rect = pygame.Rect((0, 1), (2, 2))
start = 0.9
stop = 2.3
kwargs_list = [
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
"invalid": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**kwargs)
def test_arc__args_and_kwargs(self):
"""Ensures draw arc accepts a combination of args/kwargs"""
surface = pygame.Surface((3, 1))
color = (255, 255, 0, 0)
rect = pygame.Rect((1, 0), (2, 3))
start = 0.6
stop = 2
width = 1
kwargs = {
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": width,
}
for name in ("surface", "color", "rect", "start_angle", "stop_angle"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_arc(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_arc(surface, color, **kwargs)
elif "rect" == name:
bounds_rect = self.draw_arc(surface, color, rect, **kwargs)
elif "start_angle" == name:
bounds_rect = self.draw_arc(surface, color, rect, start, **kwargs)
elif "stop_angle" == name:
bounds_rect = self.draw_arc(surface, color, rect, start, stop, **kwargs)
else:
bounds_rect = self.draw_arc(
surface, color, rect, start, stop, width, **kwargs
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__valid_width_values(self):
"""Ensures draw arc accepts different width values."""
arc_color = pygame.Color("yellow")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": arc_color,
"rect": rect,
"start_angle": 0,
"stop_angle": 7,
"width": None,
}
for width in (-50, -10, -3, -2, -1, 0, 1, 2, 3, 10, 50):
msg = "width={}".format(width)
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = arc_color if width > 0 else surface_color
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_stop_angle_values(self):
"""Ensures draw arc accepts different stop_angle values."""
expected_color = pygame.Color("blue")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": rect,
"start_angle": -17,
"stop_angle": None,
"width": 1,
}
for stop_angle in (-10, -5.5, -1, 0, 1, 5.5, 10):
msg = "stop_angle={}".format(stop_angle)
surface.fill(surface_color) # Clear for each test.
kwargs["stop_angle"] = stop_angle
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_start_angle_values(self):
"""Ensures draw arc accepts different start_angle values."""
expected_color = pygame.Color("blue")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": rect,
"start_angle": None,
"stop_angle": 17,
"width": 1,
}
for start_angle in (-10.0, -5.5, -1, 0, 1, 5.5, 10.0):
msg = "start_angle={}".format(start_angle)
surface.fill(surface_color) # Clear for each test.
kwargs["start_angle"] = start_angle
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_rect_formats(self):
"""Ensures draw arc accepts different rect formats."""
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": None,
"start_angle": 0,
"stop_angle": 7,
"width": 1,
}
rects = (rect, (rect.topleft, rect.size), (rect.x, rect.y, rect.w, rect.h))
for rect in rects:
surface.fill(surface_color) # Clear for each test.
kwargs["rect"] = rect
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__valid_color_formats(self):
    """Ensures draw arc accepts different color formats."""
    green = pygame.Color("green")
    fill_color = pygame.Color("black")
    surf = pygame.Surface((6, 6))
    arc_rect = pygame.Rect((0, 0), (4, 4))
    arc_rect.center = surf.get_rect().center
    check_pos = (arc_rect.centerx + 1, arc_rect.centery + 1)

    # RGB tuple, RGBA tuple, mapped int, and Color object forms of green.
    color_formats = (
        (0, 255, 0),
        (0, 255, 0, 255),
        surf.map_rgb(green),
        green,
    )

    for color_arg in color_formats:
        surf.fill(fill_color)  # Reset the surface between cases.
        # A mapped int must be unmapped to compare against a pixel value.
        if isinstance(color_arg, int):
            expected = surf.unmap_rgb(color_arg)
        else:
            expected = green

        bounding = self.draw_arc(
            surface=surf,
            color=color_arg,
            rect=arc_rect,
            start_angle=0,
            stop_angle=7,
            width=1,
        )

        self.assertEqual(surf.get_at(check_pos), expected)
        self.assertIsInstance(bounding, pygame.Rect)
def test_arc__invalid_color_formats(self):
    """Ensures draw arc handles invalid color formats correctly."""
    pos = (1, 1)
    surf = pygame.Surface((4, 3))

    # Neither a float nor an arbitrary object is a valid color.
    for bad_color in (2.3, self):
        with self.assertRaises(TypeError):
            self.draw_arc(
                surface=surf,
                color=bad_color,
                rect=pygame.Rect(pos, (2, 2)),
                start_angle=5,
                stop_angle=6.1,
                width=1,
            )
def todo_test_arc(self):
    """Ensure draw arc works correctly."""
    # Placeholder: fails unconditionally until a full arc correctness
    # test is written.
    self.fail()
def test_arc__bounding_rect(self):
    """Ensures draw arc returns the correct bounding rect.

    Tests arcs on and off the surface and a range of width/thickness
    values.
    """
    arc_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    min_width = min_height = 5
    max_width = max_height = 7
    sizes = ((min_width, min_height), (max_width, max_height))
    surface = pygame.Surface((20, 20), 0, 32)
    surf_rect = surface.get_rect()
    # Make a rect that is bigger than the surface to help test drawing
    # arcs off and partially off the surface.
    big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
    # Max angle allows for a full circle to be drawn.
    start_angle = 0
    stop_angles = (0, 2, 3, 5, math.ceil(2 * math.pi))

    # Check a position on the surface and a position off (or partially
    # off) the surface for every anchor attribute.
    for pos in rect_corners_mids_and_center(
        surf_rect
    ) + rect_corners_mids_and_center(big_rect):
        # Each of the arc's rect position attributes will be set to the pos
        # value.
        for attr in RECT_POSITION_ATTRIBUTES:
            # Test using different rect sizes, thickness values and stop
            # angles.
            for width, height in sizes:
                arc_rect = pygame.Rect((0, 0), (width, height))
                setattr(arc_rect, attr, pos)

                for thickness in (0, 1, 2, 3, min(width, height)):
                    for stop_angle in stop_angles:
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_arc(
                            surface,
                            arc_color,
                            arc_rect,
                            start_angle,
                            stop_angle,
                            thickness,
                        )

                        # Calculating the expected_rect after the arc
                        # is drawn (it uses what is actually drawn).
                        expected_rect = create_bounding_rect(
                            surface, surf_color, arc_rect.topleft
                        )

                        self.assertEqual(
                            bounding_rect,
                            expected_rect,
                            "thickness={}".format(thickness),
                        )
def test_arc__surface_clip(self):
    """Ensures draw arc respects a surface's clip area."""
    surfw = surfh = 30
    start = 0.1
    end = 0  # end < start so a full circle will be drawn
    arc_color = pygame.Color("red")
    surface_color = pygame.Color("green")
    surface = pygame.Surface((surfw, surfh))
    surface.fill(surface_color)

    clip_rect = pygame.Rect((0, 0), (11, 11))
    clip_rect.center = surface.get_rect().center
    pos_rect = clip_rect.copy()  # Manages the arc's pos.

    for thickness in (1, 3):  # Different line widths.
        # Test centering the arc along the clip rect's edge.
        for center in rect_corners_mids_and_center(clip_rect):
            # Get the expected points by drawing the arc without the
            # clip area set (set_clip(None) disables clipping).
            pos_rect.center = center
            surface.set_clip(None)
            surface.fill(surface_color)
            self.draw_arc(surface, arc_color, pos_rect, start, end, thickness)
            expected_pts = get_color_points(surface, arc_color, clip_rect)

            # Clear the surface and set the clip area. Redraw the arc
            # and check that only the clip area is modified.
            surface.fill(surface_color)
            surface.set_clip(clip_rect)
            self.draw_arc(surface, arc_color, pos_rect, start, end, thickness)
            surface.lock()  # For possible speed up.

            # Check all the surface points to ensure only the expected_pts
            # are the arc_color.
            for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                if pt in expected_pts:
                    expected_color = arc_color
                else:
                    expected_color = surface_color

                self.assertEqual(surface.get_at(pt), expected_color, pt)

            surface.unlock()
class DrawArcTest(DrawArcMixin, DrawTestCase):
    """Tests for the draw.arc function.

    The shared arc tests are inherited from DrawArcMixin; any tests
    specific to draw.arc belong in this class.
    """
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing arcs.
# @unittest.skip('draw_py.draw_arc not supported yet')
# class PythonDrawArcTest(DrawArcMixin, PythonDrawTestCase):
# """Test draw_py module function draw_arc.
#
# This class inherits the general tests from DrawArcMixin. It is also the
# class to add any draw_py.draw_arc specific tests to.
# """
### Draw Module Testing #######################################################
class DrawModuleTest(unittest.TestCase):
    """General draw module tests."""

    def test_path_data_validation(self):
        """Test validation of multi-point drawing methods.

        See bug #521
        """
        surf = pygame.Surface((5, 5))
        rect = pygame.Rect(0, 0, 5, 5)
        bad_values = (
            "text",
            b"bytes",
            1 + 1j,  # string, bytes, complex,
            object(),
            (lambda x: x),
        )  # object, function
        bad_points = list(bad_values) + [(1,), (1, 2, 3)]  # wrong tuple length
        bad_points.extend((1, v) for v in bad_values)  # one wrong value
        good_path = [(1, 1), (1, 3), (3, 3), (3, 1)]
        # All surface points checked after each call attempt.
        check_pts = [(x, y) for x in range(5) for y in range(5)]

        for method, is_polygon in (
            (draw.lines, 0),
            (draw.aalines, 0),
            (draw.polygon, 1),
        ):
            for val in bad_values:
                # Case 1: invalid value at the beginning of the path.
                draw.rect(surf, RED, rect, 0)
                with self.assertRaises(TypeError):
                    if is_polygon:
                        method(surf, GREEN, [val] + good_path, 0)
                    else:
                        method(surf, GREEN, True, [val] + good_path)

                # Make sure nothing was drawn.
                self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts))

                # Case 2: invalid value in the middle of the path
                # (was not checked before bug #521 was fixed).
                draw.rect(surf, RED, rect, 0)
                with self.assertRaises(TypeError):
                    path = good_path[:2] + [val] + good_path[2:]
                    if is_polygon:
                        method(surf, GREEN, path, 0)
                    else:
                        method(surf, GREEN, True, path)

                # Make sure nothing was drawn.
                self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts))

    def test_color_validation(self):
        """Ensures draw functions accept valid colors and reject invalid ones."""
        surf = pygame.Surface((10, 10))
        points = ((0, 0), (1, 1), (1, 0))

        # 1. Valid color formats: mapped int, RGB tuple, Color, hex and
        # named color strings.
        for col in (123456, (1, 10, 100), RED, '#ab12df', 'red'):
            draw.line(surf, col, (0, 0), (1, 1))
            draw.aaline(surf, col, (0, 0), (1, 1))
            draw.aalines(surf, col, True, points)
            draw.lines(surf, col, True, points)
            draw.arc(surf, col, pygame.Rect(0, 0, 3, 3), 15, 150)
            draw.ellipse(surf, col, pygame.Rect(0, 0, 3, 6), 1)
            draw.circle(surf, col, (7, 3), 2)
            draw.polygon(surf, col, points, 0)

        # 2. Invalid colors: each draw function must raise TypeError.
        for col in (1.256, object(), None):
            draw_calls = (
                (draw.line, (surf, col, (0, 0), (1, 1))),
                (draw.aaline, (surf, col, (0, 0), (1, 1))),
                (draw.aalines, (surf, col, True, points)),
                (draw.lines, (surf, col, True, points)),
                (draw.arc, (surf, col, pygame.Rect(0, 0, 3, 3), 15, 150)),
                (draw.ellipse, (surf, col, pygame.Rect(0, 0, 3, 6), 1)),
                (draw.circle, (surf, col, (7, 3), 2)),
                (draw.polygon, (surf, col, points, 0)),
            )
            for draw_fn, args in draw_calls:
                with self.assertRaises(TypeError):
                    draw_fn(*args)
###############################################################################
# Run all tests in this module when it is executed directly.
if __name__ == "__main__":
    unittest.main()
| 36.119384 | 135 | 0.533909 | import math
import unittest
import sys
import pygame
from pygame import draw
from pygame import draw_py
from pygame.locals import SRCALPHA
from pygame.tests import test_utils
from pygame.math import Vector2
PY3 = sys.version_info >= (3, 0, 0)
RED = BG_RED = pygame.Color("red")
GREEN = FG_GREEN = pygame.Color("green")
# Rect attribute names used (via setattr) to anchor a test rect at each
# corner, edge midpoint, and the center.
RECT_POSITION_ATTRIBUTES = (
    "topleft",
    "midtop",
    "topright",
    "midright",
    "bottomright",
    "midbottom",
    "bottomleft",
    "midleft",
    "center",
)
def get_border_values(surface, width, height):
    """Return the colors along each edge of the surface.

    The result is a list of four lists of colors, in the order:
    top row, left column, right column, bottom row.
    """
    read = surface.get_at
    top = [read((x, 0)) for x in range(width)]
    left = [read((0, y)) for y in range(height)]
    right = [read((width - 1, y)) for y in range(height)]
    bottom = [read((x, height - 1)) for x in range(width)]
    return [top, left, right, bottom]
def corners(surface):
    """Return the four corner coordinates of the surface.

    Order: topleft, topright, bottomright, bottomleft.
    """
    w, h = surface.get_size()
    right, bottom = w - 1, h - 1
    return ((0, 0), (right, 0), (right, bottom), (0, bottom))
def rect_corners_mids_and_center(rect):
    """Return the rect's corners, edge midpoints, and center as a tuple.

    Order: clockwise from the topleft corner, with the center last.
    """
    anchor_names = (
        "topleft",
        "midtop",
        "topright",
        "midright",
        "bottomright",
        "midbottom",
        "bottomleft",
        "midleft",
        "center",
    )
    return tuple(getattr(rect, name) for name in anchor_names)
def border_pos_and_color(surface):
    """Yield a (position, color) pair for every border pixel of surface.

    Traversal is clockwise starting at the topleft corner.
    """
    width, height = surface.get_size()
    right, bottom = width - 1, height - 1

    def walk_border():
        # Top edge, left to right.
        for x in range(width):
            yield (x, 0)
        # Right edge, top to bottom (topright already visited).
        for y in range(1, height):
            yield (right, y)
        # Bottom edge, right to left (bottomright already visited).
        for x in range(right - 1, -1, -1):
            yield (x, bottom)
        # Left edge, bottom to top (both corners already visited).
        for y in range(bottom - 1, 0, -1):
            yield (0, y)

    for pos in walk_border():
        yield pos, surface.get_at(pos)
def get_color_points(surface, color, bounds_rect=None, match_color=True):
    """Return the points whose pixel matches (or differs from) color.

    Searches the whole surface, or only the area of bounds_rect when one
    is given. With match_color=False the points that do NOT have the
    given color are returned instead.
    """
    if bounds_rect is None:
        xs = range(surface.get_width())
        ys = range(surface.get_height())
    else:
        xs = range(bounds_rect.left, bounds_rect.right)
        ys = range(bounds_rect.top, bounds_rect.bottom)

    read = surface.get_at
    if match_color:
        keep = lambda pt: read(pt) == color
    else:
        keep = lambda pt: read(pt) != color

    surface.lock()  # Lock once for faster repeated pixel access.
    points = [(x, y) for x in xs for y in ys if keep((x, y))]
    surface.unlock()
    return points
def create_bounding_rect(surface, surf_color, default_pos):
    """Create a rect bounding all pixels that differ from surf_color.

    Only the surface's clip area is searched. When every pixel matches
    surf_color, a zero-sized rect positioned at default_pos is returned.
    """
    width, height = surface.get_clip().size
    xmin, ymin = width, height
    xmax = ymax = -1
    read = surface.get_at

    surface.lock()  # Lock once for faster repeated pixel access.
    for y in range(height):
        for x in range(width):
            if read((x, y)) != surf_color:
                if x < xmin:
                    xmin = x
                if x > xmax:
                    xmax = x
                if y < ymin:
                    ymin = y
                if y > ymax:
                    ymax = y
    surface.unlock()

    if xmax == -1:
        # No differing pixel was found.
        return pygame.Rect(default_pos, (0, 0))
    return pygame.Rect((xmin, ymin), (xmax - xmin + 1, ymax - ymin + 1))
class InvalidBool(object):
    """An object that cannot be evaluated as a boolean.

    Setting __bool__ (and the Python 2 era __nonzero__) to None makes
    truth-testing an instance raise a TypeError.
    """

    __bool__ = __nonzero__ = None
class DrawTestCase(unittest.TestCase):
    """Base TestCase exposing the draw module's functions as staticmethods.

    Mixin test classes call these via self.draw_* so the same tests can
    also be run against alternate implementations.
    """

    draw_rect = staticmethod(draw.rect)
    draw_polygon = staticmethod(draw.polygon)
    draw_circle = staticmethod(draw.circle)
    draw_ellipse = staticmethod(draw.ellipse)
    draw_arc = staticmethod(draw.arc)
    draw_line = staticmethod(draw.line)
    draw_lines = staticmethod(draw.lines)
    draw_aaline = staticmethod(draw.aaline)
    draw_aalines = staticmethod(draw.aalines)
class PythonDrawTestCase(unittest.TestCase):
draw_polygon = staticmethod(draw_py.draw_polygon)
draw_line = staticmethod(draw_py.draw_line)
draw_lines = staticmethod(draw_py.draw_lines)
draw_aaline = staticmethod(draw_py.draw_aaline)
draw_aalines = staticmethod(draw_py.draw_aalines)
**kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__kwargs_order_independent(self):
bounds_rect = self.draw_ellipse(
color=(1, 2, 3),
surface=pygame.Surface((3, 2)),
width=0,
rect=pygame.Rect((1, 0), (1, 1)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__args_missing(self):
    """Ensures draw ellipse detects missing required args."""
    surf = pygame.Surface((1, 1))

    # Progressively fewer args; each call is short of the required set.
    for args in ((surf, pygame.Color("red")), (surf,), ()):
        with self.assertRaises(TypeError):
            self.draw_ellipse(*args)
def test_ellipse__kwargs_missing(self):
kwargs = {
"surface": pygame.Surface((1, 2)),
"color": pygame.Color("red"),
"rect": pygame.Rect((1, 0), (2, 2)),
"width": 2,
}
for name in ("rect", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name)
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**invalid_kwargs)
def test_ellipse__arg_invalid_types(self):
surface = pygame.Surface((2, 2))
color = pygame.Color("blue")
rect = pygame.Rect((1, 1), (1, 1))
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(surface, color, rect, "1")
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(surface, color, (1, 2, 3, 4, 5), 1)
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(surface, 2.3, rect, 0)
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(rect, color, rect, 2)
def test_ellipse__kwarg_invalid_types(self):
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
rect = pygame.Rect((0, 1), (1, 1))
kwargs_list = [
{
"surface": pygame.Surface,
"color": color,
"rect": rect,
"width": 1,
},
{
"surface": surface,
"color": 2.3,
"rect": rect,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": (0, 0, 0),
"width": 1,
},
{"surface": surface, "color": color, "rect": rect, "width": 1.1},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
def test_ellipse__kwarg_invalid_name(self):
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
rect = pygame.Rect((0, 1), (2, 2))
kwargs_list = [
{
"surface": surface,
"color": color,
"rect": rect,
"width": 1,
"invalid": 1,
},
{"surface": surface, "color": color, "rect": rect, "invalid": 1},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
def test_ellipse__args_and_kwargs(self):
surface = pygame.Surface((3, 1))
color = (255, 255, 0, 0)
rect = pygame.Rect((1, 0), (2, 1))
width = 0
kwargs = {"surface": surface, "color": color, "rect": rect, "width": width}
for name in ("surface", "color", "rect", "width"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_ellipse(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_ellipse(surface, color, **kwargs)
elif "rect" == name:
bounds_rect = self.draw_ellipse(surface, color, rect, **kwargs)
else:
bounds_rect = self.draw_ellipse(surface, color, rect, width, **kwargs)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_width_values(self):
pos = (1, 1)
surface_color = pygame.Color("white")
surface = pygame.Surface((3, 4))
color = (10, 20, 30, 255)
kwargs = {
"surface": surface,
"color": color,
"rect": pygame.Rect(pos, (3, 2)),
"width": None,
}
for width in (-1000, -10, -1, 0, 1, 10, 1000):
surface.fill(surface_color)
kwargs["width"] = width
expected_color = color if width >= 0 else surface_color
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_rect_formats(self):
pos = (1, 1)
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((4, 4))
kwargs = {"surface": surface, "color": expected_color, "rect": None, "width": 0}
rects = (pygame.Rect(pos, (1, 3)), (pos, (2, 1)), (pos[0], pos[1], 1, 1))
for rect in rects:
surface.fill(surface_color)
kwargs["rect"] = rect
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__valid_color_formats(self):
pos = (1, 1)
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((3, 4))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (1, 2)),
"width": 0,
}
reds = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in reds:
surface.fill(surface_color)
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_ellipse(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_ellipse__invalid_color_formats(self):
pos = (1, 1)
surface = pygame.Surface((4, 3))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (2, 2)),
"width": 1,
}
for expected_color in (2.3, surface):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_ellipse(**kwargs)
def test_ellipse(self):
left_top = [(0, 0), (1, 0), (0, 1), (1, 1)]
sizes = [(4, 4), (5, 4), (4, 5), (5, 5)]
color = (1, 13, 24, 255)
def same_size(width, height, border_width):
surface = pygame.Surface((width, height))
self.draw_ellipse(surface, color, (0, 0, width, height), border_width)
borders = get_border_values(surface, width, height)
for border in borders:
self.assertTrue(color in border)
def not_same_size(width, height, border_width, left, top):
surface = pygame.Surface((width, height))
self.draw_ellipse(
surface, color, (left, top, width - 1, height - 1), border_width
)
borders = get_border_values(surface, width, height)
sides_touching = [color in border for border in borders].count(True)
self.assertEqual(sides_touching, 2)
for width, height in sizes:
for border_width in (0, 1):
same_size(width, height, border_width)
for left, top in left_top:
not_same_size(width, height, border_width, left, top)
def test_ellipse__thick_line(self):
ellipse_color = pygame.Color("yellow")
surface_color = pygame.Color("black")
surface = pygame.Surface((40, 40))
rect = pygame.Rect((0, 0), (31, 23))
rect.center = surface.get_rect().center
for thickness in range(1, min(*rect.size) // 2 - 2):
surface.fill(surface_color)
self.draw_ellipse(surface, ellipse_color, rect, thickness)
surface.lock()
x = rect.centerx
y_start = rect.top
y_end = rect.top + thickness - 1
for y in range(y_start, y_end + 1):
self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)
# Check pixels above and below this line.
self.assertEqual(surface.get_at((x, y_start - 1)), surface_color, thickness)
self.assertEqual(surface.get_at((x, y_end + 1)), surface_color, thickness)
# Check vertical thickness on the ellipse's bottom.
x = rect.centerx
y_start = rect.bottom - thickness
y_end = rect.bottom - 1
for y in range(y_start, y_end + 1):
self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)
self.assertEqual(surface.get_at((x, y_start - 1)), surface_color, thickness)
self.assertEqual(surface.get_at((x, y_end + 1)), surface_color, thickness)
x_start = rect.left
x_end = rect.left + thickness - 1
y = rect.centery
for x in range(x_start, x_end + 1):
self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)
# Check pixels to the left and right of this line.
self.assertEqual(surface.get_at((x_start - 1, y)), surface_color, thickness)
self.assertEqual(surface.get_at((x_end + 1, y)), surface_color, thickness)
# Check horizontal thickness on the ellipse's right.
x_start = rect.right - thickness
x_end = rect.right - 1
y = rect.centery
for x in range(x_start, x_end + 1):
self.assertEqual(surface.get_at((x, y)), ellipse_color, thickness)
self.assertEqual(surface.get_at((x_start - 1, y)), surface_color, thickness)
self.assertEqual(surface.get_at((x_end + 1, y)), surface_color, thickness)
surface.unlock()
def test_ellipse__max_width(self):
ellipse_color = pygame.Color("yellow")
surface_color = pygame.Color("black")
surface = pygame.Surface((40, 40))
rect = pygame.Rect((0, 0), (31, 21))
rect.center = surface.get_rect().center
max_thickness = (min(*rect.size) + 1) // 2
for thickness in range(max_thickness, max_thickness + 3):
surface.fill(surface_color)
self.draw_ellipse(surface, ellipse_color, rect, thickness)
surface.lock()
for y in range(rect.top, rect.bottom):
self.assertEqual(surface.get_at((rect.centerx, y)), ellipse_color)
for x in range(rect.left, rect.right):
self.assertEqual(surface.get_at((x, rect.centery)), ellipse_color)
self.assertEqual(
surface.get_at((rect.centerx, rect.top - 1)), surface_color
)
self.assertEqual(
surface.get_at((rect.centerx, rect.bottom + 1)), surface_color
)
self.assertEqual(
surface.get_at((rect.left - 1, rect.centery)), surface_color
)
self.assertEqual(
surface.get_at((rect.right + 1, rect.centery)), surface_color
)
surface.unlock()
def _check_1_pixel_sized_ellipse(
self, surface, collide_rect, surface_color, ellipse_color
):
surf_w, surf_h = surface.get_size()
surface.lock()
for pos in ((x, y) for y in range(surf_h) for x in range(surf_w)):
if collide_rect.collidepoint(pos):
expected_color = ellipse_color
else:
expected_color = surface_color
self.assertEqual(
surface.get_at(pos),
expected_color,
"collide_rect={}, pos={}".format(collide_rect, pos),
)
surface.unlock()
def test_ellipse__1_pixel_width(self):
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("black")
surf_w, surf_h = 10, 20
surface = pygame.Surface((surf_w, surf_h))
rect = pygame.Rect((0, 0), (1, 0))
collide_rect = rect.copy()
off_left = -1
off_right = surf_w
off_bottom = surf_h
center_x = surf_w // 2
center_y = surf_h // 2
for ellipse_h in range(6, 10):
collide_rect.h = ellipse_h + 1
rect.h = ellipse_h
# Calculate some variable positions.
off_top = -(ellipse_h + 1)
half_off_top = -(ellipse_h // 2)
half_off_bottom = surf_h - (ellipse_h // 2)
# Draw the ellipse in different positions: fully on-surface,
# partially off-surface, and fully off-surface.
positions = (
(off_left, off_top),
(off_left, half_off_top),
(off_left, center_y),
(off_left, half_off_bottom),
(off_left, off_bottom),
(center_x, off_top),
(center_x, half_off_top),
(center_x, center_y),
(center_x, half_off_bottom),
(center_x, off_bottom),
(off_right, off_top),
(off_right, half_off_top),
(off_right, center_y),
(off_right, half_off_bottom),
(off_right, off_bottom),
)
for rect_pos in positions:
surface.fill(surface_color) # Clear before each draw.
rect.topleft = rect_pos
collide_rect.topleft = rect_pos
self.draw_ellipse(surface, ellipse_color, rect)
self._check_1_pixel_sized_ellipse(
surface, collide_rect, surface_color, ellipse_color
)
def test_ellipse__1_pixel_width_spanning_surface(self):
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("black")
surf_w, surf_h = 10, 20
surface = pygame.Surface((surf_w, surf_h))
rect = pygame.Rect((0, 0), (1, surf_h + 2)) # Longer than the surface.
# Draw the ellipse in different positions: on-surface and off-surface.
positions = (
(-1, -1), # (off_left, off_top)
(0, -1), # (left_edge, off_top)
(surf_w // 2, -1), # (center_x, off_top)
(surf_w - 1, -1), # (right_edge, off_top)
(surf_w, -1),
) # (off_right, off_top)
for rect_pos in positions:
surface.fill(surface_color) # Clear before each draw.
rect.topleft = rect_pos
self.draw_ellipse(surface, ellipse_color, rect)
self._check_1_pixel_sized_ellipse(
surface, rect, surface_color, ellipse_color
)
def test_ellipse__1_pixel_height(self):
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("black")
surf_w, surf_h = 20, 10
surface = pygame.Surface((surf_w, surf_h))
rect = pygame.Rect((0, 0), (0, 1))
collide_rect = rect.copy()
# Calculate some positions.
off_right = surf_w
off_top = -1
off_bottom = surf_h
center_x = surf_w // 2
center_y = surf_h // 2
# Test some even and odd widths.
for ellipse_w in range(6, 10):
# The ellipse is drawn on the edge of the rect so collide_rect
# needs +1 width to track where it's drawn.
collide_rect.w = ellipse_w + 1
rect.w = ellipse_w
off_left = -(ellipse_w + 1)
half_off_left = -(ellipse_w // 2)
half_off_right = surf_w - (ellipse_w // 2)
positions = (
(off_left, off_top),
(half_off_left, off_top),
(center_x, off_top),
(half_off_right, off_top),
(off_right, off_top),
(off_left, center_y),
(half_off_left, center_y),
(center_x, center_y),
(half_off_right, center_y),
(off_right, center_y),
(off_left, off_bottom),
(half_off_left, off_bottom),
(center_x, off_bottom),
(half_off_right, off_bottom),
(off_right, off_bottom),
)
for rect_pos in positions:
surface.fill(surface_color)
rect.topleft = rect_pos
collide_rect.topleft = rect_pos
self.draw_ellipse(surface, ellipse_color, rect)
self._check_1_pixel_sized_ellipse(
surface, collide_rect, surface_color, ellipse_color
)
def test_ellipse__1_pixel_height_spanning_surface(self):
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("black")
surf_w, surf_h = 20, 10
surface = pygame.Surface((surf_w, surf_h))
rect = pygame.Rect((0, 0), (surf_w + 2, 1))
positions = (
(-1, -1),
(-1, 0),
(-1, surf_h // 2),
(-1, surf_h - 1),
(-1, surf_h),
)
for rect_pos in positions:
surface.fill(surface_color)
rect.topleft = rect_pos
self.draw_ellipse(surface, ellipse_color, rect)
self._check_1_pixel_sized_ellipse(
surface, rect, surface_color, ellipse_color
)
def test_ellipse__1_pixel_width_and_height(self):
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("black")
surf_w, surf_h = 10, 10
surface = pygame.Surface((surf_w, surf_h))
rect = pygame.Rect((0, 0), (1, 1))
off_left = -1
off_right = surf_w
off_top = -1
off_bottom = surf_h
left_edge = 0
right_edge = surf_w - 1
top_edge = 0
bottom_edge = surf_h - 1
center_x = surf_w // 2
center_y = surf_h // 2
positions = (
(off_left, off_top),
(off_left, top_edge),
(off_left, center_y),
(off_left, bottom_edge),
(off_left, off_bottom),
(left_edge, off_top),
(left_edge, top_edge),
(left_edge, center_y),
(left_edge, bottom_edge),
(left_edge, off_bottom),
(center_x, off_top),
(center_x, top_edge),
(center_x, center_y),
(center_x, bottom_edge),
(center_x, off_bottom),
(right_edge, off_top),
(right_edge, top_edge),
(right_edge, center_y),
(right_edge, bottom_edge),
(right_edge, off_bottom),
(off_right, off_top),
(off_right, top_edge),
(off_right, center_y),
(off_right, bottom_edge),
(off_right, off_bottom),
)
for rect_pos in positions:
surface.fill(surface_color)
rect.topleft = rect_pos
self.draw_ellipse(surface, ellipse_color, rect)
self._check_1_pixel_sized_ellipse(
surface, rect, surface_color, ellipse_color
)
def test_ellipse__bounding_rect(self):
ellipse_color = pygame.Color("red")
surf_color = pygame.Color("black")
min_width = min_height = 5
max_width = max_height = 7
sizes = ((min_width, min_height), (max_width, max_height))
surface = pygame.Surface((20, 20), 0, 32)
surf_rect = surface.get_rect()
big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
for pos in rect_corners_mids_and_center(
surf_rect
) + rect_corners_mids_and_center(big_rect):
# the pos value.
for attr in RECT_POSITION_ATTRIBUTES:
# Test using different rect sizes and thickness values.
for width, height in sizes:
ellipse_rect = pygame.Rect((0, 0), (width, height))
setattr(ellipse_rect, attr, pos)
for thickness in (0, 1, 2, 3, min(width, height)):
surface.fill(surf_color) # Clear for each test.
bounding_rect = self.draw_ellipse(
surface, ellipse_color, ellipse_rect, thickness
)
# Calculating the expected_rect after the ellipse
# is drawn (it uses what is actually drawn).
expected_rect = create_bounding_rect(
surface, surf_color, ellipse_rect.topleft
)
self.assertEqual(bounding_rect, expected_rect)
def test_ellipse__surface_clip(self):
surfw = surfh = 30
ellipse_color = pygame.Color("red")
surface_color = pygame.Color("green")
surface = pygame.Surface((surfw, surfh))
surface.fill(surface_color)
clip_rect = pygame.Rect((0, 0), (11, 11))
clip_rect.center = surface.get_rect().center
pos_rect = clip_rect.copy() # Manages the ellipse's pos.
for width in (0, 1):
for center in rect_corners_mids_and_center(clip_rect):
# Get the expected points by drawing the ellipse without the
# clip area set.
pos_rect.center = center
surface.set_clip(None)
surface.fill(surface_color)
self.draw_ellipse(surface, ellipse_color, pos_rect, width)
expected_pts = get_color_points(surface, ellipse_color, clip_rect)
# Clear the surface and set the clip area. Redraw the ellipse
# and check that only the clip area is modified.
surface.fill(surface_color)
surface.set_clip(clip_rect)
self.draw_ellipse(surface, ellipse_color, pos_rect, width)
surface.lock() # For possible speed up.
# Check all the surface points to ensure only the expected_pts
# are the ellipse_color.
for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
if pt in expected_pts:
expected_color = ellipse_color
else:
expected_color = surface_color
self.assertEqual(surface.get_at(pt), expected_color, pt)
surface.unlock()
class DrawEllipseTest(DrawEllipseMixin, DrawTestCase):
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing ellipses.
# @unittest.skip('draw_py.draw_ellipse not supported yet')
# class PythonDrawEllipseTest(DrawEllipseMixin, PythonDrawTestCase):
# """Test draw_py module function draw_ellipse.
#
# This class inherits the general tests from DrawEllipseMixin. It is also
# the class to add any draw_py.draw_ellipse specific tests to.
# """
### Line/Lines/AALine/AALines Testing #########################################
class BaseLineMixin(object):
    """Shared setup and helpers for the line-drawing test mixins."""

    # A spread of fully saturated RGB colors used across line tests.
    COLORS = (
        (0, 0, 0),
        (255, 0, 0),
        (0, 255, 0),
        (0, 0, 255),
        (255, 255, 0),
        (255, 0, 255),
        (0, 255, 255),
        (255, 255, 255),
    )

    @staticmethod
    def _create_surfaces():
        # Create surfaces with different sizes, depths, and flags.
        surfaces = []
        for size in ((49, 49), (50, 50)):
            for depth in (8, 16, 24, 32):
                for flags in (0, SRCALPHA):
                    surface = pygame.display.set_mode(size, flags, depth)
                    surfaces.append(surface)
                    surfaces.append(surface.convert_alpha())
        return surfaces

    @staticmethod
    def _rect_lines(rect):
        # Yields endpoint pairs radiating from the rect's midleft, each
        # followed by its reverse (to test drawing symmetry).
        for pt in rect_corners_mids_and_center(rect):
            if pt == rect.midleft or pt == rect.center:
                # Skip degenerate/zero-length candidates.
                continue
            yield (rect.midleft, pt)
            yield (pt, rect.midleft)
,
"color": pygame.Color("red"),
"start_pos": (2, 1),
"end_pos": (2, 2),
"width": 1,
}
for name in ("end_pos", "start_pos", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name)
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**invalid_kwargs)
def test_line__arg_invalid_types(self):
    """Ensures draw line detects invalid arg types."""
    surf = pygame.Surface((2, 2))
    color = pygame.Color("blue")
    start = (0, 1)
    end = (1, 2)

    # Each entry is a full positional argument list with one invalid arg.
    invalid_arg_lists = (
        (surf, color, start, end, "1"),  # invalid width
        (surf, color, start, (1, 2, 3)),  # invalid end_pos
        (surf, color, (1,), end),  # invalid start_pos
        (surf, 2.3, start, end),  # invalid color
        ((1, 2, 3, 4), color, start, end),  # invalid surface
    )

    for args in invalid_arg_lists:
        with self.assertRaises(TypeError):
            self.draw_line(*args)
def test_line__kwarg_invalid_types(self):
    """Ensures draw line detects invalid kwarg types."""
    valid_kwargs = {
        "surface": pygame.Surface((3, 3)),
        "color": pygame.Color("green"),
        "start_pos": (1, 0),
        "end_pos": (2, 0),
        "width": 1,
    }

    # Each entry replaces exactly one valid kwarg with an invalid value.
    invalid_overrides = (
        {"surface": pygame.Surface},  # class, not instance
        {"color": 2.3},  # float is not a color
        {"start_pos": (0, 0, 0)},  # too many coordinates
        {"end_pos": (0,)},  # too few coordinates
        {"width": 1.2},  # width must be an int
    )

    for override in invalid_overrides:
        kwargs = dict(valid_kwargs, **override)
        with self.assertRaises(TypeError):
            self.draw_line(**kwargs)
def test_line__kwarg_invalid_name(self):
    """Ensures draw line detects invalid kwarg names."""
    base_kwargs = {
        "surface": pygame.Surface((2, 3)),
        "color": pygame.Color("cyan"),
        "start_pos": (1, 1),
        "end_pos": (2, 0),
    }

    # Try the unknown kwarg both with and without the optional width.
    for extra in ({"width": 1, "invalid": 1}, {"invalid": 1}):
        kwargs = dict(base_kwargs, **extra)
        with self.assertRaises(TypeError):
            self.draw_line(**kwargs)
def test_line__args_and_kwargs(self):
    """Ensures draw line accepts a mix of positional args and kwargs."""
    surface = pygame.Surface((3, 2))
    color = (255, 255, 0, 0)
    start_pos = (0, 1)
    end_pos = (1, 2)
    width = 0
    kwargs = {
        "surface": surface,
        "color": color,
        "start_pos": start_pos,
        "end_pos": end_pos,
        "width": width,
    }

    # Pop one kwarg per iteration; it (and all earlier parameters) are
    # then passed positionally while the rest stay as kwargs.
    for name in ("surface", "color", "start_pos", "end_pos", "width"):
        kwargs.pop(name)

        if "surface" == name:
            bounds_rect = self.draw_line(surface, **kwargs)
        elif "color" == name:
            bounds_rect = self.draw_line(surface, color, **kwargs)
        elif "start_pos" == name:
            bounds_rect = self.draw_line(surface, color, start_pos, **kwargs)
        elif "end_pos" == name:
            bounds_rect = self.draw_line(
                surface, color, start_pos, end_pos, **kwargs
            )
        else:
            bounds_rect = self.draw_line(
                surface, color, start_pos, end_pos, width, **kwargs
            )

        self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_line__valid_width_values(self):
        """Ensures draw line accepts different width values."""
        line_color = pygame.Color("yellow")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (2, 1)
        kwargs = {
            "surface": surface,
            "color": line_color,
            "start_pos": pos,
            "end_pos": (2, 2),
            "width": None,
        }
        # Widths <= 0 are expected to draw nothing.
        for width in (-100, -10, -1, 0, 1, 10, 100):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["width"] = width
            expected_color = line_color if width > 0 else surface_color
            bounds_rect = self.draw_line(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_line__valid_start_pos_formats(self):
        """Ensures draw line accepts different start_pos formats."""
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": None,
            "end_pos": (2, 2),
            "width": 2,
        }
        x, y = 2, 1  # start position
        # Fractional offsets should still color the pixel at (x, y).
        for start_pos in ((x, y), (x + 0.1, y), (x, y + 0.1), (x + 0.1, y + 0.1)):
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["start_pos"] = seq_type(start_pos)
                bounds_rect = self.draw_line(**kwargs)
                self.assertEqual(surface.get_at((x, y)), expected_color)
                self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_line__valid_end_pos_formats(self):
        """Ensures draw line accepts different end_pos formats."""
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": (2, 1),
            "end_pos": None,
            "width": 2,
        }
        x, y = 2, 2  # end position
        # Fractional offsets should still color the pixel at (x, y).
        for end_pos in ((x, y), (x + 0.2, y), (x, y + 0.2), (x + 0.2, y + 0.2)):
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["end_pos"] = seq_type(end_pos)
                bounds_rect = self.draw_line(**kwargs)
                self.assertEqual(surface.get_at((x, y)), expected_color)
                self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__invalid_start_pos_formats(self):
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": None,
"end_pos": (2, 2),
"width": 1,
}
start_pos_fmts = (
(2,),
(2, 1, 0),
(2, "1"),
set([2, 1]),
dict(((2, 1),)),
)
for start_pos in start_pos_fmts:
kwargs["start_pos"] = start_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__invalid_end_pos_formats(self):
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"start_pos": (2, 2),
"end_pos": None,
"width": 1,
}
end_pos_fmts = (
(2,),
(2, 1, 0),
(2, "1"),
set([2, 1]),
dict(((2, 1),)),
)
for end_pos in end_pos_fmts:
kwargs["end_pos"] = end_pos
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
    def test_line__valid_color_formats(self):
        """Ensures draw line accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": None,
            "start_pos": pos,
            "end_pos": (2, 1),
            "width": 3,
        }
        # RGB tuple, RGBA tuple, mapped int and Color object forms of green.
        greens = (
            (0, 255, 0),
            (0, 255, 0, 255),
            surface.map_rgb(green_color),
            green_color,
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                # A mapped int must be unmapped via the surface's format.
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_line(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_line__invalid_color_formats(self):
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"start_pos": (1, 1),
"end_pos": (2, 1),
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_line(**kwargs)
def test_line__color(self):
pos = (0, 0)
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_line(surface, expected_color, pos, (1, 0))
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def todo_test_line__color_with_thickness(self):
        """Ensures a thick line is drawn using the correct color."""
        self.fail()
def test_line__gaps(self):
expected_color = (255, 255, 255)
for surface in self._create_surfaces():
width = surface.get_width()
self.draw_line(surface, expected_color, (0, 0), (width - 1, 0))
for x in range(width):
pos = (x, 0)
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def todo_test_line__gaps_with_thickness(self):
        """Ensures a thick line is drawn without any gaps."""
        self.fail()
    def test_line__bounding_rect(self):
        """Ensures draw line returns the correct bounding rect.

        Tests lines with endpoints on and off the surface and a range of
        width/thickness values.
        """
        if isinstance(self, PythonDrawTestCase):
            self.skipTest("bounding rects not supported in draw_py.draw_line")
        line_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        helper_rect = pygame.Rect((0, 0), (width, height))
        # Testing surfaces of different sizes: one larger than helper_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            # Move the helper rect to different positions to test line
            # endpoints on and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                helper_rect.center = pos
                for thickness in range(-1, 5):
                    for start, end in self._rect_lines(helper_rect):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_line(
                            surface, line_color, start, end, thickness
                        )
                        if 0 < thickness:
                            # Calculating the expected_rect after the line is
                            # drawn (it uses what is actually drawn).
                            expected_rect = create_bounding_rect(
                                surface, surf_color, start
                            )
                        else:
                            # Nothing drawn: degenerate rect at start point.
                            expected_rect = pygame.Rect(start, (0, 0))
                        self.assertEqual(
                            bounding_rect,
                            expected_rect,
                            "start={}, end={}, size={}, thickness={}".format(
                                start, end, size, thickness
                            ),
                        )
    def test_line__surface_clip(self):
        """Ensures draw line respects a surface's clip area."""
        surfw = surfh = 30
        line_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the line's pos.
        for thickness in (1, 3):  # Test different line widths.
            # Test centering the line along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                pos_rect.center = center
                # Get the expected points by drawing the line without the
                # clip area set.
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_line(
                    surface, line_color, pos_rect.midtop, pos_rect.midbottom, thickness
                )
                expected_pts = get_color_points(surface, line_color, clip_rect)
                # Clear the surface, set the clip area and redraw the line;
                # only the clip area may be modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_line(
                    surface, line_color, pos_rect.midtop, pos_rect.midbottom, thickness
                )
                surface.lock()  # For possible speed up.
                # Check all surface points: only expected_pts are line_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = line_color
                    else:
                        expected_color = surface_color
                    self.assertEqual(surface.get_at(pt), expected_color, pt)
                surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing lines.
#
# class PythonDrawLineTest(LineMixin, PythonDrawTestCase):
#     """Test draw_py module function line.
#
#     This class inherits the general tests from LineMixin. It is also the class
#     to add any draw_py.draw_line specific tests to.
#     """
class DrawLineTest(LineMixin, DrawTestCase):
    """Test draw module function line.

    This class inherits the general tests from LineMixin. It is also the
    class to add any draw.line specific tests to.
    """

    def test_line_endianness(self):
        """Test color component order on 24 and 32 bit surfaces."""
        for depth in (24, 32):
            surface = pygame.Surface((5, 3), 0, depth)
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_line(surface, pygame.Color(255, 0, 0), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).r, 0, "there should be red here")
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_line(surface, pygame.Color(0, 0, 255), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).b, 0, "there should be blue here")

    def test_line(self):
        """Tests the endpoints, fill and bounding rect of drawn lines."""
        self.surf_size = (320, 200)
        self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
        self.color = (1, 13, 24, 205)
        drawn = draw.line(self.surf, self.color, (1, 0), (200, 0))
        self.assertEqual(
            drawn.right, 201, "end point arg should be (or at least was) inclusive"
        )
        # Line should be colored where it's supposed to be...
        for pt in test_utils.rect_area_pts(drawn):
            self.assertEqual(self.surf.get_at(pt), self.color)
        # And not where it shouldn't
        for pt in test_utils.rect_outer_bounds(drawn):
            self.assertNotEqual(self.surf.get_at(pt), self.color)
        # Endpoints laid out around the surface's border (inset by offset).
        line_width = 2
        offset = 5
        a = (offset, offset)
        b = (self.surf_size[0] - offset, a[1])
        c = (a[0], self.surf_size[1] - offset)
        d = (b[0], c[1])
        e = (a[0] + offset, c[1])
        f = (b[0], c[0] + 5)
        lines = [
            (a, d),
            (b, c),
            (c, b),
            (d, a),
            (a, b),
            (b, a),
            (a, c),
            (c, a),
            (a, e),
            (e, a),
            (a, f),
            (f, a),
            (a, a),
        ]
        for p1, p2 in lines:
            msg = "%s - %s" % (p1, p2)
            if p1[0] <= p2[0]:
                plow = p1
                phigh = p2
            else:
                plow = p2
                phigh = p1
            self.surf.fill((0, 0, 0))
            rec = draw.line(self.surf, (255, 255, 255), p1, p2, line_width)
            xinc = yinc = 0
            # Thick lines are widened along their minor axis.
            if abs(p1[0] - p2[0]) > abs(p1[1] - p2[1]):
                yinc = 1
            else:
                xinc = 1
            # Both endpoints must be drawn for every strip of the width.
            for i in range(line_width):
                p = (p1[0] + xinc * i, p1[1] + yinc * i)
                self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg)
                p = (p2[0] + xinc * i, p2[1] + yinc * i)
                self.assertEqual(self.surf.get_at(p), (255, 255, 255), msg)
            # Pixels just beyond the line's thickness must stay black.
            p = (plow[0] - 1, plow[1])
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)
            p = (plow[0] + xinc * line_width, plow[1] + yinc * line_width)
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)
            p = (phigh[0] + xinc * line_width, phigh[1] + yinc * line_width)
            self.assertEqual(self.surf.get_at(p), (0, 0, 0), msg)
            # The returned rect must cover the line plus its width.
            if p1[0] < p2[0]:
                rx = p1[0]
            else:
                rx = p2[0]
            if p1[1] < p2[1]:
                ry = p1[1]
            else:
                ry = p2[1]
            w = abs(p2[0] - p1[0]) + 1 + xinc * (line_width - 1)
            h = abs(p2[1] - p1[1]) + 1 + yinc * (line_width - 1)
            msg += ", %s" % (rec,)
            self.assertEqual(rec, (rx, ry, w, h), msg)

    def test_line_for_gaps(self):
        """Ensures thick lines are solid (no stray background pixels)."""
        width = 200
        height = 200
        surf = pygame.Surface((width, height), pygame.SRCALPHA)

        def white_surrounded_pixels(x, y):
            # Count the 4-neighbors of (x, y) that are white.
            offsets = [(1, 0), (0, 1), (-1, 0), (0, -1)]
            WHITE = (255, 255, 255, 255)
            return len(
                [1 for dx, dy in offsets if surf.get_at((x + dx, y + dy)) == WHITE]
            )

        def check_white_line(start, end):
            surf.fill((0, 0, 0))
            pygame.draw.line(surf, (255, 255, 255), start, end, 30)
            BLACK = (0, 0, 0, 255)
            for x in range(1, width - 1):
                for y in range(1, height - 1):
                    if surf.get_at((x, y)) == BLACK:
                        # A black pixel with 3+ white neighbors would be a
                        # gap inside the line.
                        self.assertTrue(white_surrounded_pixels(x, y) < 3)

        check_white_line((50, 50), (140, 0))
        check_white_line((50, 50), (0, 120))
        check_white_line((50, 50), (199, 198))
"color": pygame.Color("red"),
"closed": 1,
"points": ((2, 2), (1, 1)),
"width": 1,
}
for name in ("points", "closed", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name)
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**invalid_kwargs)
    def test_lines__arg_invalid_types(self):
        """Ensures draw lines detects invalid arg types."""
        surface = pygame.Surface((2, 2))
        color = pygame.Color("blue")
        closed = 0
        points = ((1, 2), (2, 1))
        with self.assertRaises(TypeError):
            # Invalid width.
            bounds_rect = self.draw_lines(surface, color, closed, points, "1")
        with self.assertRaises(TypeError):
            # Invalid points.
            bounds_rect = self.draw_lines(surface, color, closed, (1, 2, 3))
        with self.assertRaises(TypeError):
            # Invalid closed.
            bounds_rect = self.draw_lines(surface, color, InvalidBool(), points)
        with self.assertRaises(TypeError):
            # Invalid color.
            bounds_rect = self.draw_lines(surface, 2.3, closed, points)
        with self.assertRaises(TypeError):
            # Invalid surface.
            bounds_rect = self.draw_lines((1, 2, 3, 4), color, closed, points)
def test_lines__kwarg_invalid_types(self):
valid_kwargs = {
"surface": pygame.Surface((3, 3)),
"color": pygame.Color("green"),
"closed": False,
"points": ((1, 2), (2, 1)),
"width": 1,
}
invalid_kwargs = {
"surface": pygame.Surface,
"color": 2.3,
"closed": InvalidBool(),
"points": (0, 0, 0),
"width": 1.2,
}
for kwarg in ("surface", "color", "closed", "points", "width"):
kwargs = dict(valid_kwargs)
kwargs[kwarg] = invalid_kwargs[kwarg]
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
    def test_lines__kwarg_invalid_name(self):
        """Ensures draw lines detects invalid kwarg names."""
        surface = pygame.Surface((2, 3))
        color = pygame.Color("cyan")
        closed = 1
        points = ((1, 2), (2, 1))
        # Each kwargs dict includes an unexpected "invalid" keyword.
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "width": 1,
                "invalid": 1,
            },
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "invalid": 1,
            },
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_lines(**kwargs)
    def test_lines__args_and_kwargs(self):
        """Ensures draw lines accepts a combination of args/kwargs."""
        surface = pygame.Surface((3, 2))
        color = (255, 255, 0, 0)
        closed = 0
        points = ((1, 2), (2, 1))
        width = 1
        kwargs = {
            "surface": surface,
            "color": color,
            "closed": closed,
            "points": points,
            "width": width,
        }
        # Pop one kwarg at a time and pass it positionally instead.
        for name in ("surface", "color", "closed", "points", "width"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_lines(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_lines(surface, color, **kwargs)
            elif "closed" == name:
                bounds_rect = self.draw_lines(surface, color, closed, **kwargs)
            elif "points" == name:
                bounds_rect = self.draw_lines(surface, color, closed, points, **kwargs)
            else:
                bounds_rect = self.draw_lines(
                    surface, color, closed, points, width, **kwargs
                )
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_lines__valid_width_values(self):
        """Ensures draw lines accepts different width values."""
        line_color = pygame.Color("yellow")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": line_color,
            "closed": False,
            "points": (pos, (2, 1)),
            "width": None,
        }
        # Widths <= 0 are expected to draw nothing.
        for width in (-100, -10, -1, 0, 1, 10, 100):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["width"] = width
            expected_color = line_color if width > 0 else surface_color
            bounds_rect = self.draw_lines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_lines__valid_points_format(self):
        """Ensures draw lines accepts different points formats."""
        expected_color = (10, 20, 30, 255)
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": None,
            "width": 1,
        }
        # Each individual point can be a tuple/list/Vector2.
        point_types = (
            (tuple, tuple, tuple, tuple),  # all tuples
            (list, list, list, list),  # all lists
            (Vector2, Vector2, Vector2, Vector2),  # all Vector2s
            (list, Vector2, tuple, Vector2),  # mix of types
        )
        # Point coordinates can be ints or floats.
        point_values = (
            ((1, 1), (2, 1), (2, 2), (1, 2)),
            ((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
        )
        # The points sequence itself can be a tuple or a list.
        seq_types = (tuple, list)
        for point_type in point_types:
            for values in point_values:
                check_pos = values[0]
                points = [point_type[i](pt) for i, pt in enumerate(values)]
                for seq_type in seq_types:
                    surface.fill(surface_color)  # Clear for each test.
                    kwargs["points"] = seq_type(points)
                    bounds_rect = self.draw_lines(**kwargs)
                    self.assertEqual(surface.get_at(check_pos), expected_color)
                    self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_lines__invalid_points_formats(self):
        """Ensures draw lines handles invalid points formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "closed": False,
            "points": None,
            "width": 1,
        }
        points_fmts = (
            ((1, 1), (2,)),  # Too few coords.
            ((1, 1), (2, 2, 2)),  # Too many coords.
            ((1, 1), (2, "2")),  # Wrong type.
            ((1, 1), set([2, 3])),  # Set used as a point.
            ((1, 1), dict(((2, 2), (3, 3)))),  # Dict used as a point.
            set(((1, 1), (1, 2))),  # Set used for the points sequence.
            dict(((1, 1), (4, 4))),  # Dict used for the points sequence.
        )
        for points in points_fmts:
            kwargs["points"] = points
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_lines(**kwargs)
def test_lines__invalid_points_values(self):
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"closed": False,
"points": None,
"width": 1,
}
for points in ([], ((1, 1),)):
for seq_type in (tuple, list):
kwargs["points"] = seq_type(points)
with self.assertRaises(ValueError):
bounds_rect = self.draw_lines(**kwargs)
    def test_lines__valid_closed_values(self):
        """Ensures draw lines accepts different closed values."""
        line_color = pygame.Color("blue")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (1, 2)  # A point on the closing segment of the square.
        kwargs = {
            "surface": surface,
            "color": line_color,
            "closed": None,
            "points": ((1, 1), (3, 1), (3, 3), (1, 3)),
            "width": 1,
        }
        # closed is evaluated by general truthiness.
        true_values = (-7, 1, 10, "2", 3.1, (4,), [5], True)
        false_values = (None, "", 0, (), [], False)
        for closed in true_values + false_values:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["closed"] = closed
            expected_color = line_color if closed else surface_color
            bounds_rect = self.draw_lines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_lines__valid_color_formats(self):
        """Ensures draw lines accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": None,
            "closed": False,
            "points": (pos, (2, 1)),
            "width": 3,
        }
        # RGB tuple, RGBA tuple, mapped int and Color object forms of green.
        greens = (
            (0, 255, 0),
            (0, 255, 0, 255),
            surface.map_rgb(green_color),
            green_color,
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                # A mapped int must be unmapped via the surface's format.
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_lines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_lines__invalid_color_formats(self):
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"closed": False,
"points": ((1, 1), (1, 2)),
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_lines(**kwargs)
    def test_lines__color(self):
        """Tests if the lines drawn are the correct color."""
        for surface in self._create_surfaces():
            for expected_color in self.COLORS:
                self.draw_lines(surface, expected_color, True, corners(surface))
                for pos, color in border_pos_and_color(surface):
                    self.assertEqual(color, expected_color, "pos={}".format(pos))
    def todo_test_lines__color_with_thickness(self):
        """Ensures thick lines are drawn using the correct color."""
        self.fail()
    def test_lines__gaps(self):
        """Tests if the lines drawn contain any gaps."""
        expected_color = (255, 255, 255)
        for surface in self._create_surfaces():
            self.draw_lines(surface, expected_color, True, corners(surface))
            for pos, color in border_pos_and_color(surface):
                self.assertEqual(color, expected_color, "pos={}".format(pos))
    def todo_test_lines__gaps_with_thickness(self):
        """Ensures thick lines are drawn without any gaps."""
        self.fail()
    def test_lines__bounding_rect(self):
        """Ensures draw lines returns the correct bounding rect.

        Tests lines with endpoints on and off the surface and a range of
        width/thickness values.
        """
        line_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        pos_rect = pygame.Rect((0, 0), (width, height))
        # Testing surfaces of different sizes: one larger than pos_rect and
        # one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            # Move pos_rect to different positions to test line endpoints on
            # and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                pos_rect.center = pos
                pts = (pos_rect.midleft, pos_rect.midtop, pos_rect.midright)
                pos = pts[0]  # Rect position if nothing drawn.
                for thickness in range(-1, 5):
                    for closed in (True, False):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_lines(
                            surface, line_color, closed, pts, thickness
                        )
                        if 0 < thickness:
                            # Calculating the expected_rect after the lines
                            # are drawn (it uses what is actually drawn).
                            expected_rect = create_bounding_rect(
                                surface, surf_color, pos
                            )
                        else:
                            # Nothing drawn: degenerate rect at first point.
                            expected_rect = pygame.Rect(pos, (0, 0))
                        self.assertEqual(bounding_rect, expected_rect)
    def test_lines__surface_clip(self):
        """Ensures draw lines respects a surface's clip area."""
        surfw = surfh = 30
        line_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the lines' pos.
        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            pts = (pos_rect.midtop, pos_rect.center, pos_rect.midbottom)
            for closed in (True, False):  # Test closed and not closed.
                for thickness in (1, 3):  # Test different line widths.
                    # Get the expected points by drawing the lines without the
                    # clip area set.
                    surface.set_clip(None)
                    surface.fill(surface_color)
                    self.draw_lines(surface, line_color, closed, pts, thickness)
                    expected_pts = get_color_points(surface, line_color, clip_rect)
                    # Clear the surface and set the clip area. Redraw the lines
                    # and check that only the clip area is modified.
                    surface.fill(surface_color)
                    surface.set_clip(clip_rect)
                    self.draw_lines(surface, line_color, closed, pts, thickness)
                    surface.lock()  # For possible speed up.
                    # Check all the surface points to ensure only the
                    # expected_pts are the line_color.
                    for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                        if pt in expected_pts:
                            expected_color = line_color
                        else:
                            expected_color = surface_color
                        self.assertEqual(surface.get_at(pt), expected_color, pt)
                    surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing lines.
# class PythonDrawLinesTest(LinesMixin, PythonDrawTestCase):
# """Test draw_py module function lines.
#
# This class inherits the general tests from LinesMixin. It is also the
# class to add any draw_py.draw_lines specific tests to.
# """
class DrawLinesTest(LinesMixin, DrawTestCase):
### AALine Testing ############################################################
class AALineMixin(BaseLineMixin):
def test_aaline__args(self):
bounds_rect = self.draw_aaline(
pygame.Surface((3, 3)), (0, 10, 0, 50), (0, 0), (1, 1), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__args_without_blend(self):
bounds_rect = self.draw_aaline(
pygame.Surface((2, 2)), (0, 0, 0, 50), (0, 0), (2, 2)
)
self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aaline__kwargs(self):
        """Ensures draw aaline accepts the correct kwargs.

        Tested with and without a blend arg.
        """
        surface = pygame.Surface((4, 4))
        color = pygame.Color("yellow")
        start_pos = (1, 1)
        end_pos = (2, 2)
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
                "blend": 1,
            },
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
            },
        ]
        for kwargs in kwargs_list:
            bounds_rect = self.draw_aaline(**kwargs)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__kwargs_order_independent(self):
bounds_rect = self.draw_aaline(
start_pos=(1, 2),
end_pos=(2, 1),
blend=1,
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aaline__args_missing(self):
        """Ensures draw aaline detects any missing required args."""
        surface = pygame.Surface((1, 1))
        color = pygame.Color("blue")
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aaline(surface, color, (0, 0))
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aaline(surface, color)
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aaline(surface)
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_aaline()
    def test_aaline__kwargs_missing(self):
        """Ensures draw aaline detects any missing required kwargs."""
        kwargs = {
            "surface": pygame.Surface((3, 2)),
            "color": pygame.Color("red"),
            "start_pos": (2, 1),
            "end_pos": (2, 2),
            "blend": 1,
        }
        for name in ("end_pos", "start_pos", "color", "surface"):
            invalid_kwargs = dict(kwargs)
            invalid_kwargs.pop(name)  # Pop from a copy.
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aaline(**invalid_kwargs)
    def test_aaline__arg_invalid_types(self):
        """Ensures draw aaline detects invalid arg types."""
        surface = pygame.Surface((2, 2))
        color = pygame.Color("blue")
        start_pos = (0, 1)
        end_pos = (1, 2)
        with self.assertRaises(TypeError):
            # Invalid blend.
            bounds_rect = self.draw_aaline(surface, color, start_pos, end_pos, "1")
        with self.assertRaises(TypeError):
            # Invalid end_pos.
            bounds_rect = self.draw_aaline(surface, color, start_pos, (1, 2, 3))
        with self.assertRaises(TypeError):
            # Invalid start_pos.
            bounds_rect = self.draw_aaline(surface, color, (1,), end_pos)
        with self.assertRaises(ValueError):
            # Invalid color.
            bounds_rect = self.draw_aaline(surface, "invalid-color", start_pos, end_pos)
        with self.assertRaises(TypeError):
            # Invalid surface.
            bounds_rect = self.draw_aaline((1, 2, 3, 4), color, start_pos, end_pos)
    def test_aaline__kwarg_invalid_types(self):
        """Ensures draw aaline detects invalid kwarg types."""
        surface = pygame.Surface((3, 3))
        color = pygame.Color("green")
        start_pos = (1, 0)
        end_pos = (2, 0)
        blend = 1
        kwargs_list = [
            {
                "surface": pygame.Surface,  # Invalid surface.
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
                "blend": blend,
            },
            {
                "surface": surface,
                "color": 2.3,  # Invalid color.
                "start_pos": start_pos,
                "end_pos": end_pos,
                "blend": blend,
            },
            {
                "surface": surface,
                "color": color,
                "start_pos": (0, 0, 0),  # Invalid start_pos.
                "end_pos": end_pos,
                "blend": blend,
            },
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": (0,),  # Invalid end_pos.
                "blend": blend,
            },
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
                "blend": 1.2,  # Invalid blend.
            },
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aaline(**kwargs)
    def test_aaline__kwarg_invalid_name(self):
        """Ensures draw aaline detects invalid kwarg names."""
        surface = pygame.Surface((2, 3))
        color = pygame.Color("cyan")
        start_pos = (1, 1)
        end_pos = (2, 0)
        # Each kwargs dict includes an unexpected "invalid" keyword.
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
                "blend": 1,
                "invalid": 1,
            },
            {
                "surface": surface,
                "color": color,
                "start_pos": start_pos,
                "end_pos": end_pos,
                "invalid": 1,
            },
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aaline(**kwargs)
    def test_aaline__args_and_kwargs(self):
        """Ensures draw aaline accepts a combination of args/kwargs."""
        surface = pygame.Surface((3, 2))
        color = (255, 255, 0, 0)
        start_pos = (0, 1)
        end_pos = (1, 2)
        blend = 0
        kwargs = {
            "surface": surface,
            "color": color,
            "start_pos": start_pos,
            "end_pos": end_pos,
            "blend": blend,
        }
        # Pop one kwarg at a time and pass it positionally instead.
        for name in ("surface", "color", "start_pos", "end_pos", "blend"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_aaline(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_aaline(surface, color, **kwargs)
            elif "start_pos" == name:
                bounds_rect = self.draw_aaline(surface, color, start_pos, **kwargs)
            elif "end_pos" == name:
                bounds_rect = self.draw_aaline(
                    surface, color, start_pos, end_pos, **kwargs
                )
            else:
                bounds_rect = self.draw_aaline(
                    surface, color, start_pos, end_pos, blend, **kwargs
                )
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aaline__valid_blend_values(self):
        """Ensures draw aaline accepts different blend values."""
        expected_color = pygame.Color("yellow")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (2, 1)
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": pos,
            "end_pos": (2, 2),
            "blend": None,
        }
        for blend in (-10, -2, -1, 0, 1, 2, 10):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["blend"] = blend
            bounds_rect = self.draw_aaline(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aaline__valid_start_pos_formats(self):
        """Ensures draw aaline accepts different start_pos formats."""
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": None,
            "end_pos": (2, 2),
            "blend": 0,
        }
        x, y = 2, 1  # start position
        positions = ((x, y), (x + 0.01, y), (x, y + 0.01), (x + 0.01, y + 0.01))
        for start_pos in positions:
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["start_pos"] = seq_type(start_pos)
                bounds_rect = self.draw_aaline(**kwargs)
                color = surface.get_at((x, y))
                for i, sub_color in enumerate(expected_color):
                    # The color could be slightly off the expected color due to
                    # any fractional position arguments.
                    self.assertGreaterEqual(color[i] + 5, sub_color, start_pos)
                self.assertIsInstance(bounds_rect, pygame.Rect, start_pos)
    def test_aaline__valid_end_pos_formats(self):
        """Ensures draw aaline accepts different end_pos formats."""
        expected_color = pygame.Color("red")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((4, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "start_pos": (2, 1),
            "end_pos": None,
            "blend": 0,
        }
        x, y = 2, 2  # end position
        positions = ((x, y), (x + 0.02, y), (x, y + 0.02), (x + 0.02, y + 0.02))
        for end_pos in positions:
            for seq_type in (tuple, list, Vector2):
                surface.fill(surface_color)  # Clear for each test.
                kwargs["end_pos"] = seq_type(end_pos)
                bounds_rect = self.draw_aaline(**kwargs)
                color = surface.get_at((x, y))
                for i, sub_color in enumerate(expected_color):
                    # The color could be slightly off the expected color due to
                    # any fractional position arguments.
                    self.assertGreaterEqual(color[i] + 15, sub_color, end_pos)
                self.assertIsInstance(bounds_rect, pygame.Rect, end_pos)
    def test_aaline__invalid_start_pos_formats(self):
        """Ensures draw aaline handles invalid start_pos formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "start_pos": None,
            "end_pos": (2, 2),
            "blend": 0,
        }
        start_pos_fmts = (
            (2,),  # Too few coords.
            (2, 1, 0),  # Too many coords.
            (2, "1"),  # Wrong type.
            set([2, 1]),  # Wrong type.
            dict(((2, 1),)),  # Wrong type.
        )
        for start_pos in start_pos_fmts:
            kwargs["start_pos"] = start_pos
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aaline(**kwargs)
    def test_aaline__invalid_end_pos_formats(self):
        """Ensures draw aaline handles invalid end_pos formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "start_pos": (2, 2),
            "end_pos": None,
            "blend": 0,
        }
        end_pos_fmts = (
            (2,),  # Too few coords.
            (2, 1, 0),  # Too many coords.
            (2, "1"),  # Wrong type.
            set([2, 1]),  # Wrong type.
            dict(((2, 1),)),  # Wrong type.
        )
        for end_pos in end_pos_fmts:
            kwargs["end_pos"] = end_pos
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aaline(**kwargs)
    def test_aaline__valid_color_formats(self):
        """Ensures draw aaline accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": None,
            "start_pos": pos,
            "end_pos": (2, 1),
            "blend": 0,
        }
        # RGB tuple, RGBA tuple, mapped int and Color object forms of green.
        greens = (
            (0, 255, 0),
            (0, 255, 0, 255),
            surface.map_rgb(green_color),
            green_color,
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                # A mapped int must be unmapped via the surface's format.
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_aaline(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aaline__invalid_color_formats(self):
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"start_pos": (1, 1),
"end_pos": (2, 1),
"blend": 0,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_aaline(**kwargs)
def test_aaline__color(self):
pos = (0, 0)
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_aaline(surface, expected_color, pos, (1, 0))
self.assertEqual(
surface.get_at(pos), expected_color, "pos={}".format(pos)
)
    def test_aaline__gaps(self):
        """Tests if the aaline drawn contains any gaps.

        See: https://github.com/pygame/pygame/issues/512
        """
        expected_color = (255, 255, 255)
        for surface in self._create_surfaces():
            width = surface.get_width()
            self.draw_aaline(surface, expected_color, (0, 0), (width - 1, 0))
            for x in range(width):
                pos = (x, 0)
                self.assertEqual(
                    surface.get_at(pos), expected_color, "pos={}".format(pos)
                )
    def test_aaline__bounding_rect(self):
        """Ensures draw aaline returns the correct bounding rect.

        Tests lines with endpoints on and off the surface and blending
        enabled and disabled.
        """
        line_color = pygame.Color("red")
        surf_color = pygame.Color("blue")
        width = height = 30
        # Using a rect to help manage where the lines are drawn.
        helper_rect = pygame.Rect((0, 0), (width, height))
        # Testing surfaces of different sizes. One larger than the helper_rect
        # and one smaller (to test lines that span the surface).
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            # Move the helper rect to different positions to test line
            # endpoints on and off the surface.
            for pos in rect_corners_mids_and_center(surf_rect):
                helper_rect.center = pos
                for blend in (False, True):  # Test non-blending and blending.
                    for start, end in self._rect_lines(helper_rect):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_aaline(
                            surface, line_color, start, end, blend
                        )
                        # Calculating the expected_rect after the line is
                        # drawn (it uses what is actually drawn).
                        expected_rect = create_bounding_rect(surface, surf_color, start)
                        self.assertEqual(bounding_rect, expected_rect)
    def test_aaline__surface_clip(self):
        """Ensures draw aaline respects a surface's clip area."""
        surfw = surfh = 30
        aaline_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the aaline's pos.
        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing the aaline over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            for blend in (0, 1):  # Test non-blending and blending.
                # Get the expected points by drawing the aaline without the
                # clip area set.
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_aaline(
                    surface, aaline_color, pos_rect.midtop, pos_rect.midbottom, blend
                )
                # Antialiased pixels vary in color, so collect every point
                # inside the clip area that differs from the background.
                expected_pts = get_color_points(
                    surface, surface_color, clip_rect, False
                )
                # Clear the surface and set the clip area. Redraw the aaline
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_aaline(
                    surface, aaline_color, pos_rect.midtop, pos_rect.midbottom, blend
                )
                surface.lock()  # For possible speed up.
                # Check all the surface points: only the expected_pts may
                # differ from the surface_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        self.assertNotEqual(surface.get_at(pt), surface_color, pt)
                    else:
                        self.assertEqual(surface.get_at(pt), surface_color, pt)
                surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing anti-aliased lines.
#
# class PythonDrawAALineTest(AALineMixin, PythonDrawTestCase):
#     """Test draw_py module function aaline.
#
#     This class inherits the general tests from AALineMixin. It is also the
#     class to add any draw_py.draw_aaline specific tests to.
#     """
class DrawAALineTest(AALineMixin, DrawTestCase):
    def test_aaline_endianness(self):
        """Test color component order on 24 and 32 bit surfaces."""
        for depth in (24, 32):
            surface = pygame.Surface((5, 3), 0, depth)
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_aaline(surface, pygame.Color(255, 0, 0), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).r, 0, "there should be red here")
            surface.fill(pygame.Color(0, 0, 0))
            self.draw_aaline(surface, pygame.Color(0, 0, 255), (0, 1), (2, 1), 1)
            self.assertGreater(surface.get_at((1, 1)).b, 0, "there should be blue here")
def _check_antialiasing(
self, from_point, to_point, should, check_points, set_endpoints=True
):
if set_endpoints:
should[from_point] = should[to_point] = FG_GREEN
def check_one_direction(from_point, to_point, should):
self.draw_aaline(self.surface, FG_GREEN, from_point, to_point, True)
for pt in check_points:
color = should.get(pt, BG_RED)
if PY3:
with self.subTest(from_pt=from_point, pt=pt, to=to_point):
self.assertEqual(self.surface.get_at(pt), color)
else:
self.assertEqual(self.surface.get_at(pt), color)
draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
on(from_point, to_point, should)
if from_point != to_point:
check_one_direction(to_point, from_point, should)
    def test_short_non_antialiased_lines(self):
        """Test very short lines that need no antialiasing, in all directions."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        # Only the central 5x5 area of the surface is inspected.
        check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)]

        def check_both_directions(from_pt, to_pt, other_points):
            # All listed extra points (between the endpoints) must be fully green.
            should = {pt: FG_GREEN for pt in other_points}
            self._check_antialiasing(from_pt, to_pt, should, check_points)

        # Single point.
        check_both_directions((5, 5), (5, 5), [])
        # Horizontal: adjacent pixels, then with one pixel in between.
        check_both_directions((4, 7), (5, 7), [])
        check_both_directions((5, 4), (7, 4), [(6, 4)])
        # Vertical: adjacent pixels, then with one pixel in between.
        check_both_directions((5, 5), (5, 6), [])
        check_both_directions((6, 4), (6, 6), [(6, 5)])
        # Diagonal: adjacent pixels, then with one pixel in between.
        check_both_directions((5, 5), (6, 6), [])
        check_both_directions((5, 5), (7, 7), [(6, 6)])
        # Anti-diagonal: adjacent pixels, then with one pixel in between.
        check_both_directions((5, 6), (6, 5), [])
        check_both_directions((6, 4), (4, 6), [(5, 5)])
    def test_short_line_anti_aliasing(self):
        """Test the antialiased blends produced by short slanted lines."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        # Only the central 5x5 area of the surface is inspected.
        check_points = [(i, j) for i in range(3, 8) for j in range(3, 8)]

        def check_both_directions(from_pt, to_pt, should):
            self._check_antialiasing(from_pt, to_pt, should, check_points)

        # Lines whose midpoint falls halfway between two pixels split the
        # intensity evenly, producing this brown half-blend on both pixels.
        brown = (127, 127, 0)
        check_both_directions((4, 4), (6, 5), {(5, 4): brown, (5, 5): brown})
        check_both_directions((4, 5), (6, 4), {(5, 4): brown, (5, 5): brown})
        check_both_directions((4, 4), (5, 6), {(4, 5): brown, (5, 5): brown})
        check_both_directions((5, 4), (4, 6), {(4, 5): brown, (5, 5): brown})
        # Longer lines with slope 1/4 (or 4/1) produce uneven reddish and
        # greenish blends. Widen the checked area for these longer lines.
        check_points = [(i, j) for i in range(2, 9) for j in range(2, 9)]
        reddish = (191, 63, 0)
        greenish = (63, 191, 0)
        # Shallow slope, rising to the right.
        should = {
            (4, 3): greenish,
            (5, 3): brown,
            (6, 3): reddish,
            (4, 4): reddish,
            (5, 4): brown,
            (6, 4): greenish,
        }
        check_both_directions((3, 3), (7, 4), should)
        # Shallow slope, falling to the right (mirrored blends).
        should = {
            (4, 3): reddish,
            (5, 3): brown,
            (6, 3): greenish,
            (4, 4): greenish,
            (5, 4): brown,
            (6, 4): reddish,
        }
        check_both_directions((3, 4), (7, 3), should)
        # Steep slope, leaning right.
        should = {
            (4, 4): greenish,
            (4, 5): brown,
            (4, 6): reddish,
            (5, 4): reddish,
            (5, 5): brown,
            (5, 6): greenish,
        }
        check_both_directions((4, 3), (5, 7), should)
        # Steep slope, leaning left (mirrored blends).
        should = {
            (4, 4): reddish,
            (4, 5): brown,
            (4, 6): greenish,
            (5, 4): greenish,
            (5, 5): brown,
            (5, 6): reddish,
        }
        check_both_directions((5, 3), (4, 7), should)
    def test_anti_aliasing_float_coordinates(self):
        """Float (sub-pixel) coordinates should be blended smoothly."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(5) for j in range(5)]
        brown = (127, 127, 0)

        # 0. Zero-length lines: a single pixel gets the full color.
        expected = {(1, 2): FG_GREEN}
        self._check_antialiasing(
            (1.5, 2), (1.5, 2), expected, check_points, set_endpoints=False
        )
        expected = {(2, 2): FG_GREEN}
        self._check_antialiasing(
            (2.5, 2.7), (2.5, 2.7), expected, check_points, set_endpoints=False
        )
        # 1. Horizontal lines with fractional x endpoints: the fractional
        # end spills a half blend onto the neighbouring pixel.
        expected = {(1, 2): brown, (2, 2): FG_GREEN}
        self._check_antialiasing(
            (1.5, 2), (2, 2), expected, check_points, set_endpoints=False
        )
        expected = {(1, 2): brown, (2, 2): FG_GREEN, (3, 2): brown}
        self._check_antialiasing(
            (1.5, 2), (2.5, 2), expected, check_points, set_endpoints=False
        )
        expected = {(2, 2): brown, (1, 2): FG_GREEN}
        self._check_antialiasing(
            (1, 2), (1.5, 2), expected, check_points, set_endpoints=False
        )
        # Sub-pixel-length line: partial intensities on both pixels.
        expected = {(1, 2): brown, (2, 2): (63, 191, 0)}
        self._check_antialiasing(
            (1.5, 2), (1.75, 2), expected, check_points, set_endpoints=False
        )
        # 2. Horizontal line lying between two pixel rows (y = 1.5): both
        # rows get the half blend.
        expected = {(x, y): brown for x in range(2, 5) for y in (1, 2)}
        self._check_antialiasing(
            (2, 1.5), (4, 1.5), expected, check_points, set_endpoints=False
        )
        # 3. Vertical lines with fractional y endpoints.
        expected = {(2, 1): brown, (2, 2): FG_GREEN, (2, 3): brown}
        self._check_antialiasing(
            (2, 1.5), (2, 2.5), expected, check_points, set_endpoints=False
        )
        expected = {(2, 1): brown, (2, 2): (63, 191, 0)}
        self._check_antialiasing(
            (2, 1.5), (2, 1.75), expected, check_points, set_endpoints=False
        )
        # Vertical line lying between two pixel columns (x = 1.5).
        expected = {(x, y): brown for x in (1, 2) for y in range(2, 5)}
        self._check_antialiasing(
            (1.5, 2), (1.5, 4), expected, check_points, set_endpoints=False
        )
        # 4. Diagonal lines with fractional endpoints.
        expected = {(1, 1): brown, (2, 2): FG_GREEN, (3, 3): brown}
        self._check_antialiasing(
            (1.5, 1.5), (2.5, 2.5), expected, check_points, set_endpoints=False
        )
        expected = {(3, 1): brown, (2, 2): FG_GREEN, (1, 3): brown}
        self._check_antialiasing(
            (2.5, 1.5), (1.5, 2.5), expected, check_points, set_endpoints=False
        )
        expected = {(2, 1): brown, (2, 2): brown, (3, 2): brown, (3, 3): brown}
        self._check_antialiasing(
            (2, 1.5), (3, 2.5), expected, check_points, set_endpoints=False
        )
        # A slanted line with quarter-pixel offsets alternates between the
        # uneven reddish/greenish blends.
        reddish = (191, 63, 0)
        greenish = (63, 191, 0)
        expected = {
            (2, 1): greenish,
            (2, 2): reddish,
            (3, 2): greenish,
            (3, 3): reddish,
            (4, 3): greenish,
            (4, 4): reddish,
        }
        self._check_antialiasing(
            (2, 1.25), (4, 3.25), expected, check_points, set_endpoints=False
        )
    def test_anti_aliasing_at_and_outside_the_border(self):
        """Ensures antialiasing works correctly at and beyond surface borders."""
        if isinstance(self, DrawTestCase):
            self.skipTest("not working with draw.aaline")
        self.surface = pygame.Surface((10, 10))
        draw.rect(self.surface, BG_RED, (0, 0, 10, 10), 0)
        check_points = [(i, j) for i in range(10) for j in range(10)]
        reddish = (191, 63, 0)
        brown = (127, 127, 0)
        greenish = (63, 191, 0)
        # Reference line fully inside the surface and its expected blends.
        from_point, to_point = (3, 3), (7, 4)
        should = {
            (4, 3): greenish,
            (5, 3): brown,
            (6, 3): reddish,
            (4, 4): reddish,
            (5, 4): brown,
            (6, 4): greenish,
        }
        # Shift the same line onto and past each border/corner; the pixels
        # that remain on the surface must keep the same expected blends.
        for dx, dy in (
            (-4, 0),
            (4, 0),  # left and right borders
            (0, -5),
            (0, -4),
            (0, -3),  # upper border
            (0, 5),
            (0, 6),
            (0, 7),  # lower border
            (-4, -4),
            (-4, -3),
            (-3, -4),  # upper left corner
        ):
            first = from_point[0] + dx, from_point[1] + dy
            second = to_point[0] + dx, to_point[1] + dy
            expected = {(x + dx, y + dy): color for (x, y), color in should.items()}
            self._check_antialiasing(first, second, expected, check_points)
bounds_rect = self.draw_aalines()
def test_aalines__kwargs_missing(self):
kwargs = {
"surface": pygame.Surface((3, 2)),
"color": pygame.Color("red"),
"closed": 1,
"points": ((2, 2), (1, 1)),
"blend": 1,
}
for name in ("points", "closed", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name)
with self.assertRaises(TypeError):
bounds_rect = self.draw_aalines(**invalid_kwargs)
    def test_aalines__arg_invalid_types(self):
        """Ensures draw aalines detects invalid arg types."""
        surface = pygame.Surface((2, 2))
        color = pygame.Color("blue")
        closed = 0
        points = ((1, 2), (2, 1))
        with self.assertRaises(TypeError):
            # Invalid blend.
            bounds_rect = self.draw_aalines(surface, color, closed, points, "1")
        with self.assertRaises(TypeError):
            # Invalid points.
            bounds_rect = self.draw_aalines(surface, color, closed, (1, 2, 3))
        with self.assertRaises(TypeError):
            # Invalid closed.
            bounds_rect = self.draw_aalines(surface, color, InvalidBool(), points)
        with self.assertRaises(TypeError):
            # Invalid color.
            bounds_rect = self.draw_aalines(surface, 2.3, closed, points)
        with self.assertRaises(TypeError):
            # Invalid surface.
            bounds_rect = self.draw_aalines((1, 2, 3, 4), color, closed, points)
    def test_aalines__kwarg_invalid_types(self):
        """Ensures draw aalines detects invalid kwarg types."""
        valid_kwargs = {
            "surface": pygame.Surface((3, 3)),
            "color": pygame.Color("green"),
            "closed": False,
            "points": ((1, 2), (2, 1)),
            "blend": 1,
        }
        invalid_kwargs = {
            "surface": pygame.Surface,  # Class, not an instance.
            "color": 2.3,
            "closed": InvalidBool(),
            "points": (0, 0, 0),
            "blend": 1.2,
        }
        # Swap in one invalid value at a time; each must raise TypeError.
        for kwarg in ("surface", "color", "closed", "points", "blend"):
            kwargs = dict(valid_kwargs)
            kwargs[kwarg] = invalid_kwargs[kwarg]
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__kwarg_invalid_name(self):
        """Ensures draw aalines detects invalid kwarg names."""
        surface = pygame.Surface((2, 3))
        color = pygame.Color("cyan")
        closed = 1
        points = ((1, 2), (2, 1))
        # An unknown kwarg must raise TypeError, with and without `blend`.
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "blend": 1,
                "invalid": 1,
            },
            {
                "surface": surface,
                "color": color,
                "closed": closed,
                "points": points,
                "invalid": 1,
            },
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__args_and_kwargs(self):
        """Ensures draw aalines accepts a combination of args/kwargs."""
        surface = pygame.Surface((3, 2))
        color = (255, 255, 0, 0)
        closed = 0
        points = ((1, 2), (2, 1))
        blend = 1
        kwargs = {
            "surface": surface,
            "color": color,
            "closed": closed,
            "points": points,
            "blend": blend,
        }
        # Pass an increasing number of positional args, the rest as kwargs.
        for name in ("surface", "color", "closed", "points", "blend"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_aalines(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_aalines(surface, color, **kwargs)
            elif "closed" == name:
                bounds_rect = self.draw_aalines(surface, color, closed, **kwargs)
            elif "points" == name:
                bounds_rect = self.draw_aalines(
                    surface, color, closed, points, **kwargs
                )
            else:
                bounds_rect = self.draw_aalines(
                    surface, color, closed, points, blend, **kwargs
                )
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_blend_values(self):
        """Ensures draw aalines accepts different blend values."""
        expected_color = pygame.Color("yellow")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": (pos, (1, 3)),
            "blend": None,
        }
        # Any int blend value (not just 0/1) is accepted and still draws.
        for blend in (-10, -2, -1, 0, 1, 2, 10):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["blend"] = blend
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color, blend)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_points_format(self):
        """Ensures draw aalines accepts different points formats."""
        expected_color = (10, 20, 30, 255)
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "closed": False,
            "points": None,
            "blend": 0,
        }
        # The point type can be a tuple/list/Vector2.
        point_types = (
            (tuple, tuple, tuple, tuple),  # all tuples
            (list, list, list, list),  # all lists
            (Vector2, Vector2, Vector2, Vector2),  # all Vector2s
            (list, Vector2, tuple, Vector2),  # mix
        )
        # The point values can be ints or floats.
        point_values = (
            ((1, 1), (2, 1), (2, 2), (1, 2)),
            ((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
        )
        # Each sequence of points can be a tuple or a list.
        seq_types = (tuple, list)
        for point_type in point_types:
            for values in point_values:
                check_pos = values[0]
                points = [point_type[i](pt) for i, pt in enumerate(values)]
                for seq_type in seq_types:
                    surface.fill(surface_color)  # Clear for each test.
                    kwargs["points"] = seq_type(points)
                    bounds_rect = self.draw_aalines(**kwargs)
                    self.assertEqual(surface.get_at(check_pos), expected_color)
                    self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__invalid_points_formats(self):
        """Ensures draw aalines handles invalid points formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "closed": False,
            "points": None,
            "blend": 1,
        }
        points_fmts = (
            ((1, 1), (2,)),  # Too few coords.
            ((1, 1), (2, 2, 2)),  # Too many coords.
            ((1, 1), (2, "2")),  # Wrong type.
            ((1, 1), set([2, 3])),  # Wrong type.
            ((1, 1), dict(((2, 2), (3, 3)))),  # Wrong type.
            set(((1, 1), (1, 2))),  # Wrong type.
            dict(((1, 1), (4, 4))),  # Wrong type.
        )
        for points in points_fmts:
            kwargs["points"] = points
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_aalines(**kwargs)
def test_aalines__invalid_points_values(self):
kwargs = {
"surface": pygame.Surface((4, 4)),
"color": pygame.Color("red"),
"closed": False,
"points": None,
"blend": 1,
}
for points in ([], ((1, 1),)):
for seq_type in (tuple, list):
kwargs["points"] = seq_type(points)
with self.assertRaises(ValueError):
bounds_rect = self.draw_aalines(**kwargs)
    def test_aalines__valid_closed_values(self):
        """Ensures draw aalines accepts different closed values."""
        line_color = pygame.Color("blue")
        surface_color = pygame.Color("white")
        surface = pygame.Surface((5, 5))
        # A point on the closing segment: only colored when closed is truthy.
        pos = (1, 3)
        kwargs = {
            "surface": surface,
            "color": line_color,
            "closed": None,
            "points": ((1, 1), (4, 1), (4, 4), (1, 4)),
            "blend": 0,
        }
        # `closed` is evaluated for truthiness, so try a wide mix of values.
        true_values = (-7, 1, 10, "2", 3.1, (4,), [5], True)
        false_values = (None, "", 0, (), [], False)
        for closed in true_values + false_values:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["closed"] = closed
            expected_color = line_color if closed else surface_color
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_aalines__valid_color_formats(self):
        """Ensures draw aalines accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        pos = (1, 1)
        kwargs = {
            "surface": surface,
            "color": None,
            "closed": False,
            "points": (pos, (2, 1)),
            "blend": 0,
        }
        greens = (
            (0, 255, 0),  # RGB tuple.
            (0, 255, 0, 255),  # RGBA tuple.
            surface.map_rgb(green_color),  # Mapped int color value.
            green_color,  # Color object.
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_aalines(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_aalines__invalid_color_formats(self):
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"closed": False,
"points": ((1, 1), (1, 2)),
"blend": 0,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_aalines(**kwargs)
def test_aalines__color(self):
for surface in self._create_surfaces():
for expected_color in self.COLORS:
self.draw_aalines(surface, expected_color, True, corners(surface))
for pos, color in border_pos_and_color(surface):
self.assertEqual(color, expected_color, "pos={}".format(pos))
def test_aalines__gaps(self):
expected_color = (255, 255, 255)
for surface in self._create_surfaces():
self.draw_aalines(surface, expected_color, True, corners(surface))
for pos, color in border_pos_and_color(surface):
self.assertEqual(color, expected_color, "pos={}".format(pos))
    def test_aalines__bounding_rect(self):
        """Ensures draw aalines returns the correct bounding rect."""
        line_color = pygame.Color("red")
        surf_color = pygame.Color("blue")
        width = height = 30
        # A rect helps position the lines; test both on and off the surface.
        pos_rect = pygame.Rect((0, 0), (width, height))
        for size in ((width + 5, height + 5), (width - 5, height - 5)):
            surface = pygame.Surface(size, 0, 32)
            surf_rect = surface.get_rect()
            for pos in rect_corners_mids_and_center(surf_rect):
                pos_rect.center = pos
                pts = (pos_rect.midleft, pos_rect.midtop, pos_rect.midright)
                pos = pts[0]  # Rename pos and use as the first point.
                for blend in (False, True):
                    for closed in (True, False):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_aalines(
                            surface, line_color, closed, pts, blend
                        )
                        # Compare to a rect computed from what was actually
                        # drawn on the surface.
                        expected_rect = create_bounding_rect(surface, surf_color, pos)
                        self.assertEqual(bounding_rect, expected_rect)
    def test_aalines__surface_clip(self):
        """Ensures draw aalines respects a surface's clip area."""
        surfw = surfh = 30
        aaline_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the aalines' pos.
        # Test centering the pos_rect along the clip rect's edge to allow for
        # drawing the aalines over the clip_rect's bounds.
        for center in rect_corners_mids_and_center(clip_rect):
            pos_rect.center = center
            pts = (pos_rect.midtop, pos_rect.center, pos_rect.midbottom)
            for closed in (True, False):  # Test closed and not closed.
                for blend in (0, 1):  # Test non-blending and blending.
                    # Get the expected points by drawing the aalines without
                    # the clip area set.
                    surface.set_clip(None)
                    surface.fill(surface_color)
                    self.draw_aalines(surface, aaline_color, closed, pts, blend)
                    # Need to get the points that are NOT surface_color due to
                    # the way blend=0 uses the color black to antialias.
                    expected_pts = get_color_points(
                        surface, surface_color, clip_rect, False
                    )
                    # Clear the surface and set the clip area. Redraw the
                    # aalines and check that only the clip area is modified.
                    surface.fill(surface_color)
                    surface.set_clip(clip_rect)
                    self.draw_aalines(surface, aaline_color, closed, pts, blend)
                    surface.lock()  # For possible speed up.
                    # Check all the surface points to ensure the expected_pts
                    # are not surface_color.
                    for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                        if pt in expected_pts:
                            self.assertNotEqual(surface.get_at(pt), surface_color, pt)
                        else:
                            self.assertEqual(surface.get_at(pt), surface_color, pt)
                    surface.unlock()
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing aalines.
# class PythonDrawAALinesTest(AALinesMixin, PythonDrawTestCase):
# """Test draw_py module function aalines.
#
# This class inherits the general tests from AALinesMixin. It is also the
# class to add any draw_py.draw_aalines specific tests to.
# """
class DrawAALinesTest(AALinesMixin, DrawTestCase):
    """Test draw module function aalines.

    This class inherits the general tests from AALinesMixin. It is also the
    class to add any draw.aalines specific tests to.
    """
### Polygon Testing ###########################################################

# A 4x4 filled square given by its four corner points.
SQUARE = ([0, 0], [3, 0], [3, 3], [0, 3])
# A small diamond given by its four corner points (left, bottom, right, top).
DIAMOND = [(1, 3), (3, 5), (5, 3), (3, 1)]
# A plus/cross shape described by its twelve outline vertices, listed
# clockwise starting at the top-left of the vertical bar.
CROSS = (
    [2, 0],
    [4, 0],
    [4, 2],
    [6, 2],
    [6, 4],
    [4, 4],
    [4, 6],
    [2, 6],
    [2, 4],
    [0, 4],
    [0, 2],
    [2, 2],
)
class DrawPolygonMixin(object):
    """Mixin tests for drawing polygons.

    This class contains all the general polygon drawing tests.
    """

    def setUp(self):
        # A fresh 20x20 surface for every test.
        self.surface = pygame.Surface((20, 20))
def test_polygon__args(self):
bounds_rect = self.draw_polygon(
pygame.Surface((3, 3)), (0, 10, 0, 50), ((0, 0), (1, 1), (2, 2)), 1
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__args_without_width(self):
bounds_rect = self.draw_polygon(
pygame.Surface((2, 2)), (0, 0, 0, 50), ((0, 0), (1, 1), (2, 2))
)
self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_polygon__kwargs(self):
        """Ensures draw polygon accepts the correct kwargs."""
        surface = pygame.Surface((4, 4))
        color = pygame.Color("yellow")
        points = ((0, 0), (1, 1), (2, 2))
        # With and without the optional width kwarg.
        kwargs_list = [
            {"surface": surface, "color": color, "points": points, "width": 1},
            {"surface": surface, "color": color, "points": points},
        ]
        for kwargs in kwargs_list:
            bounds_rect = self.draw_polygon(**kwargs)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__kwargs_order_independent(self):
bounds_rect = self.draw_polygon(
color=(10, 20, 30),
surface=pygame.Surface((3, 2)),
width=0,
points=((0, 1), (1, 2), (2, 3)),
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__args_missing(self):
surface = pygame.Surface((1, 1))
color = pygame.Color("blue")
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(surface, color)
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(surface)
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon()
    def test_polygon__kwargs_missing(self):
        """Ensures draw polygon detects any missing required kwargs."""
        kwargs = {
            "surface": pygame.Surface((1, 2)),
            "color": pygame.Color("red"),
            "points": ((2, 1), (2, 2), (2, 3)),
            "width": 1,
        }
        for name in ("points", "color", "surface"):
            invalid_kwargs = dict(kwargs)
            invalid_kwargs.pop(name)  # Pop from a copy.
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_polygon(**invalid_kwargs)
    def test_polygon__arg_invalid_types(self):
        """Ensures draw polygon detects invalid arg types."""
        surface = pygame.Surface((2, 2))
        color = pygame.Color("blue")
        points = ((0, 1), (1, 2), (1, 3))
        with self.assertRaises(TypeError):
            # Invalid width.
            bounds_rect = self.draw_polygon(surface, color, points, "1")
        with self.assertRaises(TypeError):
            # Invalid points.
            bounds_rect = self.draw_polygon(surface, color, (1, 2, 3))
        with self.assertRaises(TypeError):
            # Invalid color.
            bounds_rect = self.draw_polygon(surface, 2.3, points)
        with self.assertRaises(TypeError):
            # Invalid surface.
            bounds_rect = self.draw_polygon((1, 2, 3, 4), color, points)
    def test_polygon__kwarg_invalid_types(self):
        """Ensures draw polygon detects invalid kwarg types."""
        surface = pygame.Surface((3, 3))
        color = pygame.Color("green")
        points = ((0, 0), (1, 0), (2, 0))
        width = 1
        kwargs_list = [
            {
                "surface": pygame.Surface,  # Invalid surface.
                "color": color,
                "points": points,
                "width": width,
            },
            {
                "surface": surface,
                "color": 2.3,  # Invalid color.
                "points": points,
                "width": width,
            },
            {
                "surface": surface,
                "color": color,
                "points": ((1,), (1,), (1,)),  # Invalid points.
                "width": width,
            },
            {"surface": surface, "color": color, "points": points, "width": 1.2},
        ]  # Invalid width.
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_polygon(**kwargs)
    def test_polygon__kwarg_invalid_name(self):
        """Ensures draw polygon detects invalid kwarg names."""
        surface = pygame.Surface((2, 3))
        color = pygame.Color("cyan")
        points = ((1, 1), (1, 2), (1, 3))
        # An unknown kwarg must raise TypeError, with and without `width`.
        kwargs_list = [
            {
                "surface": surface,
                "color": color,
                "points": points,
                "width": 1,
                "invalid": 1,
            },
            {"surface": surface, "color": color, "points": points, "invalid": 1},
        ]
        for kwargs in kwargs_list:
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_polygon(**kwargs)
    def test_polygon__args_and_kwargs(self):
        """Ensures draw polygon accepts a combination of args/kwargs."""
        surface = pygame.Surface((3, 1))
        color = (255, 255, 0, 0)
        points = ((0, 1), (1, 2), (2, 3))
        width = 0
        kwargs = {"surface": surface, "color": color, "points": points, "width": width}
        # Pass an increasing number of positional args, the rest as kwargs.
        for name in ("surface", "color", "points", "width"):
            kwargs.pop(name)
            if "surface" == name:
                bounds_rect = self.draw_polygon(surface, **kwargs)
            elif "color" == name:
                bounds_rect = self.draw_polygon(surface, color, **kwargs)
            elif "points" == name:
                bounds_rect = self.draw_polygon(surface, color, points, **kwargs)
            else:
                bounds_rect = self.draw_polygon(surface, color, points, width, **kwargs)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_polygon__valid_width_values(self):
        """Ensures draw polygon accepts different width values."""
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        color = (10, 20, 30, 255)
        kwargs = {
            "surface": surface,
            "color": color,
            "points": ((1, 1), (2, 1), (2, 2), (1, 2)),
            "width": None,
        }
        pos = kwargs["points"][0]
        for width in (-100, -10, -1, 0, 1, 10, 100):
            surface.fill(surface_color)  # Clear for each test.
            kwargs["width"] = width
            # A negative width draws nothing, so the surface color remains.
            expected_color = color if width >= 0 else surface_color
            bounds_rect = self.draw_polygon(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_polygon__valid_points_format(self):
        """Ensures draw polygon accepts different points formats."""
        expected_color = (10, 20, 30, 255)
        surface_color = pygame.Color("white")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": expected_color,
            "points": None,
            "width": 0,
        }
        # The point type can be a tuple/list/Vector2.
        point_types = (
            (tuple, tuple, tuple, tuple),  # all tuples
            (list, list, list, list),  # all lists
            (Vector2, Vector2, Vector2, Vector2),  # all Vector2s
            (list, Vector2, tuple, Vector2),
        )  # mix
        # The point values can be ints or floats.
        point_values = (
            ((1, 1), (2, 1), (2, 2), (1, 2)),
            ((1, 1), (2.2, 1), (2.1, 2.2), (1, 2.1)),
        )
        # Each sequence of points can be a tuple or a list.
        seq_types = (tuple, list)
        for point_type in point_types:
            for values in point_values:
                check_pos = values[0]
                points = [point_type[i](pt) for i, pt in enumerate(values)]
                for seq_type in seq_types:
                    surface.fill(surface_color)  # Clear for each test.
                    kwargs["points"] = seq_type(points)
                    bounds_rect = self.draw_polygon(**kwargs)
                    self.assertEqual(surface.get_at(check_pos), expected_color)
                    self.assertIsInstance(bounds_rect, pygame.Rect)
    def test_polygon__invalid_points_formats(self):
        """Ensures draw polygon handles invalid points formats correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "points": None,
            "width": 0,
        }
        points_fmts = (
            ((1, 1), (2, 1), (2,)),  # Too few coords.
            ((1, 1), (2, 1), (2, 2, 2)),  # Too many coords.
            ((1, 1), (2, 1), (2, "2")),  # Wrong type.
            ((1, 1), (2, 1), set([2, 3])),  # Wrong type.
            ((1, 1), (2, 1), dict(((2, 2), (3, 3)))),  # Wrong type.
            set(((1, 1), (2, 1), (2, 2), (1, 2))),  # Wrong type.
            dict(((1, 1), (2, 2), (3, 3), (4, 4))),
        )  # Wrong type.
        for points in points_fmts:
            kwargs["points"] = points
            with self.assertRaises(TypeError):
                bounds_rect = self.draw_polygon(**kwargs)
    def test_polygon__invalid_points_values(self):
        """Ensures draw polygon handles invalid points values correctly."""
        kwargs = {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("red"),
            "points": None,
            "width": 0,
        }
        # A polygon needs at least 3 points.
        points_fmts = (
            tuple(),  # Too few points.
            ((1, 1),),  # Too few points.
            ((1, 1), (2, 1)),
        )  # Too few points.
        for points in points_fmts:
            for seq_type in (tuple, list):  # Test as tuples and lists.
                kwargs["points"] = seq_type(points)
                with self.assertRaises(ValueError):
                    bounds_rect = self.draw_polygon(**kwargs)
    def test_polygon__valid_color_formats(self):
        """Ensures draw polygon accepts different color formats."""
        green_color = pygame.Color("green")
        surface_color = pygame.Color("black")
        surface = pygame.Surface((3, 4))
        kwargs = {
            "surface": surface,
            "color": None,
            "points": ((1, 1), (2, 1), (2, 2), (1, 2)),
            "width": 0,
        }
        pos = kwargs["points"][0]
        greens = (
            (0, 255, 0),  # RGB tuple.
            (0, 255, 0, 255),  # RGBA tuple.
            surface.map_rgb(green_color),  # Mapped int color value.
            green_color,  # Color object.
        )
        for color in greens:
            surface.fill(surface_color)  # Clear for each test.
            kwargs["color"] = color
            if isinstance(color, int):
                expected_color = surface.unmap_rgb(color)
            else:
                expected_color = green_color
            bounds_rect = self.draw_polygon(**kwargs)
            self.assertEqual(surface.get_at(pos), expected_color)
            self.assertIsInstance(bounds_rect, pygame.Rect)
def test_polygon__invalid_color_formats(self):
kwargs = {
"surface": pygame.Surface((4, 3)),
"color": None,
"points": ((1, 1), (2, 1), (2, 2), (1, 2)),
"width": 0,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_polygon(**kwargs)
def test_draw_square(self):
self.draw_polygon(self.surface, RED, SQUARE, 0)
# note : there is a discussion (#234) if draw.polygon should include or
# not the right or lower border; here we stick with current behavior,
# eg include those borders ...
for x in range(4):
for y in range(4):
self.assertEqual(self.surface.get_at((x, y)), RED)
    def test_draw_diamond(self):
        """Draw a filled diamond and check its corners and interior."""
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        self.draw_polygon(self.surface, GREEN, DIAMOND, 0)
        # this diamond shape is equivalent to its four corners, plus inner square
        for x, y in DIAMOND:
            self.assertEqual(self.surface.get_at((x, y)), GREEN, msg=str((x, y)))
        # The inner 3x3 square must be filled too.
        for x in range(2, 5):
            for y in range(2, 5):
                self.assertEqual(self.surface.get_at((x, y)), GREEN)
    def test_1_pixel_high_or_wide_shapes(self):
        """Test degenerate polygons that collapse to one pixel high or wide."""
        # 1. one-pixel-high, filled
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        self.draw_polygon(self.surface, GREEN, [(x, 2) for x, _y in CROSS], 0)
        cross_size = 6  # the maximum x or y coordinate of the cross
        for x in range(cross_size + 1):
            # Only row y=2 is colored; the rows above and below stay red.
            self.assertEqual(self.surface.get_at((x, 1)), RED)
            self.assertEqual(self.surface.get_at((x, 2)), GREEN)
            self.assertEqual(self.surface.get_at((x, 3)), RED)
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        # 2. one-pixel-high, not filled
        self.draw_polygon(self.surface, GREEN, [(x, 5) for x, _y in CROSS], 1)
        for x in range(cross_size + 1):
            self.assertEqual(self.surface.get_at((x, 4)), RED)
            self.assertEqual(self.surface.get_at((x, 5)), GREEN)
            self.assertEqual(self.surface.get_at((x, 6)), RED)
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        # 3. one-pixel-wide, filled
        self.draw_polygon(self.surface, GREEN, [(3, y) for _x, y in CROSS], 0)
        for y in range(cross_size + 1):
            # Only column x=3 is colored; neighbouring columns stay red.
            self.assertEqual(self.surface.get_at((2, y)), RED)
            self.assertEqual(self.surface.get_at((3, y)), GREEN)
            self.assertEqual(self.surface.get_at((4, y)), RED)
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        # 4. one-pixel-wide, not filled
        self.draw_polygon(self.surface, GREEN, [(4, y) for _x, y in CROSS], 1)
        for y in range(cross_size + 1):
            self.assertEqual(self.surface.get_at((3, y)), RED)
            self.assertEqual(self.surface.get_at((4, y)), GREEN)
            self.assertEqual(self.surface.get_at((5, y)), RED)
    def test_draw_symetric_cross(self):
        """Draw the cross shape unfilled and filled and verify every pixel."""
        # 1. case width = 1 (not filled: `polygon` calls internally the `lines` function)
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        self.draw_polygon(self.surface, GREEN, CROSS, 1)
        inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)]
        for x in range(10):
            for y in range(10):
                if (x, y) in inside:
                    # interior pixels stay red when the cross is not filled
                    self.assertEqual(self.surface.get_at((x, y)), RED)
                elif (x in range(2, 5) and y < 7) or (y in range(2, 5) and x < 7):
                    # we are on the border of the cross:
                    self.assertEqual(self.surface.get_at((x, y)), GREEN)
                else:
                    # we are outside
                    self.assertEqual(self.surface.get_at((x, y)), RED)
        # 2. case width = 0 (filled; this is the example from #234)
        pygame.draw.rect(self.surface, RED, (0, 0, 10, 10), 0)
        self.draw_polygon(self.surface, GREEN, CROSS, 0)
        inside = [(x, 3) for x in range(1, 6)] + [(3, y) for y in range(1, 6)]
        for x in range(10):
            for y in range(10):
                if (x in range(2, 5) and y < 7) or (y in range(2, 5) and x < 7):
                    # we are on the border of the cross:
                    self.assertEqual(
                        self.surface.get_at((x, y)), GREEN, msg=str((x, y))
                    )
                else:
                    # we are outside
                    self.assertEqual(self.surface.get_at((x, y)), RED)
    def test_illumine_shape(self):
        """Regression test: the 'upper inner' border line must be drawn."""
        rect = pygame.Rect((0, 0, 20, 20))
        path_data = [
            (0, 0),
            (rect.width - 1, 0),  # upper border
            (rect.width - 5, 5 - 1),
            (5 - 1, 5 - 1),  # upper inner
            (5 - 1, rect.height - 5),
            (0, rect.height - 1),
        ]  # lower diagonal
        # The shape looks like this (the numbers are the indices of path_data)
        #
        #    0**********************1        <-- upper border
        #    ***********************
        #    **********************
        #    *********************
        #    ****3**************2            <-- upper inner border
        #    *****
        #    *****                           (more lines here)
        #    *****
        #    ****4
        #    ****
        #    ***
        #    **
        #    5
        #
        # the current bug is that the "upper inner" line is not drawn, but only
        # if 4 or some lower corner exists
        pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0)
        # 1. First without the corners 4 & 5
        self.draw_polygon(self.surface, GREEN, path_data[:4], 0)
        for x in range(20):
            self.assertEqual(self.surface.get_at((x, 0)), GREEN)  # upper border
        for x in range(4, rect.width - 5 + 1):
            self.assertEqual(self.surface.get_at((x, 4)), GREEN)  # upper inner
        # 2. with the corners 4 & 5
        pygame.draw.rect(self.surface, RED, (0, 0, 20, 20), 0)
        self.draw_polygon(self.surface, GREEN, path_data, 0)
        for x in range(4, rect.width - 5 + 1):
            self.assertEqual(self.surface.get_at((x, 4)), GREEN)  # upper inner
def test_invalid_points(self):
self.assertRaises(
TypeError,
lambda: self.draw_polygon(
self.surface, RED, ((0, 0), (0, 20), (20, 20), 20), 0
),
)
    def test_polygon__bounding_rect(self):
        """Ensures draw polygon returns the correct bounding rect."""
        polygon_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        min_width = min_height = 5
        max_width = max_height = 7
        sizes = ((min_width, min_height), (max_width, max_height))
        surface = pygame.Surface((20, 20), 0, 32)
        surf_rect = surface.get_rect()
        # Make a rect that is bigger than the surface to help test drawing
        # polygons off and partially off the surface.
        big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
        for pos in rect_corners_mids_and_center(
            surf_rect
        ) + rect_corners_mids_and_center(big_rect):
            # A rect (pos_rect) is used to help create and position the
            # polygon. Each of this rect's position attributes will be set to
            # the pos value.
            for attr in RECT_POSITION_ATTRIBUTES:
                for width, height in sizes:
                    pos_rect = pygame.Rect((0, 0), (width, height))
                    setattr(pos_rect, attr, pos)
                    # Triangle built from three of pos_rect's anchor points.
                    vertices = (
                        pos_rect.midleft,
                        pos_rect.midtop,
                        pos_rect.bottomright,
                    )
                    for thickness in range(4):
                        surface.fill(surf_color)  # Clear for each test.
                        bounding_rect = self.draw_polygon(
                            surface, polygon_color, vertices, thickness
                        )
                        # Compare to a rect computed from what was actually
                        # drawn on the surface.
                        expected_rect = create_bounding_rect(
                            surface, surf_color, vertices[0]
                        )
                        self.assertEqual(
                            bounding_rect,
                            expected_rect,
                            "thickness={}".format(thickness),
                        )
    def test_polygon__surface_clip(self):
        """Ensures draw polygon respects a surface's clip area."""
        surfw = surfh = 30
        polygon_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (8, 10))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the polygon's pos.
        for width in (0, 1):  # Filled and unfilled.
            # Test centering the polygon along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                pos_rect.center = center
                vertices = (
                    pos_rect.topleft,
                    pos_rect.topright,
                    pos_rect.bottomright,
                    pos_rect.bottomleft,
                )
                # Get the expected points by drawing the polygon without the
                # clip area set.
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_polygon(surface, polygon_color, vertices, width)
                expected_pts = get_color_points(surface, polygon_color, clip_rect)
                # Clear the surface and set the clip area. Redraw the polygon
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_polygon(surface, polygon_color, vertices, width)
                surface.lock()  # For possible speed up.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = polygon_color
                    else:
                        expected_color = surface_color
                    self.assertEqual(surface.get_at(pt), expected_color, pt)
                surface.unlock()
class DrawPolygonTest(DrawPolygonMixin, DrawTestCase):
    """Test draw module function polygon.

    This class inherits the general tests from DrawPolygonMixin. It is also
    the class to add any draw.polygon specific tests to.
    """
# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever fully supports drawing polygons.
#
# class PythonDrawPolygonTest(DrawPolygonMixin, PythonDrawTestCase):
#     """Test draw_py module function draw_polygon.
#
#     This class inherits the general tests from DrawPolygonMixin. It is also
#     the class to add any draw_py.draw_polygon specific tests to.
#     """
.assertRaises(TypeError):
bounds_rect = self.draw_rect()
def test_rect__kwargs_missing(self):
kwargs = {
"surface": pygame.Surface((1, 3)),
"color": pygame.Color("red"),
"rect": pygame.Rect((0, 0), (2, 2)),
"width": 5,
"border_radius": 10,
"border_top_left_radius": 5,
"border_top_right_radius": 20,
"border_bottom_left_radius": 15,
"border_bottom_right_radius": 0
}
for name in ("rect", "color", "surface"):
invalid_kwargs = dict(kwargs)
invalid_kwargs.pop(name)
with self.assertRaises(TypeError):
bounds_rect = self.draw_rect(**invalid_kwargs)
def test_rect__arg_invalid_types(self):
    """Ensure draw rect detects invalid arg and kwarg types."""
    surf = pygame.Surface((3, 3))
    color = pygame.Color("white")
    rect = pygame.Rect((1, 1), (1, 1))

    # Invalid value for each corner radius keyword arg.
    corner_radius_cases = (
        ("border_bottom_right_radius", "rad"),
        ("border_bottom_left_radius", "rad"),
        ("border_top_right_radius", "rad"),
        ("border_top_left_radius", "draw"),
    )
    for kwarg_name, bad_value in corner_radius_cases:
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(
                surf, color, rect, 2, **{kwarg_name: bad_value}
            )

    # Each tuple is a complete positional arg list with one invalid value.
    invalid_arg_sets = (
        (surf, color, rect, 2, "rad"),  # Invalid border_radius.
        (surf, color, rect, "2", 4),  # Invalid width.
        (surf, color, (1, 2, 3), 2, 6),  # Invalid rect.
        (surf, 2.3, rect, 3, 8),  # Invalid color.
        (rect, color, rect, 4, 10),  # Invalid surface.
    )
    for args in invalid_arg_sets:
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(*args)
def test_rect__kwarg_invalid_types(self):
    """Ensure draw rect detects invalid kwarg types.

    Each invalid case is built from one set of valid kwargs by substituting
    a single invalid value, instead of repeating the full 10-entry dict per
    case as before (same cases, far less duplication).
    """
    valid_kwargs = {
        "surface": pygame.Surface((2, 3)),
        "color": pygame.Color("red"),
        "rect": pygame.Rect((0, 0), (1, 1)),
        "width": 1,
        "border_radius": 10,
        "border_top_left_radius": 5,
        "border_top_right_radius": 20,
        "border_bottom_left_radius": 15,
        "border_bottom_right_radius": 0,
    }

    # One invalid replacement value per kwarg name.
    invalid_values = {
        "surface": pygame.Surface,  # The class, not an instance.
        "color": 2.3,
        "rect": (1, 1, 2),  # Too few rect values.
        "width": 1.1,
        "border_radius": 10.5,
        "border_top_left_radius": 5.5,
        "border_top_right_radius": "a",
        "border_bottom_left_radius": "c",
        "border_bottom_right_radius": "d",
    }

    for name, invalid_value in invalid_values.items():
        kwargs = dict(valid_kwargs, **{name: invalid_value})

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(**kwargs)
def test_rect__kwarg_invalid_name(self):
    """Ensure draw rect detects unexpected kwarg names."""
    surf = pygame.Surface((2, 1))
    color = pygame.Color("green")
    rect = pygame.Rect((0, 0), (3, 3))
    required = {"surface": surf, "color": color, "rect": rect}
    optional = {
        "width": 1,
        "border_radius": 10,
        "border_top_left_radius": 5,
        "border_top_right_radius": 20,
        "border_bottom_left_radius": 15,
        "border_bottom_right_radius": 0,
    }

    # An unknown kwarg must raise whether or not the optional kwargs are
    # also supplied.
    for extra in (optional, {}):
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(invalid=1, **dict(required, **extra))
def test_rect__args_and_kwargs(self):
    """Ensure draw rect accepts a mix of args and kwargs.

    Each iteration moves one more leading parameter from the kwargs dict to
    the positional args. Note the pops are cumulative across iterations, so
    the kwargs dict shrinks as the positional list grows.
    """
    surf = pygame.Surface((3, 1))
    color = (255, 255, 255, 0)
    rect = pygame.Rect((1, 0), (2, 5))
    width = 0
    draw_kwargs = {
        "surface": surf,
        "color": color,
        "rect": rect,
        "width": width,
    }

    for name in ("surface", "color", "rect", "width"):
        draw_kwargs.pop(name)  # Cumulative: earlier names stay popped.

        if name == "surface":
            bounds_rect = self.draw_rect(surf, **draw_kwargs)
        elif name == "color":
            bounds_rect = self.draw_rect(surf, color, **draw_kwargs)
        elif name == "rect":
            bounds_rect = self.draw_rect(surf, color, rect, **draw_kwargs)
        else:
            bounds_rect = self.draw_rect(surf, color, rect, width,
                                         **draw_kwargs)

        self.assertIsInstance(bounds_rect, pygame.Rect)
def test_rect__valid_width_values(self):
    """Ensure draw rect accepts different width values."""
    pos = (1, 1)
    surf_color = pygame.Color("black")
    surf = pygame.Surface((3, 4))
    rect_color = (1, 2, 3, 255)
    draw_kwargs = {
        "surface": surf,
        "color": rect_color,
        "rect": pygame.Rect(pos, (2, 2)),
        "width": None,
    }

    for width in (-1000, -10, -1, 0, 1, 10, 1000):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["width"] = width
        # Negative widths draw nothing.
        expected_color = surf_color if width < 0 else rect_color

        bounding_rect = self.draw_rect(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_rect__valid_rect_formats(self):
    """Ensure draw rect accepts different rect formats."""
    pos = (1, 1)
    expected_color = pygame.Color("yellow")
    surf_color = pygame.Color("black")
    surf = pygame.Surface((3, 4))
    draw_kwargs = {
        "surface": surf,
        "color": expected_color,
        "rect": None,
        "width": 0,
    }

    # A Rect, nested tuples, a flat tuple, and a list containing floats.
    for rect in (
        pygame.Rect(pos, (1, 1)),
        (pos, (2, 2)),
        (pos[0], pos[1], 3, 3),
        [pos, (2.1, 2.2)],
    ):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["rect"] = rect

        bounding_rect = self.draw_rect(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_rect__invalid_rect_formats(self):
    """Ensure draw rect detects invalid rect formats."""
    draw_kwargs = {
        "surface": pygame.Surface((4, 4)),
        "color": pygame.Color("red"),
        "rect": None,
        "width": 0,
    }

    # Wrong lengths, a non-sequence container, and a non-numeric value.
    for bad_rect in (
        [],
        [1],
        [1, 2],
        [1, 2, 3],
        [1, 2, 3, 4, 5],
        set([1, 2, 3, 4]),
        [1, 2, 3, "4"],
    ):
        draw_kwargs["rect"] = bad_rect

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(**draw_kwargs)
def test_rect__valid_color_formats(self):
    """Ensure draw rect accepts different color formats."""
    pos = (1, 1)
    red_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    surf = pygame.Surface((3, 4))
    draw_kwargs = {
        "surface": surf,
        "color": None,
        "rect": pygame.Rect(pos, (1, 1)),
        "width": 3,
    }

    # Red as an RGB tuple, an RGBA tuple, a mapped int, and a Color.
    for color in ((255, 0, 0), (255, 0, 0, 255), surf.map_rgb(red_color),
                  red_color):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["color"] = color
        # A mapped int must be unmapped before comparing with get_at().
        if isinstance(color, int):
            expected_color = surf.unmap_rgb(color)
        else:
            expected_color = red_color

        bounding_rect = self.draw_rect(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_rect__invalid_color_formats(self):
    """Ensure draw rect detects invalid color formats."""
    pos = (1, 1)
    draw_kwargs = {
        "surface": pygame.Surface((3, 4)),
        "color": None,
        "rect": pygame.Rect(pos, (1, 1)),
        "width": 1,
    }

    # A float and an arbitrary object are not valid colors.
    for bad_color in (2.3, self):
        draw_kwargs["color"] = bad_color

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_rect(**draw_kwargs)
def test_rect__fill(self):
    """Ensure draw rect fills the correct area.

    Also verifies that rects only 1 pixel high or wide still fill.
    """
    self.surf_w, self.surf_h = self.surf_size = (320, 200)
    self.surf = pygame.Surface(self.surf_size, pygame.SRCALPHA)
    self.color = (1, 13, 24, 205)
    rect = pygame.Rect(10, 10, 25, 20)
    drawn = self.draw_rect(self.surf, self.color, rect, 0)

    self.assertEqual(drawn, rect)

    # Should be colored where it's supposed to be.
    for pt in test_utils.rect_area_pts(rect):
        color_at_pt = self.surf.get_at(pt)
        self.assertEqual(color_at_pt, self.color)

    # And not where it shouldn't
    for pt in test_utils.rect_outer_bounds(rect):
        color_at_pt = self.surf.get_at(pt)
        self.assertNotEqual(color_at_pt, self.color)

    # 1 pixel high and 1 pixel wide rects must still fill completely,
    # with the pixels just beyond their ends left untouched.
    # NOTE(review): reconstructed from a corrupted source line --
    # `elf.surf.fill(bgcolor)` referenced an undefined `bgcolor`; a black
    # background definition is assumed here. TODO confirm against upstream.
    bgcolor = pygame.Color("black")
    self.surf.fill(bgcolor)
    hrect = pygame.Rect(1, 1, self.surf_w - 2, 1)
    vrect = pygame.Rect(1, 3, 1, self.surf_h - 4)

    drawn = self.draw_rect(self.surf, self.color, hrect, 0)
    self.assertEqual(drawn, hrect)
    x, y = hrect.topleft
    w, h = hrect.size
    self.assertEqual(self.surf.get_at((x - 1, y)), bgcolor)
    self.assertEqual(self.surf.get_at((x + w, y)), bgcolor)
    for i in range(x, x + w):
        self.assertEqual(self.surf.get_at((i, y)), self.color)

    drawn = self.draw_rect(self.surf, self.color, vrect, 0)
    self.assertEqual(drawn, vrect)
    x, y = vrect.topleft
    w, h = vrect.size
    self.assertEqual(self.surf.get_at((x, y - 1)), bgcolor)
    self.assertEqual(self.surf.get_at((x, y + h)), bgcolor)
    for i in range(y, y + h):
        self.assertEqual(self.surf.get_at((x, i)), self.color)
def test_rect__one_pixel_lines(self):
    """Ensure an unfilled rect of width 1 draws exactly its perimeter."""
    self.surf = pygame.Surface((320, 200), pygame.SRCALPHA)
    self.color = (1, 13, 24, 205)

    rect = pygame.Rect(10, 10, 56, 20)
    drawn = self.draw_rect(self.surf, self.color, rect, 1)

    self.assertEqual(drawn, rect)

    # Every perimeter pixel should be colored...
    for pt in test_utils.rect_perimeter_pts(drawn):
        self.assertEqual(self.surf.get_at(pt), self.color)

    # ...and the pixels just outside the rect untouched.
    for pt in test_utils.rect_outer_bounds(drawn):
        self.assertNotEqual(self.surf.get_at(pt), self.color)
def test_rect__bounding_rect(self):
    """Ensure draw rect returns the correct bounding rect.

    Rects are positioned on and off the surface via each rect position
    attribute, over a range of sizes and thicknesses.
    """
    rect_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    min_width = min_height = 5
    max_width = max_height = 7
    sizes = ((min_width, min_height), (max_width, max_height))
    surface = pygame.Surface((20, 20), 0, 32)
    surf_rect = surface.get_rect()
    # A rect bigger than the surface helps test rects drawn partially or
    # fully off the surface.
    big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)

    positions = rect_corners_mids_and_center(
        surf_rect
    ) + rect_corners_mids_and_center(big_rect)

    for pos in positions:
        # Each of the rect's position attributes is anchored to pos.
        for attr in RECT_POSITION_ATTRIBUTES:
            # Test using different rect sizes and thickness values.
            for width, height in sizes:
                rect = pygame.Rect((0, 0), (width, height))
                setattr(rect, attr, pos)

                for thickness in range(4):
                    surface.fill(surf_color)  # Clear for each test.

                    bounding_rect = self.draw_rect(
                        surface, rect_color, rect, thickness
                    )

                    # The expected rect is computed from what was actually
                    # drawn.
                    expected_rect = create_bounding_rect(
                        surface, surf_color, rect.topleft
                    )

                    self.assertEqual(
                        bounding_rect,
                        expected_rect,
                        "thickness={}".format(thickness),
                    )
def test_rect__surface_clip(self):
    """Ensure rects are clipped to the surface's clip area."""
    surfw = surfh = 30
    rect_color = pygame.Color("red")
    surface_color = pygame.Color("green")
    surf = pygame.Surface((surfw, surfh))
    surf.fill(surface_color)

    clip_rect = pygame.Rect((0, 0), (8, 10))
    clip_rect.center = surf.get_rect().center
    test_rect = clip_rect.copy()  # Manages the rect's pos.

    for thickness in (0, 1):  # Filled and unfilled.
        # Center the rect on each edge/corner/center of the clip rect.
        for center in rect_corners_mids_and_center(clip_rect):
            test_rect.center = center

            # Draw unclipped to collect the points inside the clip area
            # expected to change.
            surf.set_clip(None)
            surf.fill(surface_color)
            self.draw_rect(surf, rect_color, test_rect, thickness)
            expected_pts = get_color_points(surf, rect_color, clip_rect)

            # Redraw with the clip set; only those points may change.
            surf.fill(surface_color)
            surf.set_clip(clip_rect)
            self.draw_rect(surf, rect_color, test_rect, thickness)

            surf.lock()  # For possible speed up.
            for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                if pt in expected_pts:
                    self.assertEqual(surf.get_at(pt), rect_color, pt)
                else:
                    self.assertEqual(surf.get_at(pt), surface_color, pt)
            surf.unlock()
class DrawRectTest(DrawRectMixin, DrawTestCase):
    """Test draw module function rect.

    This class inherits the general tests from DrawRectMixin. It is also the
    class to add any draw.rect specific tests to.
    """
    # NOTE(review): the class statement had no body (a SyntaxError) -- its
    # docstring was lost during a previous edit and is restored here.


# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing rects.
# @unittest.skip('draw_py.draw_rect not supported yet')
# class PythonDrawRectTest(DrawRectMixin, PythonDrawTestCase):
#     """Test draw_py module function draw_rect.
#
#     This class inherits the general tests from DrawRectMixin. It is also the
#     class to add any draw_py.draw_rect specific tests to.
#     """
### Circle Testing ############################################################
class DrawCircleMixin(object):
def test_circle__args(self):
    """Ensure draw circle accepts the correct args."""
    surf = pygame.Surface((3, 3))

    self.assertIsInstance(
        self.draw_circle(surf, (0, 10, 0, 50), (0, 0), 3, 1, 1, 0, 1, 1),
        pygame.Rect,
    )
def test_circle__args_without_width(self):
    """Ensure draw circle accepts the args without width/quadrant values."""
    surf = pygame.Surface((2, 2))

    self.assertIsInstance(
        self.draw_circle(surf, (0, 0, 0, 50), (1, 1), 1), pygame.Rect
    )
def test_circle__args_with_negative_width(self):
    """Ensure draw circle handles negative widths correctly.

    A negative width draws nothing; the returned bounding rect is empty
    and anchored at the center point.
    """
    surf = pygame.Surface((2, 2))

    bounding_rect = self.draw_circle(surf, (0, 0, 0, 50), (1, 1), 1, -1)

    self.assertIsInstance(bounding_rect, pygame.Rect)
    self.assertEqual(bounding_rect, pygame.Rect(1, 1, 0, 0))
def test_circle__args_with_width_gt_radius(self):
    """Ensure draw circle handles a width greater than the radius."""
    surf = pygame.Surface((2, 2))

    bounding_rect = self.draw_circle(
        surf, (0, 0, 0, 50), (1, 1), 2, 3, 0, 0, 0, 0
    )

    self.assertIsInstance(bounding_rect, pygame.Rect)
    self.assertEqual(bounding_rect, pygame.Rect(0, 0, 2, 2))
def test_circle__kwargs(self):
    """Ensure draw circle accepts the correct kwargs, with and without
    the optional width and quadrant kwargs."""
    kwargs_cases = [
        # All kwargs supplied.
        {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("yellow"),
            "center": (2, 2),
            "radius": 2,
            "width": 1,
            "draw_top_right": True,
            "draw_top_left": True,
            "draw_bottom_left": False,
            "draw_bottom_right": True,
        },
        # Only the required kwargs.
        {
            "surface": pygame.Surface((2, 1)),
            "color": (0, 10, 20),
            "center": (1, 1),
            "radius": 1,
        },
    ]

    for case in kwargs_cases:
        self.assertIsInstance(self.draw_circle(**case), pygame.Rect)
def test_circle__kwargs_order_independent(self):
    """Ensure draw circle accepts kwargs in any order."""
    result = self.draw_circle(
        draw_top_right=False,
        color=(10, 20, 30),
        surface=pygame.Surface((3, 2)),
        width=0,
        draw_bottom_left=False,
        center=(1, 0),
        draw_bottom_right=False,
        radius=2,
        draw_top_left=True,
    )

    self.assertIsInstance(result, pygame.Rect)
def test_circle__args_missing(self):
    """Ensure draw circle detects any missing required args."""
    surf = pygame.Surface((1, 1))
    color = pygame.Color("blue")

    # Every strict prefix of the required args must raise.
    for args in ((surf, color, (0, 0)), (surf, color), (surf,), ()):
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(*args)
def test_circle__kwargs_missing(self):
    """Ensure draw circle detects any missing required kwargs."""
    full_kwargs = {
        "surface": pygame.Surface((1, 2)),
        "color": pygame.Color("red"),
        "center": (1, 0),
        "radius": 2,
        "width": 1,
        "draw_top_right": False,
        "draw_top_left": False,
        "draw_bottom_left": False,
        "draw_bottom_right": True,
    }

    # Dropping any one of the required kwargs must raise.
    for name in ("radius", "center", "color", "surface"):
        invalid_kwargs = {k: v for k, v in full_kwargs.items() if k != name}

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(**invalid_kwargs)
def test_circle__arg_invalid_types(self):
    """Ensure draw circle detects invalid arg types."""
    surf = pygame.Surface((2, 2))
    color = pygame.Color("blue")
    center = (1, 1)
    radius = 1

    # Each tuple is a complete arg list containing one invalid value.
    invalid_arg_sets = (
        (surf, color, center, radius, 1, "a", 1, 1, 1),  # draw_top_right
        (surf, color, center, radius, 1, 1, "b", 1, 1),  # draw_top_left
        (surf, color, center, radius, 1, 1, 1, "c", 1),  # draw_bottom_left
        (surf, color, center, radius, 1, 1, 1, 1, "d"),  # draw_bottom_right
        (surf, color, center, radius, "1"),  # Invalid width.
        (surf, color, center, "2"),  # Invalid radius.
        (surf, color, (1, 2, 3), radius),  # Invalid center.
        (surf, 2.3, center, radius),  # Invalid color.
        ((1, 2, 3, 4), color, center, radius),  # Invalid surface.
    )

    for args in invalid_arg_sets:
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(*args)
def test_circle__kwarg_invalid_types(self):
    """Ensure draw circle detects invalid kwarg types.

    Each invalid case is built from one set of valid kwargs by substituting
    a single invalid value, instead of repeating the full 9-entry dict per
    case as before (same cases, far less duplication).
    """
    valid_kwargs = {
        "surface": pygame.Surface((3, 3)),
        "color": pygame.Color("green"),
        "center": (0, 1),
        "radius": 1,
        "width": 1,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    # One invalid replacement value per kwarg name.
    invalid_values = {
        "surface": pygame.Surface,  # The class, not an instance.
        "color": 2.3,
        "center": (1, 1, 1),  # Too many coordinate values.
        "radius": "1",
        "width": 1.2,
        "draw_top_right": "True",
        "draw_top_left": "True",
        "draw_bottom_left": 3.14,
        "draw_bottom_right": "quadrant",
    }

    for name, invalid_value in invalid_values.items():
        kwargs = dict(valid_kwargs, **{name: invalid_value})

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(**kwargs)
def test_circle__kwarg_invalid_name(self):
    """Ensure draw circle detects unexpected kwarg names."""
    required = {
        "surface": pygame.Surface((2, 3)),
        "color": pygame.Color("cyan"),
        "center": (0, 0),
        "radius": 2,
    }

    # An unknown kwarg must raise with or without the optional kwargs.
    invalid_cases = (
        dict(
            required,
            width=1,
            quadrant=1,  # Not a valid kwarg name.
            draw_top_right=True,
            draw_top_left=True,
            draw_bottom_left=True,
            draw_bottom_right=True,
        ),
        dict(required, invalid=1),
    )

    for kwargs in invalid_cases:
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(**kwargs)
def test_circle__args_and_kwargs(self):
    """Ensure draw circle accepts a mix of args and kwargs.

    Each iteration moves one more leading parameter from the kwargs dict to
    the positional args (the pops are cumulative across iterations).
    """
    surface = pygame.Surface((3, 1))
    color = (255, 255, 0, 0)
    center = (1, 0)
    radius = 2
    width = 0
    # Fix: keep these in sync with the kwargs dict below. The original set
    # draw_top_left/draw_bottom_left to False while the kwargs dict used
    # True, so positional and keyword calls exercised different values.
    draw_top_right = True
    draw_top_left = True
    draw_bottom_left = True
    draw_bottom_right = True
    kwargs = {
        "surface": surface,
        "color": color,
        "center": center,
        "radius": radius,
        "width": width,
        "draw_top_right": draw_top_right,
        "draw_top_left": draw_top_left,
        "draw_bottom_left": draw_bottom_left,
        "draw_bottom_right": draw_bottom_right,
    }

    for name in (
        "surface", "color", "center", "radius", "width",
        "draw_top_right", "draw_top_left", "draw_bottom_left",
        "draw_bottom_right",
    ):
        kwargs.pop(name)  # Cumulative: earlier names stay popped.

        if "surface" == name:
            bounds_rect = self.draw_circle(surface, **kwargs)
        elif "color" == name:
            bounds_rect = self.draw_circle(surface, color, **kwargs)
        elif "center" == name:
            bounds_rect = self.draw_circle(surface, color, center, **kwargs)
        elif "radius" == name:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, **kwargs
            )
        elif "width" == name:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, width, **kwargs
            )
        elif "draw_top_right" == name:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, width, draw_top_right,
                **kwargs
            )
        elif "draw_top_left" == name:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, width, draw_top_right,
                draw_top_left, **kwargs
            )
        elif "draw_bottom_left" == name:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, width, draw_top_right,
                draw_top_left, draw_bottom_left, **kwargs
            )
        else:
            bounds_rect = self.draw_circle(
                surface, color, center, radius, width, draw_top_right,
                draw_top_left, draw_bottom_left, draw_bottom_right, **kwargs
            )

        self.assertIsInstance(bounds_rect, pygame.Rect)
def test_circle__valid_width_values(self):
    """Ensure draw circle accepts different width values."""
    center = (2, 2)
    radius = 1
    pos = (center[0] - radius, center[1])  # Leftmost point of the circle.
    surf_color = pygame.Color("white")
    surf = pygame.Surface((3, 4))
    circle_color = (10, 20, 30, 255)
    draw_kwargs = {
        "surface": surf,
        "color": circle_color,
        "center": center,
        "radius": radius,
        "width": None,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    for width in (-100, -10, -1, 0, 1, 10, 100):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["width"] = width
        # Negative widths draw nothing.
        expected_color = surf_color if width < 0 else circle_color

        bounding_rect = self.draw_circle(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_circle__valid_radius_values(self):
    """Ensure draw circle accepts different radius values."""
    pos = center = (2, 2)
    surf_color = pygame.Color("white")
    surf = pygame.Surface((3, 4))
    circle_color = (10, 20, 30, 255)
    draw_kwargs = {
        "surface": surf,
        "color": circle_color,
        "center": center,
        "radius": None,
        "width": 0,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    for radius in (-10, -1, 0, 1, 10):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["radius"] = radius
        # Only a positive radius draws anything.
        expected_color = circle_color if radius > 0 else surf_color

        bounding_rect = self.draw_circle(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_circle__valid_center_formats(self):
    """Ensure draw circle accepts different center formats."""
    expected_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    surf = pygame.Surface((4, 4))
    draw_kwargs = {
        "surface": surf,
        "color": expected_color,
        "center": None,
        "radius": 1,
        "width": 0,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }
    x, y = 2, 2  # center position

    # The center coordinates can be ints or floats, and the center itself
    # can be a tuple, list, or Vector2.
    for seq_type in (tuple, list, Vector2):
        for center in ((x, y), (x + 0.1, y), (x, y + 0.1),
                       (x + 0.1, y + 0.1)):
            surf.fill(surf_color)  # Clear for each test.
            draw_kwargs["center"] = seq_type(center)

            bounding_rect = self.draw_circle(**draw_kwargs)

            self.assertEqual(surf.get_at((x, y)), expected_color)
            self.assertIsInstance(bounding_rect, pygame.Rect)
def test_circle__valid_color_formats(self):
    """Ensure draw circle accepts different color formats."""
    center = (2, 2)
    radius = 1
    pos = (center[0] - radius, center[1])
    green_color = pygame.Color("green")
    surf_color = pygame.Color("black")
    surf = pygame.Surface((3, 4))
    draw_kwargs = {
        "surface": surf,
        "color": None,
        "center": center,
        "radius": radius,
        "width": 0,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    # Green as an RGB tuple, an RGBA tuple, a mapped int, and a Color.
    for color in (
        (0, 255, 0),
        (0, 255, 0, 255),
        surf.map_rgb(green_color),
        green_color,
    ):
        surf.fill(surf_color)  # Clear for each test.
        draw_kwargs["color"] = color
        # A mapped int must be unmapped before comparing with get_at().
        if isinstance(color, int):
            expected_color = surf.unmap_rgb(color)
        else:
            expected_color = green_color

        bounding_rect = self.draw_circle(**draw_kwargs)

        self.assertEqual(surf.get_at(pos), expected_color)
        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_circle__invalid_color_formats(self):
    """Ensure draw circle detects invalid color formats."""
    draw_kwargs = {
        "surface": pygame.Surface((4, 3)),
        "color": None,
        "center": (1, 2),
        "radius": 1,
        "width": 0,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    # A float and an arbitrary object are not valid colors.
    for bad_color in (2.3, self):
        draw_kwargs["color"] = bad_color

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_circle(**draw_kwargs)
def test_circle__floats(self):
    """Ensure draw circle accepts float center and radius values."""
    common_kwargs = {
        "color": (255, 255, 127),
        "radius": 1.3,
        "width": 0,
        "draw_top_right": True,
        "draw_top_left": True,
        "draw_bottom_left": True,
        "draw_bottom_right": True,
    }

    # The float center can be given as a tuple or a Vector2.
    for center in ((1.5, 1.5), Vector2(1.5, 1.5)):
        draw.circle(surface=pygame.Surface((4, 4)), center=center,
                    **common_kwargs)

    # Float center and radius, positionally, without the optional kwargs.
    draw.circle(pygame.Surface((2, 2)), (0, 0, 0, 50), (1.3, 1.3), 1.2)
# def test_circle_clip(self):
# """ maybe useful to help work out circle clip algorithm."""
# MAX = max
# MIN = min
# posx=30
# posy=15
# radius=1
# l=29
# t=14
# r=30
# b=16
# clip_rect_x=0
# clip_rect_y=0
# clip_rect_w=30
# clip_rect_h=30
# l = MAX(posx - radius, clip_rect_x)
# t = MAX(posy - radius, clip_rect_y)
# r = MIN(posx + radius, clip_rect_x + clip_rect_w)
# b = MIN(posy + radius, clip_rect_y + clip_rect_h)
# l, t, MAX(r - l, 0), MAX(b - t, 0)
def test_circle__bounding_rect(self):
    """Ensure draw circle returns the correct bounding rect.

    Circles are positioned on and off the surface over a range of radius
    and thickness values.
    """
    circle_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    max_radius = 3
    surface = pygame.Surface((30, 30), 0, 32)
    surf_rect = surface.get_rect()
    # Make a rect that is bigger than the surface to help test drawing
    # circles off and partially off the surface. Make this rect such that
    # when centering the test circle on one of its corners, the circle is
    # drawn fully off the test surface, but a rect bounding the circle
    # would still overlap with the test surface.
    big_rect = surf_rect.inflate(max_radius * 2 - 1, max_radius * 2 - 1)

    positions = rect_corners_mids_and_center(
        surf_rect
    ) + rect_corners_mids_and_center(big_rect)

    for pos in positions:
        # Test using different radius and thickness values.
        for radius in range(max_radius + 1):
            for thickness in range(radius + 1):
                surface.fill(surf_color)  # Clear for each test.

                bounding_rect = self.draw_circle(
                    surface, circle_color, pos, radius, thickness
                )

                # The expected rect is computed from what was actually
                # drawn.
                expected_rect = create_bounding_rect(surface, surf_color, pos)

                self.assertEqual(bounding_rect, expected_rect)
def test_circle_negative_radius(self):
    """Ensure draw circle with a negative radius draws nothing."""
    surf = pygame.Surface((200, 200))
    center = (surf.get_height() // 2, surf.get_height() // 2)

    bounding_rect = self.draw_circle(
        surf, (0, 0, 0, 50), center, radius=-1, width=1
    )

    # Nothing drawn: the bounding rect is empty.
    self.assertEqual(bounding_rect.size, (0, 0))
def test_circle_zero_radius(self):
    """Ensure draw circle with a zero radius draws nothing.

    The returned bounding rect should be empty and anchored at the
    center point.
    """
    surf = pygame.Surface((200, 200))
    circle_color = pygame.Color("red")
    surf_color = pygame.Color("black")
    surf.fill((0, 0, 0))
    center = (100, 100)

    bounding_rect = self.draw_circle(surf, circle_color, center, 0, 1)

    expected_rect = create_bounding_rect(surf, surf_color, center)
    self.assertEqual(bounding_rect, expected_rect)
    self.assertEqual(bounding_rect, pygame.Rect(100, 100, 0, 0))
def test_circle__surface_clip(self):
    """Ensure circles are clipped to the surface's clip area."""
    surfw = surfh = 25
    circle_color = pygame.Color("red")
    surface_color = pygame.Color("green")
    surf = pygame.Surface((surfw, surfh))
    surf.fill(surface_color)

    clip_rect = pygame.Rect((0, 0), (10, 10))
    clip_rect.center = surf.get_rect().center
    radius = clip_rect.w // 2 + 1

    for thickness in (0, 1):  # Filled and unfilled.
        # Center the circle on each edge/corner/center of the clip rect.
        for center in rect_corners_mids_and_center(clip_rect):
            # Draw unclipped to collect the points inside the clip area
            # expected to change.
            surf.set_clip(None)
            surf.fill(surface_color)
            self.draw_circle(surf, circle_color, center, radius, thickness)
            expected_pts = get_color_points(surf, circle_color, clip_rect)

            # Redraw with the clip set; only those points may change.
            surf.fill(surface_color)
            surf.set_clip(clip_rect)
            self.draw_circle(surf, circle_color, center, radius, thickness)

            surf.lock()  # For possible speed up.
            for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                if pt in expected_pts:
                    self.assertEqual(surf.get_at(pt), circle_color, pt)
                else:
                    self.assertEqual(surf.get_at(pt), surface_color, pt)
            surf.unlock()
def test_circle_shape(self):
    """Ensure a thick circle is drawn as a ring of the right size.

    Points comfortably inside the ring must be the circle color; points
    comfortably inside the hole or beyond the outer edge must be untouched.
    A one pixel tolerance is left around each edge.
    """
    surfw = surfh = 100
    circle_color = pygame.Color("red")
    surface_color = pygame.Color("green")
    surf = pygame.Surface((surfw, surfh))
    surf.fill(surface_color)

    (cx, cy) = center = (50, 50)
    radius = 45
    width = 25

    dest_rect = self.draw_circle(surf, circle_color, center, radius, width)

    # Squared-distance bounds (with one pixel tolerance at each edge).
    ring_inner = (radius - width + 1) ** 2
    ring_outer = (radius - 1) ** 2
    hole_edge = (radius - width - 1) ** 2
    outside_edge = (radius + 1) ** 2

    for x, y in test_utils.rect_area_pts(dest_rect):
        sqr_distance = (x - cx) ** 2 + (y - cy) ** 2
        if ring_inner < sqr_distance < ring_outer:
            self.assertEqual(surf.get_at((x, y)), circle_color)
        if sqr_distance < hole_edge or sqr_distance > outside_edge:
            self.assertEqual(surf.get_at((x, y)), surface_color)
def test_circle__diameter(self):
    """Ensure the bounding rect of a circle is twice the radius in size.

    Fix: removed the dead ``radius = 6`` assignment that was immediately
    shadowed by the loop variable.
    """
    surf = pygame.Surface((200, 200))
    color = (0, 0, 0, 50)
    center = (surf.get_height() // 2, surf.get_height() // 2)
    width = 1

    for radius in range(1, 65):
        bounding_rect = self.draw_circle(surf, color, center, radius, width)

        self.assertEqual(bounding_rect.width, radius * 2)
        self.assertEqual(bounding_rect.height, radius * 2)
class DrawCircleTest(DrawCircleMixin, DrawTestCase):
    """Test draw module function circle.

    This class inherits the general tests from DrawCircleMixin. It is also
    the class to add any draw.circle specific tests to.
    """
    # NOTE(review): the class statement had no body (a SyntaxError) -- the
    # docstring was lost/garbled into stray comments during a previous edit
    # and is restored here to match the sibling test classes' style.
### Arc Testing ###############################################################
class DrawArcMixin(object):
def test_arc__args(self):
    """Ensure draw arc accepts the correct args."""
    surf = pygame.Surface((3, 3))

    self.assertIsInstance(
        self.draw_arc(surf, (0, 10, 0, 50), (1, 1, 2, 2), 0, 1, 1),
        pygame.Rect,
    )
def test_arc__args_without_width(self):
    """Ensure draw arc accepts the args without a width."""
    surf = pygame.Surface((2, 2))
    rect = pygame.Rect((0, 0), (2, 2))

    self.assertIsInstance(
        self.draw_arc(surf, (1, 1, 1, 99), rect, 1.1, 2.1), pygame.Rect
    )
def test_arc__args_with_negative_width(self):
    """Ensure draw arc handles negative widths correctly.

    A negative width draws nothing; the returned bounding rect is empty
    and anchored at the rect's topleft.
    """
    surf = pygame.Surface((3, 3))

    bounding_rect = self.draw_arc(surf, (10, 10, 50, 50), (1, 1, 2, 2),
                                  0, 1, -1)

    self.assertIsInstance(bounding_rect, pygame.Rect)
    self.assertEqual(bounding_rect, pygame.Rect(1, 1, 0, 0))
def test_arc__args_with_width_gt_radius(self):
    """Ensure draw arc accepts a width greater than either radius."""
    rect = pygame.Rect((0, 0), (4, 4))

    # A width bigger than half the rect's width, then half its height.
    for width in (rect.w // 2 + 1, rect.h // 2 + 1):
        bounding_rect = self.draw_arc(
            pygame.Surface((3, 3)), (10, 10, 50, 50), rect, 0, 45, width
        )

        self.assertIsInstance(bounding_rect, pygame.Rect)
def test_arc__kwargs(self):
    """Ensure draw arc accepts the correct kwargs, with and without the
    optional width."""
    kwargs_cases = [
        # All kwargs supplied.
        {
            "surface": pygame.Surface((4, 4)),
            "color": pygame.Color("yellow"),
            "rect": pygame.Rect((0, 0), (3, 2)),
            "start_angle": 0.5,
            "stop_angle": 3,
            "width": 1,
        },
        # Only the required kwargs.
        {
            "surface": pygame.Surface((2, 1)),
            "color": (0, 10, 20),
            "rect": (0, 0, 2, 2),
            "start_angle": 1,
            "stop_angle": 3.1,
        },
    ]

    for case in kwargs_cases:
        self.assertIsInstance(self.draw_arc(**case), pygame.Rect)
def test_arc__kwargs_order_independent(self):
    """Ensure draw arc accepts kwargs in any order."""
    result = self.draw_arc(
        stop_angle=1,
        start_angle=2.2,
        color=(1, 2, 3),
        surface=pygame.Surface((3, 2)),
        width=1,
        rect=pygame.Rect((1, 0), (2, 3)),
    )

    self.assertIsInstance(result, pygame.Rect)
def test_arc__args_missing(self):
    """Ensure draw arc detects any missing required args."""
    surf = pygame.Surface((1, 1))
    color = pygame.Color("red")
    rect = pygame.Rect((0, 0), (2, 2))

    # Every strict prefix of the required args must raise.
    for args in (
        (surf, color, rect, 0.1),
        (surf, color, rect),
        (surf, color),
        (surf,),
        (),
    ):
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_arc(*args)
def test_arc__kwargs_missing(self):
    """Ensure draw arc detects any missing required kwargs."""
    full_kwargs = {
        "surface": pygame.Surface((1, 2)),
        "color": pygame.Color("red"),
        "rect": pygame.Rect((1, 0), (2, 2)),
        "start_angle": 0.1,
        "stop_angle": 2,
        "width": 1,
    }

    # Dropping any one of the required kwargs must raise.
    for name in ("stop_angle", "start_angle", "rect", "color", "surface"):
        invalid_kwargs = {k: v for k, v in full_kwargs.items() if k != name}

        with self.assertRaises(TypeError):
            bounds_rect = self.draw_arc(**invalid_kwargs)
def test_arc__arg_invalid_types(self):
    """Ensure draw arc detects invalid arg types."""
    surf = pygame.Surface((2, 2))
    color = pygame.Color("blue")
    rect = pygame.Rect((1, 1), (3, 3))

    # Each tuple is a complete arg list containing one invalid value.
    invalid_arg_sets = (
        (surf, color, rect, 0, 1, "1"),  # Invalid width.
        (surf, color, rect, 0, "1", 1),  # Invalid stop_angle.
        (surf, color, rect, "1", 0, 1),  # Invalid start_angle.
        (surf, color, (1, 2, 3, 4, 5), 0, 1, 1),  # Invalid rect.
        (surf, 2.3, rect, 0, 1, 1),  # Invalid color.
        (rect, color, rect, 0, 1, 1),  # Invalid surface.
    )

    for args in invalid_arg_sets:
        with self.assertRaises(TypeError):
            bounds_rect = self.draw_arc(*args)
def test_arc__kwarg_invalid_types(self):
surface = pygame.Surface((3, 3))
color = pygame.Color("green")
rect = pygame.Rect((0, 1), (4, 2))
start = 3
stop = 4
kwargs_list = [
{
"surface": pygame.Surface, # Invalid surface.
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": 2.3, # Invalid color.
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": (0, 0, 0), # Invalid rect.
"start_angle": start,
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": "1", # Invalid start_angle.
"stop_angle": stop,
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": "1", # Invalid stop_angle.
"width": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1.1,
},
] # Invalid width.
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**kwargs)
def test_arc__kwarg_invalid_name(self):
surface = pygame.Surface((2, 3))
color = pygame.Color("cyan")
rect = pygame.Rect((0, 1), (2, 2))
start = 0.9
stop = 2.3
kwargs_list = [
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": 1,
"invalid": 1,
},
{
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"invalid": 1,
},
]
for kwargs in kwargs_list:
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**kwargs)
def test_arc__args_and_kwargs(self):
surface = pygame.Surface((3, 1))
color = (255, 255, 0, 0)
rect = pygame.Rect((1, 0), (2, 3))
start = 0.6
stop = 2
width = 1
kwargs = {
"surface": surface,
"color": color,
"rect": rect,
"start_angle": start,
"stop_angle": stop,
"width": width,
}
for name in ("surface", "color", "rect", "start_angle", "stop_angle"):
kwargs.pop(name)
if "surface" == name:
bounds_rect = self.draw_arc(surface, **kwargs)
elif "color" == name:
bounds_rect = self.draw_arc(surface, color, **kwargs)
elif "rect" == name:
bounds_rect = self.draw_arc(surface, color, rect, **kwargs)
elif "start_angle" == name:
bounds_rect = self.draw_arc(surface, color, rect, start, **kwargs)
elif "stop_angle" == name:
bounds_rect = self.draw_arc(surface, color, rect, start, stop, **kwargs)
else:
bounds_rect = self.draw_arc(
surface, color, rect, start, stop, width, **kwargs
)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__valid_width_values(self):
arc_color = pygame.Color("yellow")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": arc_color,
"rect": rect,
"start_angle": 0,
"stop_angle": 7,
"width": None,
}
for width in (-50, -10, -3, -2, -1, 0, 1, 2, 3, 10, 50):
msg = "width={}".format(width)
surface.fill(surface_color) # Clear for each test.
kwargs["width"] = width
expected_color = arc_color if width > 0 else surface_color
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_stop_angle_values(self):
expected_color = pygame.Color("blue")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": rect,
"start_angle": -17,
"stop_angle": None,
"width": 1,
}
for stop_angle in (-10, -5.5, -1, 0, 1, 5.5, 10):
msg = "stop_angle={}".format(stop_angle)
surface.fill(surface_color) # Clear for each test.
kwargs["stop_angle"] = stop_angle
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_start_angle_values(self):
expected_color = pygame.Color("blue")
surface_color = pygame.Color("white")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": rect,
"start_angle": None,
"stop_angle": 17,
"width": 1,
}
for start_angle in (-10.0, -5.5, -1, 0, 1, 5.5, 10.0):
msg = "start_angle={}".format(start_angle)
surface.fill(surface_color) # Clear for each test.
kwargs["start_angle"] = start_angle
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color, msg)
self.assertIsInstance(bounds_rect, pygame.Rect, msg)
def test_arc__valid_rect_formats(self):
expected_color = pygame.Color("red")
surface_color = pygame.Color("black")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": expected_color,
"rect": None,
"start_angle": 0,
"stop_angle": 7,
"width": 1,
}
rects = (rect, (rect.topleft, rect.size), (rect.x, rect.y, rect.w, rect.h))
for rect in rects:
surface.fill(surface_color) # Clear for each test.
kwargs["rect"] = rect
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__valid_color_formats(self):
green_color = pygame.Color("green")
surface_color = pygame.Color("black")
surface = pygame.Surface((6, 6))
rect = pygame.Rect((0, 0), (4, 4))
rect.center = surface.get_rect().center
pos = rect.centerx + 1, rect.centery + 1
kwargs = {
"surface": surface,
"color": None,
"rect": rect,
"start_angle": 0,
"stop_angle": 7,
"width": 1,
}
greens = (
(0, 255, 0),
(0, 255, 0, 255),
surface.map_rgb(green_color),
green_color,
)
for color in greens:
surface.fill(surface_color) # Clear for each test.
kwargs["color"] = color
if isinstance(color, int):
expected_color = surface.unmap_rgb(color)
else:
expected_color = green_color
bounds_rect = self.draw_arc(**kwargs)
self.assertEqual(surface.get_at(pos), expected_color)
self.assertIsInstance(bounds_rect, pygame.Rect)
def test_arc__invalid_color_formats(self):
pos = (1, 1)
surface = pygame.Surface((4, 3))
kwargs = {
"surface": surface,
"color": None,
"rect": pygame.Rect(pos, (2, 2)),
"start_angle": 5,
"stop_angle": 6.1,
"width": 1,
}
for expected_color in (2.3, self):
kwargs["color"] = expected_color
with self.assertRaises(TypeError):
bounds_rect = self.draw_arc(**kwargs)
    # Stub kept with the "todo_" prefix so the default test loader skips it;
    # rename to test_arc once a general draw_arc test is written.
    def todo_test_arc(self):
        self.fail()
    def test_arc__bounding_rect(self):
        """The rect returned by draw_arc must equal the actual bounding rect
        of the pixels drawn, for arcs fully on, partially off and fully off
        the surface, across rect sizes, thicknesses and stop angles.
        """
        arc_color = pygame.Color("red")
        surf_color = pygame.Color("black")
        min_width = min_height = 5
        max_width = max_height = 7
        sizes = ((min_width, min_height), (max_width, max_height))
        surface = pygame.Surface((20, 20), 0, 32)
        surf_rect = surface.get_rect()
        # Make a rect that is bigger than the surface to help test drawing
        # arcs off and partially off the surface.
        big_rect = surf_rect.inflate(min_width * 2 + 1, min_height * 2 + 1)
        # Max angle allows for a full circle to be drawn.
        start_angle = 0
        stop_angles = (0, 2, 3, 5, math.ceil(2 * math.pi))
        for pos in rect_corners_mids_and_center(
            surf_rect
        ) + rect_corners_mids_and_center(big_rect):
            # Each of the arc's rect position attributes will be set to the pos
            # value.
            for attr in RECT_POSITION_ATTRIBUTES:
                # Test using different rect sizes, thickness values and stop
                # angles.
                for width, height in sizes:
                    arc_rect = pygame.Rect((0, 0), (width, height))
                    setattr(arc_rect, attr, pos)
                    for thickness in (0, 1, 2, 3, min(width, height)):
                        for stop_angle in stop_angles:
                            surface.fill(surf_color)  # Clear for each test.
                            bounding_rect = self.draw_arc(
                                surface,
                                arc_color,
                                arc_rect,
                                start_angle,
                                stop_angle,
                                thickness,
                            )
                            # Calculating the expected_rect after the arc
                            # is drawn (it uses what is actually drawn).
                            expected_rect = create_bounding_rect(
                                surface, surf_color, arc_rect.topleft
                            )
                            self.assertEqual(
                                bounding_rect,
                                expected_rect,
                                "thickness={}".format(thickness),
                            )
    def test_arc__surface_clip(self):
        """Ensures draw_arc only modifies pixels inside the surface's clip
        area: draws once unclipped to learn the expected pixels, then again
        clipped, and compares every surface pixel.
        """
        surfw = surfh = 30
        start = 0.1
        end = 0  # end < start so a full circle will be drawn
        arc_color = pygame.Color("red")
        surface_color = pygame.Color("green")
        surface = pygame.Surface((surfw, surfh))
        surface.fill(surface_color)
        clip_rect = pygame.Rect((0, 0), (11, 11))
        clip_rect.center = surface.get_rect().center
        pos_rect = clip_rect.copy()  # Manages the arc's pos.
        for thickness in (1, 3):  # Different line widths.
            # Test centering the arc along the clip rect's edge.
            for center in rect_corners_mids_and_center(clip_rect):
                # Get the expected points by drawing the arc without the
                # clip area set.
                pos_rect.center = center
                surface.set_clip(None)
                surface.fill(surface_color)
                self.draw_arc(surface, arc_color, pos_rect, start, end, thickness)
                expected_pts = get_color_points(surface, arc_color, clip_rect)
                # Clear the surface and set the clip area. Redraw the arc
                # and check that only the clip area is modified.
                surface.fill(surface_color)
                surface.set_clip(clip_rect)
                self.draw_arc(surface, arc_color, pos_rect, start, end, thickness)
                surface.lock()  # For possible speed up.
                # Check all the surface points to ensure only the expected_pts
                # are the arc_color.
                for pt in ((x, y) for x in range(surfw) for y in range(surfh)):
                    if pt in expected_pts:
                        expected_color = arc_color
                    else:
                        expected_color = surface_color
                    self.assertEqual(surface.get_at(pt), expected_color, pt)
                surface.unlock()
class DrawArcTest(DrawArcMixin, DrawTestCase):
    """Test draw module function arc.

    This class inherits the general tests from DrawArcMixin. It is also the
    class to add any draw.arc specific tests to.
    """


# Commented out to avoid cluttering the test output. Add back in if draw_py
# ever properly supports drawing arcs.
# @unittest.skip('draw_py.draw_arc not supported yet')
# class PythonDrawArcTest(DrawArcMixin, PythonDrawTestCase):
# """Test draw_py module function draw_arc.
#
# This class inherits the general tests from DrawArcMixin. It is also the
# class to add any draw_py.draw_arc specific tests to.
# """
### Draw Module Testing #######################################################
class DrawModuleTest(unittest.TestCase):
    """Draw module tests that span several draw functions."""

    def test_path_data_validation(self):
        """Point-list draw functions must reject bad points and must not
        draw anything when they do."""
        surf = pygame.Surface((5, 5))
        rect = pygame.Rect(0, 0, 5, 5)
        bad_values = (
            "text",
            b"bytes",
            1 + 1j,  # string, bytes, complex,
            object(),
            (lambda x: x),
        )  # object, function
        bad_points = list(bad_values) + [(1,), (1, 2, 3)]  # wrong tuple length
        bad_points.extend((1, v) for v in bad_values)  # one wrong value
        good_path = [(1, 1), (1, 3), (3, 3), (3, 1)]
        # A) draw.lines
        check_pts = [(x, y) for x in range(5) for y in range(5)]

        # Fix: local flag was misspelled "is_polgon".
        for method, is_polygon in (
            (draw.lines, 0),
            (draw.aalines, 0),
            (draw.polygon, 1),
        ):
            for val in bad_values:
                # 1. at the beginning
                draw.rect(surf, RED, rect, 0)
                with self.assertRaises(TypeError):
                    if is_polygon:
                        method(surf, GREEN, [val] + good_path, 0)
                    else:
                        method(surf, GREEN, True, [val] + good_path)

                # make sure, nothing was drawn :
                self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts))

                # 2. not at the beginning (was not checked)
                draw.rect(surf, RED, rect, 0)
                with self.assertRaises(TypeError):
                    path = good_path[:2] + [val] + good_path[2:]
                    if is_polygon:
                        method(surf, GREEN, path, 0)
                    else:
                        method(surf, GREEN, True, path)

                # make sure, nothing was drawn :
                self.assertTrue(all(surf.get_at(pt) == RED for pt in check_pts))

    def test_color_validation(self):
        """All draw functions must accept the supported color formats and
        raise TypeError for unsupported ones."""
        surf = pygame.Surface((10, 10))
        colors = 123456, (1, 10, 100), RED, '#ab12df', 'red'
        points = ((0, 0), (1, 1), (1, 0))

        # 1. valid colors
        for col in colors:
            draw.line(surf, col, (0, 0), (1, 1))
            draw.aaline(surf, col, (0, 0), (1, 1))
            draw.aalines(surf, col, True, points)
            draw.lines(surf, col, True, points)
            draw.arc(surf, col, pygame.Rect(0, 0, 3, 3), 15, 150)
            draw.ellipse(surf, col, pygame.Rect(0, 0, 3, 6), 1)
            draw.circle(surf, col, (7, 3), 2)
            draw.polygon(surf, col, points, 0)

        # 2. invalid colors
        for col in (1.256, object(), None):
            with self.assertRaises(TypeError):
                draw.line(surf, col, (0, 0), (1, 1))

            with self.assertRaises(TypeError):
                draw.aaline(surf, col, (0, 0), (1, 1))

            with self.assertRaises(TypeError):
                draw.aalines(surf, col, True, points)

            with self.assertRaises(TypeError):
                draw.lines(surf, col, True, points)

            with self.assertRaises(TypeError):
                draw.arc(surf, col, pygame.Rect(0, 0, 3, 3), 15, 150)

            with self.assertRaises(TypeError):
                draw.ellipse(surf, col, pygame.Rect(0, 0, 3, 6), 1)

            with self.assertRaises(TypeError):
                draw.circle(surf, col, (7, 3), 2)

            with self.assertRaises(TypeError):
                draw.polygon(surf, col, points, 0)
###############################################################################
# Run all tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f79031ff1713a9ec2f8cf11878d55021dd086e | 529 | py | Python | airbyte-integrations/connectors/destination-rabbitmq/setup.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/destination-rabbitmq/setup.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 29 | 2021-10-07T17:20:29.000Z | 2021-12-27T13:07:09.000Z | airbyte-integrations/connectors/destination-rabbitmq/setup.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1 | 2021-07-30T07:24:51.000Z | 2021-07-30T07:24:51.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
# Runtime dependencies for the connector.
MAIN_REQUIREMENTS = ["airbyte-cdk", "pika>=1.1.0"]

# Dependencies needed only to run the test suite (installed via the
# "tests" extra below).
TEST_REQUIREMENTS = ["pytest~=6.1"]

setup(
    name="destination_rabbitmq",
    description="Destination implementation for Rabbitmq.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=MAIN_REQUIREMENTS,
    package_data={"": ["*.json"]},  # ship JSON spec files with the package
    extras_require={
        "tests": TEST_REQUIREMENTS,
    },
)
| 22.041667 | 59 | 0.68242 |
from setuptools import find_packages, setup
# Runtime dependencies for the connector.
MAIN_REQUIREMENTS = ["airbyte-cdk", "pika>=1.1.0"]

# Dependencies needed only to run the test suite (installed via the
# "tests" extra below).
TEST_REQUIREMENTS = ["pytest~=6.1"]

setup(
    name="destination_rabbitmq",
    description="Destination implementation for Rabbitmq.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=MAIN_REQUIREMENTS,
    package_data={"": ["*.json"]},  # ship JSON spec files with the package
    extras_require={
        "tests": TEST_REQUIREMENTS,
    },
)
| true | true |
f7f790696d0085954c6d4fff8df0c30a792955e7 | 1,125 | py | Python | app/dependencies.py | code-lab-org/ise-design | fbc9683e4bdeacf6a0f16cfb81d79ace0c337b28 | [
"Apache-2.0"
] | null | null | null | app/dependencies.py | code-lab-org/ise-design | fbc9683e4bdeacf6a0f16cfb81d79ace0c337b28 | [
"Apache-2.0"
] | null | null | null | app/dependencies.py | code-lab-org/ise-design | fbc9683e4bdeacf6a0f16cfb81d79ace0c337b28 | [
"Apache-2.0"
] | null | null | null | from fastapi_users import FastAPIUsers, models
from fastapi_users.authentication import CookieAuthentication, JWTAuthentication
from fastapi_users.db import SQLAlchemyUserDatabase
import os
from .schemas.user import User, UserCreate, UserUpdate, UserDB
from .models.user import UserTable
from .database import Base, database
# Secret used to sign authentication cookies/tokens; override the default
# in production via the ISE_SECRET environment variable.
SECRET = os.getenv("ISE_SECRET", "change me")
# Login lifetime in seconds. Fix: os.getenv returns a *string* when the
# variable is set but the int 7200 when it is not; coerce so the value has
# a consistent int type either way.
LOGIN_LIFETIME = int(os.getenv("ISE_LOGIN_LIFETIME_SECONDS", 7200))

# configure cookie-based authentication
cookie_authentication = CookieAuthentication(
    secret=SECRET,
    lifetime_seconds=LOGIN_LIFETIME
)

# configure json web token-based authentication
jwt_authentication = JWTAuthentication(
    secret=SECRET,
    lifetime_seconds=LOGIN_LIFETIME,
    tokenUrl="/auth/login"
)

# configure the SQL alchemy database for FastAPI-User
users = UserTable.__table__
user_db = SQLAlchemyUserDatabase(UserDB, database, users)

# configure the FastAPI-User package with both auth backends
fastapi_users = FastAPIUsers(
    user_db,
    [cookie_authentication, jwt_authentication],
    User,
    UserCreate,
    UserUpdate,
    UserDB,
)
| 28.125 | 80 | 0.797333 | from fastapi_users import FastAPIUsers, models
from fastapi_users.authentication import CookieAuthentication, JWTAuthentication
from fastapi_users.db import SQLAlchemyUserDatabase
import os
from .schemas.user import User, UserCreate, UserUpdate, UserDB
from .models.user import UserTable
from .database import Base, database
# Secret used to sign authentication cookies/tokens; override the default
# in production via the ISE_SECRET environment variable.
SECRET = os.getenv("ISE_SECRET", "change me")
# Login lifetime in seconds. Fix: os.getenv returns a *string* when the
# variable is set but the int 7200 when it is not; coerce so the value has
# a consistent int type either way.
LOGIN_LIFETIME = int(os.getenv("ISE_LOGIN_LIFETIME_SECONDS", 7200))

# Cookie-based authentication backend.
cookie_authentication = CookieAuthentication(
    secret=SECRET,
    lifetime_seconds=LOGIN_LIFETIME
)

# JSON Web Token based authentication backend.
jwt_authentication = JWTAuthentication(
    secret=SECRET,
    lifetime_seconds=LOGIN_LIFETIME,
    tokenUrl="/auth/login"
)

# SQLAlchemy adapter for the FastAPI-Users package.
users = UserTable.__table__
user_db = SQLAlchemyUserDatabase(UserDB, database, users)

# Main FastAPI-Users object wired with both auth backends.
fastapi_users = FastAPIUsers(
    user_db,
    [cookie_authentication, jwt_authentication],
    User,
    UserCreate,
    UserUpdate,
    UserDB,
)
| true | true |
f7f79214fb071183d57d7139d78e6859228b97d6 | 4,670 | py | Python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/mapping_users_rules_parameters_default_unix_user.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/mapping_users_rules_parameters_default_unix_user.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/mapping_users_rules_parameters_default_unix_user.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2 # noqa: F401,E501
class MappingUsersRulesParametersDefaultUnixUser(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # The string below is a plain expression statement (not the class
    # docstring); it is kept exactly as emitted by swagger-codegen.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'domain': 'str',
        'user': 'str'
    }

    attribute_map = {
        'domain': 'domain',
        'user': 'user'
    }

    def __init__(self, domain=None, user=None):  # noqa: E501
        """MappingUsersRulesParametersDefaultUnixUser - a model defined in Swagger"""  # noqa: E501
        self._domain = None
        self._user = None
        self.discriminator = None
        if domain is not None:
            self.domain = domain
        # 'user' is required, so it is assigned unconditionally and the
        # setter rejects None.
        self.user = user

    @property
    def domain(self):
        """Gets the domain of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501


        :return: The domain of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501
        :rtype: str
        """
        return self._domain

    @domain.setter
    def domain(self, domain):
        """Sets the domain of this MappingUsersRulesParametersDefaultUnixUser.


        :param domain: The domain of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501
        :type: str
        """
        if domain is not None and len(domain) > 255:
            raise ValueError("Invalid value for `domain`, length must be less than or equal to `255`")  # noqa: E501
        # NOTE(review): len() can never be negative, so this generated
        # minLength check is dead code; kept to match codegen output.
        if domain is not None and len(domain) < 0:
            raise ValueError("Invalid value for `domain`, length must be greater than or equal to `0`")  # noqa: E501

        self._domain = domain

    @property
    def user(self):
        """Gets the user of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501


        :return: The user of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501
        :rtype: str
        """
        return self._user

    @user.setter
    def user(self, user):
        """Sets the user of this MappingUsersRulesParametersDefaultUnixUser.


        :param user: The user of this MappingUsersRulesParametersDefaultUnixUser.  # noqa: E501
        :type: str
        """
        if user is None:
            raise ValueError("Invalid value for `user`, must not be `None`")  # noqa: E501
        if user is not None and len(user) > 255:
            raise ValueError("Invalid value for `user`, length must be less than or equal to `255`")  # noqa: E501
        # NOTE(review): dead minLength check, see domain setter above.
        if user is not None and len(user) < 0:
            raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`")  # noqa: E501

        self._user = user

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MappingUsersRulesParametersDefaultUnixUser):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.133333 | 117 | 0.596788 |
import pprint
import re
import six
from isi_sdk_8_2_2.models.mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2
class MappingUsersRulesParametersDefaultUnixUser(object):
    """Swagger-generated model with an optional ``domain`` and a required
    ``user`` string attribute (both limited to 255 characters)."""

    # Maps attribute name -> attribute type.
    swagger_types = {
        'domain': 'str',
        'user': 'str'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'domain': 'domain',
        'user': 'user'
    }

    def __init__(self, domain=None, user=None):
        """Initialize the model; ``user`` is required (setter rejects None)."""
        self._domain = None
        self._user = None
        self.discriminator = None
        if domain is not None:
            self.domain = domain
        self.user = user

    @property
    def domain(self):
        """Return the domain string (or None if unset)."""
        return self._domain

    @domain.setter
    def domain(self, domain):
        """Set the domain; must be at most 255 characters when given."""
        if domain is not None and len(domain) > 255:
            raise ValueError("Invalid value for `domain`, length must be less than or equal to `255`")
        # NOTE(review): len() can never be negative, so this generated
        # minLength check is dead code; kept to match codegen output.
        if domain is not None and len(domain) < 0:
            raise ValueError("Invalid value for `domain`, length must be greater than or equal to `0`")
        self._domain = domain

    @property
    def user(self):
        """Return the user string."""
        return self._user

    @user.setter
    def user(self, user):
        """Set the user; required, at most 255 characters."""
        if user is None:
            raise ValueError("Invalid value for `user`, must not be `None`")
        if user is not None and len(user) > 255:
            raise ValueError("Invalid value for `user`, length must be less than or equal to `255`")
        # NOTE(review): dead minLength check, see domain setter above.
        if user is not None and len(user) < 0:
            raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`")
        self._user = user

    def to_dict(self):
        """Return the model properties as a dict, recursively converting
        nested models, lists and dicts of models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, MappingUsersRulesParametersDefaultUnixUser):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f7f7924b903f852c89d8d58411f80c5f432551fe | 1,586 | py | Python | Introduccion_Python/04_depuracion_codigo/probar-decirhora.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | Introduccion_Python/04_depuracion_codigo/probar-decirhora.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | Introduccion_Python/04_depuracion_codigo/probar-decirhora.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | import decirhora
import unittest
class ProbarDecirHora(unittest.TestCase):
    """Tests for the decirhora module (Spanish words for numbers/times)."""

    def setUp(self):
        # Numbers 0..10, matched index-for-index with `letras` below.
        self.nums = list(range(11))

    def test_numbers(self):
        # Ensure numbers are converted to (Spanish) words correctly.
        letras = (
            'cero', 'uno', 'dos', 'tres', 'cuatro', 'cinco',
            'seis', 'siete', 'ocho', 'nueve', 'diez'
        )
        for i, n in enumerate(self.nums):
            self.assertEqual(decirhora.numletras(n).numletras(), letras[i])

    def test_time(self):
        # (hour, minute) inputs, paired index-for-index with the expected
        # spoken-time strings below. "OOR" presumably means out-of-range
        # input -- TODO confirm against decirhora.
        tupla_horas = (
            (0, 0), (0, 1), (11, 0), (12, 0), (13, 0), (12, 29), (12, 30),
            (12, 31), (12, 15), (12, 30), (12, 45), (11, 59), (23, 15),
            (23, 59), (12, 59), (13, 59), (1, 60), (24, 0)
        )
        letras_horas= (
            "media noche",
            "uno pasado la media noche",
            "once en punto",
            "medio dia",
            "uno en punto",
            "veinte-nueve pasado la medio dia",
            "media hora pasado la medio dia",
            "veinte-nueve para la uno",
            "un cuarto pasado la medio dia",
            "media hora pasado la medio dia",
            "un cuarto para la uno",
            "uno para la medio dia",
            "un cuarto pasado la once",
            "uno para la media noche",
            "uno para la uno",
            "uno para la dos",
            "OOR",
            "OOR"
        )
        for i, t in enumerate(tupla_horas):
            self.assertEqual(decirhora.decirhora(*t).letras(), letras_horas[i])

# Run the tests when executed directly.
if __name__ == "__main__": unittest.main()
| 33.744681 | 79 | 0.496217 | import decirhora
import unittest
class ProbarDecirHora(unittest.TestCase):
def setUp(self):
self.nums = list(range(11))
def test_numbers(self):
letras = (
'cero', 'uno', 'dos', 'tres', 'cuatro', 'cinco',
'seis', 'siete', 'ocho', 'nueve', 'diez'
)
for i, n in enumerate(self.nums):
self.assertEqual(decirhora.numletras(n).numletras(), letras[i])
def test_time(self):
tupla_horas = (
(0, 0), (0, 1), (11, 0), (12, 0), (13, 0), (12, 29), (12, 30),
(12, 31), (12, 15), (12, 30), (12, 45), (11, 59), (23, 15),
(23, 59), (12, 59), (13, 59), (1, 60), (24, 0)
)
letras_horas= (
"media noche",
"uno pasado la media noche",
"once en punto",
"medio dia",
"uno en punto",
"veinte-nueve pasado la medio dia",
"media hora pasado la medio dia",
"veinte-nueve para la uno",
"un cuarto pasado la medio dia",
"media hora pasado la medio dia",
"un cuarto para la uno",
"uno para la medio dia",
"un cuarto pasado la once",
"uno para la media noche",
"uno para la uno",
"uno para la dos",
"OOR",
"OOR"
)
for i, t in enumerate(tupla_horas):
self.assertEqual(decirhora.decirhora(*t).letras(), letras_horas[i])
if __name__ == "__main__": unittest.main()
| true | true |
f7f79307c928929ed3e9c521f83cd421138f9414 | 3,619 | py | Python | jupiter_orm/jupiter_orm/Model.py | dianbaer/fast | 362bd0ca9f6039ea70cd665438dd8731e4c144a5 | [
"MIT"
] | 226 | 2017-10-26T12:05:26.000Z | 2021-12-06T15:14:54.000Z | jupiter_orm/jupiter_orm/Model.py | dianbaer/fast | 362bd0ca9f6039ea70cd665438dd8731e4c144a5 | [
"MIT"
] | null | null | null | jupiter_orm/jupiter_orm/Model.py | dianbaer/fast | 362bd0ca9f6039ea70cd665438dd8731e4c144a5 | [
"MIT"
] | 73 | 2017-10-27T12:38:59.000Z | 2019-12-23T07:29:48.000Z | import logging
from jupiter_orm.DBPool import DBPoolC
from jupiter_orm.ModelMetaclass import ModelMetaclassC
class ModelC(dict, metaclass=ModelMetaclassC):
    """Base ORM model: a dict whose keys are also readable as attributes.

    Table metadata (__table__, __select__, __insert__, __update__,
    __delete__, __fields__, __primary_key__, __mappings__) is supplied by
    ModelMetaclassC. SQL uses '?' placeholders, presumably translated by
    DBPoolC to the driver's parameter style -- confirm against DBPoolC.
    """

    def __init__(self, **kwargs):
        super(ModelC, self).__init__(**kwargs)

    def __getattr__(self, item):
        """Expose dict keys as attributes (model.name <=> model['name'])."""
        try:
            return self[item]
        except KeyError:
            raise AttributeError(r"'ModelC' object has no attribute '%s'" % item)

    def __setattr__(self, key, value):
        self[key] = value

    @classmethod
    async def findAll(cls, where=None, args=None, **kwargs):
        """Return all matching rows as model instances.

        :param where: optional SQL WHERE clause body (without 'where').
        :param args: optional parameter list for the WHERE clause.
        :param kwargs: supports ``orderBy`` (str) and ``limit`` (int or a
            2-tuple used as 'limit ?, ?').
        :raises ValueError: if ``limit`` is neither an int nor a 2-tuple.
        """
        sql = [cls.__select__]
        if where:
            sql.append('where')
            sql.append(where)
        # Fix: copy the caller-supplied args list so the limit handling
        # below never mutates the caller's list.
        args = [] if args is None else list(args)
        orderBy = kwargs.get('orderBy', None)
        if orderBy:
            sql.append('order by')
            sql.append(orderBy)
        limit = kwargs.get('limit', None)
        if limit is not None:
            sql.append('limit')
            if isinstance(limit, int):
                sql.append('?')
                args.append(limit)
            elif isinstance(limit, tuple) and len(limit) == 2:
                sql.append('?, ?')
                args.extend(limit)
            else:
                raise ValueError('Invalid limit value: %s' % str(limit))
        rs = await DBPoolC.select(' '.join(sql), args)
        return [cls(**r) for r in rs]

    @classmethod
    async def findNumber(cls, selectField, where=None, args=()):
        """Return a single aggregate value (e.g. 'count(id)') or None."""
        sql = ['select %s _num_ from `%s`' % (selectField, cls.__table__)]
        if where:
            sql.append('where')
            sql.append(where)
        rs = await DBPoolC.select(' '.join(sql), args, 1)
        if len(rs) == 0:
            return None
        return rs[0]['_num_']

    @classmethod
    async def find(cls, primary_key):
        """Return the row with the given primary key, or None."""
        rs = await DBPoolC.select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [primary_key], 1)
        if len(rs) == 0:
            return None
        return cls(**rs[0])

    def getValue(self, key):
        """Return the field value, or None if the field is unset."""
        return getattr(self, key, None)

    def getValueOrDefault(self, key):
        """Return the field value, filling in (and storing) the mapped
        field's default when the value is unset."""
        value = getattr(self, key, None)
        if value is None:
            field = self.__mappings__[key]
            if field.default is not None:
                # A callable default is evaluated per call (e.g. id factory).
                value = field.default() if callable(field.default) else field.default
                logging.debug('using default value for %s: %s' % (key, str(value)))
                setattr(self, key, value)
        return value

    async def save(self):
        """Insert this model; returns True on exactly one affected row."""
        args = list(map(self.getValueOrDefault, self.__fields__))
        args.append(self.getValueOrDefault(self.__primary_key__))
        rows = await DBPoolC.execute(self.__insert__, args)
        if rows != 1:
            logging.error('failed to insert record: affected rows: %s' % rows)
            return False
        return True

    async def update(self):
        """Update this model by primary key; True on one affected row."""
        args = list(map(self.getValue, self.__fields__))
        args.append(self.getValue(self.__primary_key__))
        rows = await DBPoolC.execute(self.__update__, args)
        if rows != 1:
            logging.error('failed to update by primary key: affected rows: %s' % rows)
            return False
        return True

    async def remove(self):
        """Delete this model by primary key; True on one affected row."""
        args = [self.getValue(self.__primary_key__)]
        rows = await DBPoolC.execute(self.__delete__, args)
        if rows != 1:
            logging.error('failed to remove by primary key: affected rows: %s' % rows)
            return False
        return True
| 35.480392 | 111 | 0.555402 | import logging
from jupiter_orm.DBPool import DBPoolC
from jupiter_orm.ModelMetaclass import ModelMetaclassC
class ModelC(dict, metaclass=ModelMetaclassC):
def __init__(self, **kwargs):
super(ModelC, self).__init__(**kwargs)
def __getattr__(self, item):
try:
return self[item]
except KeyError:
raise AttributeError(r"'ModelC' object has no attribute '%s'" % item)
def __setattr__(self, key, value):
self[key] = value
@classmethod
async def findAll(cls, where=None, args=None, **kwargs):
sql = [cls.__select__]
if where:
sql.append('where')
sql.append(where)
if args is None:
args = []
orderBy = kwargs.get('orderBy', None)
if orderBy:
sql.append('order by')
sql.append(orderBy)
limit = kwargs.get('limit', None)
if limit is not None:
sql.append('limit')
if isinstance(limit, int):
sql.append('?')
args.append(limit)
elif isinstance(limit, tuple) and len(limit) == 2:
sql.append('?, ?')
args.extend(limit)
else:
raise ValueError('Invalid limit value: %s' % str(limit))
rs = await DBPoolC.select(' '.join(sql), args)
return [cls(**r) for r in rs]
@classmethod
async def findNumber(cls, selectField, where=None, args=()):
sql = ['select %s _num_ from `%s`' % (selectField, cls.__table__)]
if where:
sql.append('where')
sql.append(where)
rs = await DBPoolC.select(' '.join(sql), args, 1)
if len(rs) == 0:
return None
return rs[0]['_num_']
@classmethod
async def find(cls, primary_key):
rs = await DBPoolC.select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [primary_key], 1)
if len(rs) == 0:
return None
return cls(**rs[0])
def getValue(self, key):
return getattr(self, key, None)
def getValueOrDefault(self, key):
value = getattr(self, key, None)
if value is None:
field = self.__mappings__[key]
if field.default is not None:
value = field.default() if callable(field.default) else field.default
logging.debug('using default value for %s: %s' % (key, str(value)))
setattr(self, key, value)
return value
async def save(self):
args = list(map(self.getValueOrDefault, self.__fields__))
args.append(self.getValueOrDefault(self.__primary_key__))
rows = await DBPoolC.execute(self.__insert__, args)
if rows != 1:
logging.error('failed to insert record: affected rows: %s' % rows)
return False
return True
async def update(self):
args = list(map(self.getValue, self.__fields__))
args.append(self.getValue(self.__primary_key__))
rows = await DBPoolC.execute(self.__update__, args)
if rows != 1:
logging.error('failed to update by primary key: affected rows: %s' % rows)
return False
return True
async def remove(self):
    """DELETE this row by primary key; True iff exactly one row was removed."""
    affected = await DBPoolC.execute(
        self.__delete__, [self.getValue(self.__primary_key__)])
    if affected == 1:
        return True
    logging.error('failed to remove by primary key: affected rows: %s' % affected)
    return False
| true | true |
f7f79323ede6ce8c2a8ec9c8a11b8ec920697cf0 | 1,312 | py | Python | care/apps/accounts/migrations/0004_auto_20200524_1231.py | chetanyakan/care-platform | 52cd390b711bda4d8081f9cb4aed198c3ccc4195 | [
"MIT"
] | null | null | null | care/apps/accounts/migrations/0004_auto_20200524_1231.py | chetanyakan/care-platform | 52cd390b711bda4d8081f9cb4aed198c3ccc4195 | [
"MIT"
] | null | null | null | care/apps/accounts/migrations/0004_auto_20200524_1231.py | chetanyakan/care-platform | 52cd390b711bda4d8081f9cb4aed198c3ccc4195 | [
"MIT"
] | 2 | 2020-05-21T07:18:49.000Z | 2020-11-03T12:18:43.000Z | # Generated by Django 2.2.11 on 2020-05-24 07:01
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the legacy bookkeeping columns (active / created_at / updated_at)
    from the five lookup models in the accounts app."""

    dependencies = [
        ("accounts", "0003_care_platform"),
    ]

    # Same 15 RemoveField operations as before, in the same order:
    # models iterate outermost, fields innermost.
    operations = [
        migrations.RemoveField(model_name=model, name=field)
        for model in ("district", "localbody", "skill", "state", "usertype")
        for field in ("active", "created_at", "updated_at")
    ]
| 45.241379 | 75 | 0.689024 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("accounts", "0003_care_platform"),
]
operations = [
migrations.RemoveField(model_name="district", name="active",),
migrations.RemoveField(model_name="district", name="created_at",),
migrations.RemoveField(model_name="district", name="updated_at",),
migrations.RemoveField(model_name="localbody", name="active",),
migrations.RemoveField(model_name="localbody", name="created_at",),
migrations.RemoveField(model_name="localbody", name="updated_at",),
migrations.RemoveField(model_name="skill", name="active",),
migrations.RemoveField(model_name="skill", name="created_at",),
migrations.RemoveField(model_name="skill", name="updated_at",),
migrations.RemoveField(model_name="state", name="active",),
migrations.RemoveField(model_name="state", name="created_at",),
migrations.RemoveField(model_name="state", name="updated_at",),
migrations.RemoveField(model_name="usertype", name="active",),
migrations.RemoveField(model_name="usertype", name="created_at",),
migrations.RemoveField(model_name="usertype", name="updated_at",),
]
| true | true |
f7f793eb14e4b7946837fec10525cfbe9ea6a5a7 | 6,691 | py | Python | scripts/control.py | Jash-Shah/Eklavya---Drone | ca8c88fafb50e3da29690faa016ba43657e67b82 | [
"MIT",
"Unlicense"
] | 7 | 2021-09-17T06:48:00.000Z | 2022-02-14T01:11:49.000Z | scripts/control.py | toshan-luktuke/Eklavya---Drone | ca8c88fafb50e3da29690faa016ba43657e67b82 | [
"MIT",
"Unlicense"
] | 1 | 2021-09-11T10:51:06.000Z | 2021-12-22T10:27:33.000Z | scripts/control.py | toshan-luktuke/Eklavya---Drone | ca8c88fafb50e3da29690faa016ba43657e67b82 | [
"MIT",
"Unlicense"
] | 1 | 2021-12-16T18:04:28.000Z | 2021-12-16T18:04:28.000Z | #! /usr/bin/env python3
import rospy
import time
from pid import *
import message_filters
from vitarana_drone.msg import prop_speed
from vitarana_drone.msg import edrone_cmd
from rospy.topics import Publisher
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Vector3Stamped
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Float64, Float64MultiArray
from gazebo_msgs.msg import ModelStates
# Initialization of all parameters (sensed state defaults to "on the pad").
altitude = 0.30999   # current altitude from the GPS sensor (see calAltitude)
thrust = 0
vel_x = 0            # GPS velocity components (see calVelocity)
vel_y = 0
vel_z = 0
roll = 0             # attitude from the IMU, converted to degrees (see calImu)
pitch = 0
yaw = 0
x = 0                # world-frame position from gazebo model states (see calPosition)
y = 0
z = 0
# Default PID gains, used until values arrive on the *_pid topics
kp = 20
ki = 0.001
kd = 35
kp_roll = 0.2
ki_roll = 0.00001
kd_roll = 0.5
kp_pitch = 0.15
ki_pitch = 0.00001
kd_pitch = 0.1
kp_yaw = 50
ki_yaw = 0.01
kd_yaw = 5
kp_x = 0.13
ki_x = 0.00001
kd_x = 0.003 #0.00015
kp_y = 0.13
ki_y = 0
kd_y = 0.00015
kp_vel_x = 0.1
ki_vel_x = 0
kd_vel_x = 0.071
kp_vel_y = 0.01
ki_vel_y = 0.0
kd_vel_y = 0.0071
# Counts completed control iterations; 0 marks the first run for the PID state
flag = 0
# Publisher carrying the final motor speeds to the propeller plugin
message_pub = rospy.Publisher("/edrone/pwm", prop_speed, queue_size=1000)
# Ask the user for the target hover coordinates (blocks until input is given)
target_x,target_y,req_alt = map(float,input("Enter X,Y,Z coordinates of target : ").split())
# Gets altitude PID published to node
def setPID_alt(msg):
    """ROS callback: refresh the altitude PID gains from msg.data = [kp, ki, kd]."""
    global kp, ki, kd
    gains = msg.data
    kp, ki, kd = gains[0], gains[1], gains[2]
# Gets roll PID published to node
def setPID_roll(msg):
    """ROS callback: refresh the roll PID gains from msg.data = [kp, ki, kd]."""
    global kp_roll, ki_roll, kd_roll
    gains = msg.data
    kp_roll, ki_roll, kd_roll = gains[0], gains[1], gains[2]
# Gets pitch PID published to node
def setPID_pitch(msg):
    """ROS callback: refresh the pitch PID gains from msg.data = [kp, ki, kd]."""
    global kp_pitch, ki_pitch, kd_pitch
    gains = msg.data
    kp_pitch, ki_pitch, kd_pitch = gains[0], gains[1], gains[2]
# Gets yaw PID published to node
def setPID_yaw(msg):
    """ROS callback: refresh the yaw PID gains from msg.data = [kp, ki, kd]."""
    global kp_yaw, ki_yaw, kd_yaw
    gains = msg.data
    kp_yaw, ki_yaw, kd_yaw = gains[0], gains[1], gains[2]
# Gets x PID published to node
def setPID_x(msg):
    """ROS callback: refresh the x-position PID gains from msg.data = [kp, ki, kd]."""
    global kp_x, ki_x, kd_x
    gains = msg.data
    kp_x, ki_x, kd_x = gains[0], gains[1], gains[2]
# Gets y PID published to node
def setPID_y(msg):
    """ROS callback: refresh the y-position PID gains from msg.data = [kp, ki, kd]."""
    global kp_y, ki_y, kd_y
    gains = msg.data
    kp_y, ki_y, kd_y = gains[0], gains[1], gains[2]
# Gets current altitude of drone from gps sensor
def calAltitude(msg):
    """GPS callback: record the drone's current altitude and log it."""
    global altitude
    altitude = msg.altitude
    rospy.loginfo("\nAltitude = %s" % altitude)
# Gets current velocity of drone from gps_vel sensor
def calVelocity(msg):
    """GPS-velocity callback: cache the velocity vector components in globals."""
    global vel_x, vel_y, vel_z
    v = msg.vector
    vel_x, vel_y, vel_z = v.x, v.y, v.z
# Gets current roll. pitch, yaw of drone from IMU sensor
def calImu(msg):
    """IMU callback: convert the orientation quaternion to roll/pitch/yaw
    in degrees and store them in module globals."""
    global roll, pitch, yaw
    o = msg.orientation
    quaternion = [o.x, o.y, o.z, o.w]
    (roll, pitch, yaw) = euler_from_quaternion(quaternion)
    rad_to_deg = 180 / 3.14159265
    roll = roll * rad_to_deg
    pitch = pitch * rad_to_deg
    yaw = yaw * rad_to_deg
#Gets current x,y posiiton of drone
def calPosition(pos):
    """Model-states callback: cache the drone's x/y world position (model
    index 1), rounded to 3 decimals."""
    global x, y
    drone = pos.pose[1]
    x = round(drone.position.x, 3)
    y = round(drone.position.y, 3)
# Gets vel_x and vel_y PID published to node
def setPID_vel_x(msg):
    """ROS callback: refresh the x-velocity PID gains from msg.data = [kp, ki, kd]."""
    global kp_vel_x, ki_vel_x, kd_vel_x
    gains = msg.data
    kp_vel_x, ki_vel_x, kd_vel_x = gains[0], gains[1], gains[2]
def setPID_vel_y(msg):
    """ROS callback: refresh the y-velocity PID gains from msg.data = [kp, ki, kd]."""
    global kp_vel_y, ki_vel_y, kd_vel_y
    gains = msg.data
    kp_vel_y, ki_vel_y, kd_vel_y = gains[0], gains[1], gains[2]
def alt_control(gps, vel, imu):
    """Time-synchronized callback for the GPS, GPS-velocity and IMU streams.

    Refreshes the sensed state and the PID gains, runs the cascaded PID
    controller and publishes the resulting propeller speeds.
    """
    # Declare globals so assignments here update the module-level state
    global altitude,req_alt,flag, kp,ki,kd,roll, pitch, yaw,target_x,target_y
    # Refresh the drone's current velocity
    calVelocity(vel)
    # Refresh the drone's current roll/pitch/yaw
    calImu(imu)
    # Refresh the drone's current altitude
    calAltitude(gps)
    # Refresh the drone's current x/y position from gazebo
    rospy.Subscriber("/gazebo/model_states",ModelStates,calPosition )
    # Subscribe to all PID-gain topics so gains can be tuned at runtime
    rospy.Subscriber("alt_pid", Float64MultiArray, setPID_alt)
    rospy.Subscriber("roll_pid", Float64MultiArray, setPID_roll)
    rospy.Subscriber("pitch_pid", Float64MultiArray, setPID_pitch)
    rospy.Subscriber("yaw_pid", Float64MultiArray, setPID_yaw)
    rospy.Subscriber("x_pid", Float64MultiArray, setPID_x)
    rospy.Subscriber("y_pid", Float64MultiArray, setPID_y)
    rospy.Subscriber("vel_x_pid", Float64MultiArray, setPID_vel_x)
    rospy.Subscriber("vel_y_pid", Float64MultiArray, setPID_vel_y)
    # Bundle the gains into tuples for the PID function
    k_alt = (kp,ki,kd)
    k_roll = (kp_roll,ki_roll,kd_roll)
    k_pitch = (kp_pitch,ki_pitch,kd_pitch)
    k_yaw = (kp_yaw,ki_yaw,kd_yaw)
    k_x = (kp_x,ki_x,kd_x)
    k_y = (kp_y,ki_y,kd_y)
    velocity = (vel_x, vel_y, vel_z)
    k_vel = (kp_vel_x,ki_vel_x,kd_vel_x,kp_vel_y,ki_vel_y,kd_vel_y)
    target = (target_x,target_y,req_alt)
    # Debug logging of the current state
    print("\nAltitude = " + str(altitude))
    # print("Required alt = ",req_alt)
    print("Roll =", roll)
    print("Pitch =", pitch)
    print("Yaw =", yaw)
    print("X = ",x)
    print("Y = ",y)
    # PID_alt stabilises roll/pitch/yaw/x/y towards `target` while holding
    # altitude; the return value is the motor-mixed final propeller speed.
    speed = PID_alt(roll, pitch, yaw,x,y, target, altitude, k_alt, k_roll, k_pitch, k_yaw, k_x, k_y, velocity, k_vel, flag)
    flag += 1 # marks that at least one control iteration has completed
    # Publish the final motor speeds to the propellers
    message_pub.publish(speed)
def control():
    """Initialize the ROS node, time-synchronize the three sensor streams and
    dispatch each synchronized (gps, vel, imu) triple to alt_control()."""
    # Declare globals so callbacks keep updating shared state
    global altitude, thrust, speed
    # Initialize the ROS node
    rospy.init_node("altitude", anonymous = False)
    # Subscribers for all relevant sensor data
    gps_sub = message_filters.Subscriber("/edrone/gps", NavSatFix)
    vel_sub = message_filters.Subscriber("/edrone/gps_velocity", Vector3Stamped)
    imu_sub = message_filters.Subscriber("/edrone/imu/data", Imu)
    # Queue size 2: only near-simultaneous messages are paired up
    ts = message_filters.TimeSynchronizer([gps_sub, vel_sub, imu_sub], 2)
    # NOTE(review): one of these publishers is slower than the others, which
    # is why synchronized messages arrive relatively slowly.
    ts.registerCallback(alt_control)
    rospy.spin()
# Main function
# Entry point: run the controller loop until ROS shuts the node down.
if __name__=='__main__':
    try:
        control()
    except rospy.ROSInterruptException:
        # Exit quietly when ROS interrupts the node (e.g. Ctrl-C / shutdown).
        pass
| 28.351695 | 132 | 0.705724 |
import rospy
import time
from pid import *
import message_filters
from vitarana_drone.msg import prop_speed
from vitarana_drone.msg import edrone_cmd
from rospy.topics import Publisher
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Vector3Stamped
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Float64, Float64MultiArray
from gazebo_msgs.msg import ModelStates
altitude = 0.30999
thrust = 0
vel_x = 0
vel_y = 0
vel_z = 0
roll = 0
pitch = 0
yaw = 0
x = 0
y = 0
z = 0
kp = 20
ki = 0.001
kd = 35
kp_roll = 0.2
ki_roll = 0.00001
kd_roll = 0.5
kp_pitch = 0.15
ki_pitch = 0.00001
kd_pitch = 0.1
kp_yaw = 50
ki_yaw = 0.01
kd_yaw = 5
kp_x = 0.13
ki_x = 0.00001
kd_x = 0.003
kp_y = 0.13
ki_y = 0
kd_y = 0.00015
kp_vel_x = 0.1
ki_vel_x = 0
kd_vel_x = 0.071
kp_vel_y = 0.01
ki_vel_y = 0.0
kd_vel_y = 0.0071
flag = 0
message_pub = rospy.Publisher("/edrone/pwm", prop_speed, queue_size=1000)
target_x,target_y,req_alt = map(float,input("Enter X,Y,Z coordinates of target : ").split())
def setPID_alt(msg):
global kp,ki,kd
kp = msg.data[0]
ki = msg.data[1]
kd = msg.data[2]
def setPID_roll(msg):
global kp_roll,ki_roll,kd_roll
kp_roll = msg.data[0]
ki_roll = msg.data[1]
kd_roll = msg.data[2]
def setPID_pitch(msg):
global kp_pitch,ki_pitch,kd_pitch
kp_pitch = msg.data[0]
ki_pitch = msg.data[1]
kd_pitch = msg.data[2]
def setPID_yaw(msg):
global kp_yaw,ki_yaw,kd_yaw
kp_yaw = msg.data[0]
ki_yaw = msg.data[1]
kd_yaw = msg.data[2]
def setPID_x(msg):
global kp_x,ki_x,kd_x
kp_x = msg.data[0]
ki_x = msg.data[1]
kd_x = msg.data[2]
def setPID_y(msg):
global kp_y,ki_y,kd_y
kp_y = msg.data[0]
ki_y = msg.data[1]
kd_y = msg.data[2]
def calAltitude(msg):
global altitude
altitude = msg.altitude
rospy.loginfo("\nAltitude = " + str(altitude))
def calVelocity(msg):
global vel_x, vel_y, vel_z
vel_x = msg.vector.x
vel_y = msg.vector.y
vel_z = msg.vector.z
def calImu(msg):
orinetation_list = [msg.orientation.x,msg.orientation.y,msg.orientation.z,msg.orientation.w]
global roll, pitch, yaw
(roll,pitch,yaw) = euler_from_quaternion(orinetation_list)
roll = roll * (180/3.14159265)
pitch = pitch * (180/3.14159265)
yaw = yaw * (180/3.14159265)
def calPosition(pos):
global x,y
x = round(pos.pose[1].position.x,3)
y = round(pos.pose[1].position.y,3)
def setPID_vel_x(msg):
global kp_vel_x,ki_vel_x,kd_vel_x
kp_vel_x = msg.data[0]
ki_vel_x = msg.data[1]
kd_vel_x = msg.data[2]
def setPID_vel_y(msg):
global kp_vel_y,ki_vel_y,kd_vel_y
kp_vel_y = msg.data[0]
ki_vel_y = msg.data[1]
kd_vel_y = msg.data[2]
def alt_control(gps, vel, imu):
global altitude,req_alt,flag, kp,ki,kd,roll, pitch, yaw,target_x,target_y
calVelocity(vel)
calImu(imu)
calAltitude(gps)
rospy.Subscriber("/gazebo/model_states",ModelStates,calPosition )
rospy.Subscriber("alt_pid", Float64MultiArray, setPID_alt)
rospy.Subscriber("roll_pid", Float64MultiArray, setPID_roll)
rospy.Subscriber("pitch_pid", Float64MultiArray, setPID_pitch)
rospy.Subscriber("yaw_pid", Float64MultiArray, setPID_yaw)
rospy.Subscriber("x_pid", Float64MultiArray, setPID_x)
rospy.Subscriber("y_pid", Float64MultiArray, setPID_y)
rospy.Subscriber("vel_x_pid", Float64MultiArray, setPID_vel_x)
rospy.Subscriber("vel_y_pid", Float64MultiArray, setPID_vel_y)
k_alt = (kp,ki,kd)
k_roll = (kp_roll,ki_roll,kd_roll)
k_pitch = (kp_pitch,ki_pitch,kd_pitch)
k_yaw = (kp_yaw,ki_yaw,kd_yaw)
k_x = (kp_x,ki_x,kd_x)
k_y = (kp_y,ki_y,kd_y)
velocity = (vel_x, vel_y, vel_z)
k_vel = (kp_vel_x,ki_vel_x,kd_vel_x,kp_vel_y,ki_vel_y,kd_vel_y)
target = (target_x,target_y,req_alt)
print("\nAltitude = " + str(altitude))
print("Roll =", roll)
print("Pitch =", pitch)
print("Yaw =", yaw)
print("X = ",x)
print("Y = ",y)
speed = PID_alt(roll, pitch, yaw,x,y, target, altitude, k_alt, k_roll, k_pitch, k_yaw, k_x, k_y, velocity, k_vel, flag)
flag += 1
message_pub.publish(speed)
def control():
global altitude, thrust, speed
rospy.init_node("altitude", anonymous = False)
gps_sub = message_filters.Subscriber("/edrone/gps", NavSatFix)
vel_sub = message_filters.Subscriber("/edrone/gps_velocity", Vector3Stamped)
imu_sub = message_filters.Subscriber("/edrone/imu/data", Imu)
ts = message_filters.TimeSynchronizer([gps_sub, vel_sub, imu_sub], 2)
ts.registerCallback(alt_control)
rospy.spin()
if __name__=='__main__':
try:
control()
except rospy.ROSInterruptException:
pass
| true | true |
f7f7943f5c87469aaa98dd304f9ff981f8f970df | 3,389 | py | Python | tests/unit/test_command.py | osomdev/pyshrimp | 72f4425074a2f09a5f5630ca969472f4f5dcc85e | [
"MIT"
] | 6 | 2021-11-28T12:12:02.000Z | 2021-12-16T19:44:46.000Z | tests/unit/test_command.py | osomdev/pyshrimp | 72f4425074a2f09a5f5630ca969472f4f5dcc85e | [
"MIT"
] | null | null | null | tests/unit/test_command.py | osomdev/pyshrimp | 72f4425074a2f09a5f5630ca969472f4f5dcc85e | [
"MIT"
] | null | null | null | from unittest import TestCase
from pyshrimp.utils.command import shell_cmd, cmd, SkipConfig, Command, CommandArgProcessor
from pyshrimp.utils.subprocess_utils import ProcessExecutionException
class _TextWrapper:
def __init__(self, text):
self._text = text
def __str__(self) -> str:
return self._text
class _PrefixArgumentProcessor(CommandArgProcessor):
    """Argument processor that prepends a fixed prefix to every argument."""

    def __init__(self, prefix):
        self._prefix = prefix

    def process_args(self, *args):
        prefixed = []
        for arg in args:
            prefixed.append(f'{self._prefix}{arg}')
        return prefixed
class TestCommand(TestCase):
    """Unit tests for the pyshrimp command helpers (cmd, shell_cmd, Command)."""

    def test_cmd(self):
        """cmd() builds a callable command; cmd_in feeds stdin."""
        wc_c = cmd('wc', '-c', check=True)
        res = wc_c(cmd_in='1234').standard_output.strip()
        self.assertEqual(
            '4',
            res
        )

    def test_shell_cmd(self):
        """shell_cmd() exposes call arguments to the snippet as $1..$n."""
        wc_c = shell_cmd('echo -n "$1" | wc -c', check=True)
        self.assertEqual(
            '9',
            wc_c('123456789').standard_output.strip()
        )

    def test_cmd_should_raise_exception_on_error_when_instructed(self):
        """check=True turns a non-zero exit code into ProcessExecutionException."""
        # assertRaises replaces the manual try/except/flag bookkeeping and also
        # fails cleanly when no exception is raised at all.
        with self.assertRaises(ProcessExecutionException) as caught:
            cmd('/bin/false', check=True).exec()
        self.assertEqual(caught.exception.result.return_code, 1)

    def test_command_should_respect_skip_config_no_args(self):
        """skip_when_no_args skips execution when exec() receives no arguments."""
        res = cmd('/bin/echo', '-n', '123').exec(skip=SkipConfig(skip_when_no_args=True, skipped_code=42))
        self.assertEqual(42, res.return_code)
        self.assertEqual('Execution skipped', res.standard_output)

    def test_command_should_respect_skip_config_skip(self):
        """An unconditional skip=True short-circuits execution."""
        res = cmd('/bin/echo', '-n', '123').exec(skip=SkipConfig(skip=True, skipped_code=42))
        self.assertEqual(42, res.return_code)
        self.assertEqual('Execution skipped', res.standard_output)

    def test_command_should_not_skip_when_args_were_provided(self):
        """skip_when_no_args must not trigger when exec() receives arguments."""
        res = cmd('/bin/echo', '-n', '123').exec('abc', skip=SkipConfig(skip_when_no_args=True, skipped_code=42))
        self.assertEqual(0, res.return_code)
        self.assertEqual('123 abc', res.standard_output)

    def test_command_should_change_arguments_to_string(self):
        """Non-str arguments are stringified before reaching the process."""
        res = cmd('/bin/echo', '-n', _TextWrapper('123')).exec(_TextWrapper('abc'))
        self.assertEqual(0, res.return_code)
        self.assertEqual('123 abc', res.standard_output)

    def test_command_should_use_arguments_processor_on_exec_args(self):
        """A custom CommandArgProcessor transforms exec-time args only,
        not the base command."""
        command = Command(
            command=['/bin/echo', '-n', _TextWrapper('123')],
            argument_processor=_PrefixArgumentProcessor('arg:')
        )
        res = command.exec(_TextWrapper('abc'))
        self.assertEqual(0, res.return_code)
        self.assertEqual('123 arg:abc', res.standard_output)

    def test_command_should_capture_both_stdout_and_stderr(self):
        """capture=True records stdout and stderr into separate fields."""
        res = shell_cmd('echo -n err_text >&2 ; echo -n out_text', capture=True).exec()
        self.assertEqual(0, res.return_code)
        self.assertEqual('out_text', res.standard_output)
        self.assertEqual('err_text', res.error_output)

    def test_command_should_not_capture_output_when_instructed(self):
        """capture=False leaves both captured streams empty."""
        res = shell_cmd('echo -n err_text >&2 ; echo -n out_text', capture=False).exec()
        self.assertEqual(0, res.return_code)
        self.assertEqual('', res.standard_output)
        self.assertEqual('', res.error_output)
| 36.836957 | 113 | 0.669814 | from unittest import TestCase
from pyshrimp.utils.command import shell_cmd, cmd, SkipConfig, Command, CommandArgProcessor
from pyshrimp.utils.subprocess_utils import ProcessExecutionException
class _TextWrapper:
def __init__(self, text):
self._text = text
def __str__(self) -> str:
return self._text
class _PrefixArgumentProcessor(CommandArgProcessor):
def __init__(self, prefix):
self._prefix = prefix
def process_args(self, *args):
return [f'{self._prefix}{arg}' for arg in args]
class TestCommand(TestCase):
def test_cmd(self):
wc_c = cmd('wc', '-c', check=True)
res = wc_c(cmd_in='1234').standard_output.strip()
self.assertEqual(
'4',
res
)
def test_shell_cmd(self):
wc_c = shell_cmd('echo -n "$1" | wc -c', check=True)
self.assertEqual(
'9',
wc_c('123456789').standard_output.strip()
)
def test_cmd_should_raise_exception_on_error_when_instructed(self):
exception = None
try:
cmd('/bin/false', check=True).exec()
except ProcessExecutionException as ex:
exception = ex
self.assertIsNotNone(exception)
self.assertEqual(exception.result.return_code, 1)
def test_command_should_respect_skip_config_no_args(self):
res = cmd('/bin/echo', '-n', '123').exec(skip=SkipConfig(skip_when_no_args=True, skipped_code=42))
self.assertEqual(42, res.return_code)
self.assertEqual('Execution skipped', res.standard_output)
def test_command_should_respect_skip_config_skip(self):
res = cmd('/bin/echo', '-n', '123').exec(skip=SkipConfig(skip=True, skipped_code=42))
self.assertEqual(42, res.return_code)
self.assertEqual('Execution skipped', res.standard_output)
def test_command_should_not_skip_when_args_were_provided(self):
res = cmd('/bin/echo', '-n', '123').exec('abc', skip=SkipConfig(skip_when_no_args=True, skipped_code=42))
self.assertEqual(0, res.return_code)
self.assertEqual('123 abc', res.standard_output)
def test_command_should_change_arguments_to_string(self):
res = cmd('/bin/echo', '-n', _TextWrapper('123')).exec(_TextWrapper('abc'))
self.assertEqual(0, res.return_code)
self.assertEqual('123 abc', res.standard_output)
def test_command_should_use_arguments_processor_on_exec_args(self):
command = Command(
command=['/bin/echo', '-n', _TextWrapper('123')],
argument_processor=_PrefixArgumentProcessor('arg:')
)
res = command.exec(_TextWrapper('abc'))
self.assertEqual(0, res.return_code)
self.assertEqual('123 arg:abc', res.standard_output)
def test_command_should_capture_both_stdout_and_stderr(self):
res = shell_cmd('echo -n err_text >&2 ; echo -n out_text', capture=True).exec()
self.assertEqual(0, res.return_code)
self.assertEqual('out_text', res.standard_output)
self.assertEqual('err_text', res.error_output)
def test_command_should_not_capture_output_when_instructed(self):
res = shell_cmd('echo -n err_text >&2 ; echo -n out_text', capture=False).exec()
self.assertEqual(0, res.return_code)
self.assertEqual('', res.standard_output)
self.assertEqual('', res.error_output)
| true | true |
f7f7944fd4154534861cb83c1cb4a890f7956c43 | 619 | py | Python | plato/processors/feature_gaussian.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 135 | 2021-04-14T09:06:08.000Z | 2022-03-31T03:38:42.000Z | plato/processors/feature_gaussian.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 39 | 2021-05-16T00:34:08.000Z | 2022-03-10T22:03:57.000Z | plato/processors/feature_gaussian.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 41 | 2021-04-14T10:40:07.000Z | 2022-03-28T10:24:20.000Z | """
Implements a Processor for applying local differential privacy using gaussian mechanism.
"""
import math
from plato.processors import feature_additive_noise
class Processor(feature_additive_noise.Processor):
    """
    Implements a Processor for applying local differential privacy using gaussian mechanism.

    Parameters:
        epsilon: privacy budget of the (epsilon, delta)-DP guarantee.
        delta: failure probability of the guarantee.
        sensitivity: L2 sensitivity of the released features.
    """

    def __init__(self,
                 epsilon=None,
                 delta=None,
                 sensitivity=None,
                 **kwargs) -> None:
        # NOTE(review): this expression equals sigma^2 = 2*ln(1.25/delta) *
        # sensitivity^2 / epsilon^2 — the *variance* of the classic Gaussian
        # mechanism, not its standard deviation. Confirm whether the
        # additive-noise base class expects a variance or a std for `scale`.
        scale = 2 * math.log(1.25 / delta) * sensitivity**2 / epsilon**2
        super().__init__(method="gaussian", scale=scale, **kwargs)
| 29.47619 | 92 | 0.646204 | import math
from plato.processors import feature_additive_noise
class Processor(feature_additive_noise.Processor):
def __init__(self,
epsilon=None,
delta=None,
sensitivity=None,
**kwargs) -> None:
scale = 2 * math.log(1.25 / delta) * sensitivity**2 / epsilon**2
super().__init__(method="gaussian", scale=scale, **kwargs)
| true | true |
f7f79470c24ffbe45e82a1ee990919c2decadb29 | 488 | py | Python | ai-trading-system/setup.py | yash5OG/RecommenderForDHim | 841d981ec97626ddbe718cf0a044f92ee139fccc | [
"MIT"
] | null | null | null | ai-trading-system/setup.py | yash5OG/RecommenderForDHim | 841d981ec97626ddbe718cf0a044f92ee139fccc | [
"MIT"
] | null | null | null | ai-trading-system/setup.py | yash5OG/RecommenderForDHim | 841d981ec97626ddbe718cf0a044f92ee139fccc | [
"MIT"
] | 1 | 2021-08-13T23:06:46.000Z | 2021-08-13T23:06:46.000Z | # This will be executed everytime we make a pip install
# The find_packages is very important since it's used to
# make all our packages visible to each other inside the project
from setuptools import setup, find_packages
setup(
name='Winteam Bridge',
version='1.0.0',
description='Brosnan\'s winteam bridge',
packages=find_packages(exclude=['*tests']),
include_package_data=True,
setup_requires=[
'pytest-runner',
],
test_suite='test.unittest'
)
| 28.705882 | 64 | 0.713115 |
# make all our packages visible to each other inside the project
from setuptools import setup, find_packages
setup(
name='Winteam Bridge',
version='1.0.0',
description='Brosnan\'s winteam bridge',
packages=find_packages(exclude=['*tests']),
include_package_data=True,
setup_requires=[
'pytest-runner',
],
test_suite='test.unittest'
)
| true | true |
f7f79542b3c56bb705d3874a2ed09b5ead59f3e2 | 1,685 | py | Python | bugtests/test386jar/test386called.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | 4 | 2021-05-16T07:59:00.000Z | 2022-02-17T20:17:14.000Z | bugtests/test386jar/test386called.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | 1 | 2016-04-28T00:35:37.000Z | 2016-04-28T00:38:18.000Z | bugtests/test386jar/test386called.py | nelmiux/CS347-Data_Management | 1e9d87097b5a373f9312b0d6b413198e495fd6c0 | [
"CNRI-Jython"
] | 2 | 2022-01-18T23:31:08.000Z | 2022-02-18T12:43:37.000Z | # make sure we are in 'standalone' mode, without package scan
# Jython regression script (Python 2 syntax) for bug 1194650: verify that
# imports work in 'standalone' jar mode, where the package scan is disabled.
import sys
skipName = "python.cachedir.skip"
# sys.registry is Jython-specific; both checks assert standalone mode is on
if not sys.registry.containsKey(skipName):
    raise AssertionError, skipName + " is missing"
if not "true" == sys.registry.getProperty(skipName):
    raise AssertionError, skipName + " is not true"
# import a non-builtin module which is not imported by default on startup;
# this verifies that /Lib .py files can be imported (fixes bug [ 1194650 ])
import getopt
# an early java import (only works since java.util is an already loaded package)
from java import util
util # referencing it used to give a NameError
# import java specific py modules
import os
# java imports which previously failed without a package scan;
# this (most of the time) solves the famous 'no module named java' problem
import java # (only works since java is an already loaded package)
import java.lang # (only works since java.lang is an already loaded package)
# explicit imports (the duplicate BigDecimal import is deliberate)
from java.math import BigDecimal
from java.math import BigDecimal, BigInteger
from java.lang.reflect import Method
# verify the self healing of the import machinery
try:
    # assume package javax.imageio.event was never touched before
    import javax.imageio.event
    raise AssertionError, "ImportError expected when executing 'import javax.imageio.event'"
except ImportError:
    pass
from javax.imageio.event import IIOReadProgressListener
# importing the same name twice was a problem historically
from org.python.core import PySystemState
from org.python.core import PySystemState
# verify explicit dotted imports of the form 'import java.net.URL'
import javax.security.auth.Policy
javax
javax.security
javax.security.auth
javax.security.auth.Policy
| 33.7 | 90 | 0.779228 |
import sys
skipName = "python.cachedir.skip"
if not sys.registry.containsKey(skipName):
raise AssertionError, skipName + " is missing"
if not "true" == sys.registry.getProperty(skipName):
raise AssertionError, skipName + " is not true"
import getopt
import java.lang
from java.math import BigDecimal
from java.math import BigDecimal, BigInteger
from java.lang.reflect import Method
try:
import javax.imageio.event
raise AssertionError, "ImportError expected when executing 'import javax.imageio.event'"
except ImportError:
pass
from javax.imageio.event import IIOReadProgressListener
from org.python.core import PySystemState
from org.python.core import PySystemState
import javax.security.auth.Policy
javax
javax.security
javax.security.auth
javax.security.auth.Policy
| false | true |
f7f795f0802af9453a29b14f27eac8665d4b853d | 1,431 | py | Python | tools_http.py | colderleo/pytools | 861409cfffd6c801f1c480045033930b34e1c889 | [
"MIT"
] | 1 | 2020-04-21T01:42:44.000Z | 2020-04-21T01:42:44.000Z | tools_http.py | colderleo/leo-pytools | 861409cfffd6c801f1c480045033930b34e1c889 | [
"MIT"
] | null | null | null | tools_http.py | colderleo/leo-pytools | 861409cfffd6c801f1c480045033930b34e1c889 | [
"MIT"
] | null | null | null |
def code_response(code=1, msg='', data=None):
    """Build a Django JsonResponse of the form {rescode, resmsg, **data}.

    Args:
        code: numeric result code placed under 'rescode'.
        msg: human-readable message placed under 'resmsg'.
        data: optional extra top-level fields merged into the response.
            Defaults to None instead of {} to avoid a shared mutable default.
    """
    from django.http.response import JsonResponse
    ret = {
        'rescode': code,
        'resmsg': msg,
    }
    if data:
        ret.update(data)
    return JsonResponse(ret)
def generate_jwt_token(openid:str='undefined_wx_openid', encode_to_str=True):
    """Create an HS256 JWT carrying a uid and an expiry timestamp.

    Bug fix: EXPIRE_SECONDS was the string '7000', so
    `get_timestamp() + EXPIRE_SECONDS` failed (or silently concatenated)
    instead of producing an integer expiry; it is now a proper int.
    """
    import jwt # pip install pyjwt
    from tools_common import get_timestamp
    JWT_SECRET = 'dkdll893hj938h42h829h'
    EXPIRE_SECONDS = 7000  # was '7000' (str) — broke the integer addition below
    payload = {
        # NOTE(review): `openid` is accepted but unused; presumably it should
        # be stored in the payload instead of the hard-coded uid — confirm.
        'uid': 'uid_abc',
        'expire_time': get_timestamp() + EXPIRE_SECONDS,
    }
    token = jwt.encode(payload, JWT_SECRET, algorithm='HS256') # decoded = jwt.decode(token, secret, algorithms='HS256')
    if encode_to_str:
        # NOTE(review): PyJWT >= 2 already returns str; this bytes->str
        # conversion assumes PyJWT 1.x — confirm the pinned version.
        token = str(token, encoding='utf-8')
    return token
def parse_jwt_token(token, key=None):
    '''
    Verify an HS256 JWT and enforce the custom expire_time claim.

    if verify passed, return payload
    if key, return payload[key]
    else, return None
    '''
    import jwt # pip install pyjwt
    from tools_common import get_timestamp
    try:
        JWT_SECRET = 'dkdll893hj938h42h829h'
        payload = jwt.decode(token, JWT_SECRET, algorithms='HS256')
        # Expiry is tracked via a custom claim rather than the standard 'exp'
        cur_timestamp = get_timestamp()
        if cur_timestamp > payload['expire_time']:
            raise Exception('login expired')
        if key:
            return payload.get(key)
        else:
            return payload
    except Exception as e:
        # Best-effort: any failure (bad signature, decode error, expiry) is
        # logged and collapsed to None for the caller.
        print(f'verify token failed: {e}')
        return None
| 28.058824 | 121 | 0.632425 |
def code_response(code=1, msg='', data:dict={}):
from django.http.response import JsonResponse
ret = {
'rescode': code,
'resmsg': msg,
}
ret.update(data)
return JsonResponse(ret)
def generate_jwt_token(openid:str='undefined_wx_openid', encode_to_str=True):
import jwt
from tools_common import get_timestamp
JWT_SECRET = 'dkdll893hj938h42h829h'
EXPIRE_SECONDS = '7000'
payload = {
'uid': 'uid_abc',
'expire_time': get_timestamp() + EXPIRE_SECONDS,
}
token = jwt.encode(payload, JWT_SECRET, algorithm='HS256')
if encode_to_str:
token = str(token, encoding='utf-8')
return token
def parse_jwt_token(token, key=None):
import jwt
from tools_common import get_timestamp
try:
JWT_SECRET = 'dkdll893hj938h42h829h'
payload = jwt.decode(token, JWT_SECRET, algorithms='HS256')
cur_timestamp = get_timestamp()
if cur_timestamp > payload['expire_time']:
raise Exception('login expired')
if key:
return payload.get(key)
else:
return payload
except Exception as e:
print(f'verify token failed: {e}')
return None
| true | true |
f7f795f4dde16c2dabc8a625d9d8b0be6ab64e67 | 150 | py | Python | flasgger_validation/__main__.py | robertlagrant/flasgger_validation | 413a0c897725990ee1f60ae091f50593c9eca7d2 | [
"MIT"
] | null | null | null | flasgger_validation/__main__.py | robertlagrant/flasgger_validation | 413a0c897725990ee1f60ae091f50593c9eca7d2 | [
"MIT"
] | null | null | null | flasgger_validation/__main__.py | robertlagrant/flasgger_validation | 413a0c897725990ee1f60ae091f50593c9eca7d2 | [
"MIT"
] | null | null | null | from waitress import serve
from .app import create_app
if __name__ == '__main__':
app = create_app()
serve(app, host='0.0.0.0', port=5000)
| 16.666667 | 41 | 0.673333 | from waitress import serve
from .app import create_app
if __name__ == '__main__':
app = create_app()
serve(app, host='0.0.0.0', port=5000)
| true | true |
f7f797daa504a244274423c9fa9de2402c75bb83 | 22,298 | py | Python | tdc/utils.py | hengwei-chan/TDC | 83b9dd1bf75a56e0dac2e61e0cd8fb21208785fa | [
"MIT"
] | 1 | 2021-07-23T01:43:26.000Z | 2021-07-23T01:43:26.000Z | tdc/utils.py | dengzhou5068/TDC | d19bd63c46c8f283c19764c3c9970de66f9a5d29 | [
"MIT"
] | null | null | null | tdc/utils.py | dengzhou5068/TDC | d19bd63c46c8f283c19764c3c9970de66f9a5d29 | [
"MIT"
] | null | null | null | import requests
from zipfile import ZipFile
import os, sys
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
import json
import warnings
warnings.filterwarnings("ignore")
import subprocess
import pickle
from fuzzywuzzy import fuzz
from tqdm import tqdm
from .metadata import name2type, name2id, dataset_list, dataset_names, benchmark_names, benchmark2id, benchmark2type
from .metadata import property_names, paired_dataset_names, single_molecule_dataset_names
from .metadata import retrosyn_dataset_names, forwardsyn_dataset_names, molgenpaired_dataset_names, generation_datasets
from .metadata import oracle2id, download_oracle_names, trivial_oracle_names, oracle_names, oracle2type
from .label_name_list import dataset2target_lists
try:
from urllib.error import HTTPError
from urllib.parse import quote, urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode
from urllib2 import quote, urlopen, HTTPError
def fuzzy_search(name, dataset_names):
    """Case-insensitively resolve `name` against `dataset_names`.

    An exact (lower-cased) match wins; otherwise the closest fuzzy match is
    taken via get_closet_match(). Raises ValueError when no candidate from
    `dataset_names` can be matched.
    """
    name = name.lower()
    if name in dataset_names:
        s = name
    else:
        # Fall back to the edit-distance-based closest-match lookup
        s = get_closet_match(dataset_names, name)[0]
    if s in dataset_names:
        return s
    else:
        raise ValueError(s + " does not belong to this task, please refer to the correct task name!")
def download_wrapper(name, path, dataset_names):
    """Resolve *name* against *dataset_names* and fetch the dataset file.

    Downloads from Harvard Dataverse into *path* unless a local copy with
    the registered extension already exists.  Returns the resolved
    (canonical) dataset name.
    """
    name = fuzzy_search(name, dataset_names)
    # Dataverse serves files by numeric id; name2id maps name -> file id.
    server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
    dataset_path = server_path + str(name2id[name])
    if not os.path.exists(path):
        os.mkdir(path)
    if os.path.exists(os.path.join(path, name + '.' + name2type[name])):
        print_sys('Found local copy...')
    else:
        print_sys("Downloading...")
        dataverse_download(dataset_path, path, name, name2type)
    return name
def oracle_download_wrapper(name, path, oracle_names):
    """Resolve *name* against *oracle_names* and fetch the oracle file.

    Trivial oracles are computed locally and need no download.  Returns
    the resolved (canonical) oracle name.
    """
    name = fuzzy_search(name, oracle_names)
    if name in trivial_oracle_names:
        return name
    server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
    dataset_path = server_path + str(oracle2id[name])
    if not os.path.exists(path):
        os.mkdir(path)
    if os.path.exists(os.path.join(path, name + '.' + oracle2type[name])):
        print_sys('Found local copy...')
    else:
        print_sys("Downloading Oracle...")
        dataverse_download(dataset_path, path, name, oracle2type) # TODO(review): confirm oracle2type covers every downloadable oracle
        print_sys("Done!")
    return name
def bm_download_wrapper(name, path):
    """Resolve *name* to a benchmark group, download its zip archive from
    Harvard Dataverse (unless an extracted copy exists) and unpack it
    under *path*.  Returns the resolved benchmark group name.
    """
    name = fuzzy_search(name, list(benchmark_names.keys()))
    server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
    dataset_path = server_path + str(benchmark2id[name])
    if not os.path.exists(path):
        os.mkdir(path)
    # The extracted directory (not the .zip) marks a cached download.
    if os.path.exists(os.path.join(path, name)):
        print_sys('Found local copy...')
    else:
        print_sys('Downloading Benchmark Group...')
        dataverse_download(dataset_path, path, name, benchmark2type)
        print_sys('Extracting zip file...')
        with ZipFile(os.path.join(path, name + '.zip'), 'r') as zip:
            zip.extractall(path = os.path.join(path))
        print_sys("Done!")
    return name
def pd_load(name, path):
    """Load dataset *name* from *path* into a pandas DataFrame.

    The on-disk format (tab / csv / pkl) is looked up in the name2type
    registry; duplicate rows are dropped on a best-effort basis.
    """
    try:
        if name2type[name] == 'tab':
            df = pd.read_csv(os.path.join(path, name + '.' + name2type[name]), sep = '\t')
        elif name2type[name] == 'csv':
            df = pd.read_csv(os.path.join(path, name + '.' + name2type[name]))
        elif name2type[name] == 'pkl':
            df = pd.read_pickle(os.path.join(path, name + '.' + name2type[name]))
        else:
            raise ValueError("The file type must be one of tab/csv/pickle.")
        try:
            # Best-effort: unhashable cell values make drop_duplicates raise.
            df = df.drop_duplicates()
        except:
            pass
        return df
    except (EmptyDataError, EOFError) as e:
        # An empty/truncated file usually means the download failed because
        # the hosting service is down.
        import sys
        sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
def property_dataset_load(name, path, target, dataset_names):
    """Download (if needed) and load a single-molecule property dataset.

    *target* (default 'Y') is fuzzy-matched against the frame's columns
    and rows with a null label are dropped.  Returns (X, y, ID) columns.
    """
    if target is None:
        target = 'Y'
    name = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    df = pd_load(name, path)
    try:
        if target is not None:
            target = fuzzy_search(target, df.columns.values)
        df = df[df[target].notnull()].reset_index(drop = True)
    except:
        # A failed column lookup usually means an HTML error page was
        # downloaded instead of data — look for the outage banner.
        with open(os.path.join(path, name + '.' + name2type[name]), 'r') as f:
            flag = 'Service Unavailable' in ' '.join(f.readlines())
        if flag:
            import sys
            sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
        else:
            sys.exit("Please report this error to cosamhkx@gmail.com, thanks!")
    try:
        # Newer files use generic X/ID headers; older ones Drug/Drug_ID.
        return df['X'], df[target], df['ID']
    except:
        return df['Drug'], df[target], df['Drug_ID']
def molpair_process(name, path, dataset_names):
    """Download (if needed) and load a molecule-pair dataset, returning
    its ('input', 'output') molecule columns."""
    resolved = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    frame = pd_load(resolved, path)
    return frame['input'], frame['output']
def interaction_dataset_load(name, path, target, dataset_names):
    """Download (if needed) and load a pairwise interaction dataset.

    Returns (X1, X2, y, ID1, ID2).  Purely binary interaction files
    without a label column get Y = 1 for every stored pair; negatives can
    be drawn afterwards with NegSample.
    """
    name = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    df = pd_load(name, path)
    try:
        if target is None:
            target = 'Y'
        if target not in df.columns.values:
            # for binary interaction data, the labels are all 1. negative samples can be sampled from utils.NegSample function
            df[target] = 1
        if target is not None:
            target = fuzzy_search(target, df.columns.values)
        df = df[df[target].notnull()].reset_index(drop = True)
        return df['X1'], df['X2'], df[target], df['ID1'], df['ID2']
    except:
        # Distinguish a host outage (HTML error page on disk) from a bug.
        with open(os.path.join(path, name + '.' + name2type[name]), 'r') as f:
            flag = 'Service Unavailable' in ' '.join(f.readlines())
        if flag:
            import sys
            sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
        else:
            sys.exit("Please report this error to cosamhkx@gmail.com, thanks!")
def multi_dataset_load(name, path, dataset_names):
    """Download (if needed) and load a multi-column dataset as a full
    DataFrame."""
    resolved = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    return pd_load(resolved, path)
def generation_paired_dataset_load(name, path, dataset_names, input_name, output_name):
    """Download (if needed) and load a paired generation dataset,
    returning the requested input and output columns."""
    resolved = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    frame = pd_load(resolved, path)
    return frame[input_name], frame[output_name]
def distribution_dataset_load(name, path, dataset_names, column_name):
    """Download (if needed) and load a distribution-learning dataset,
    returning only the requested column."""
    resolved = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    return pd_load(resolved, path)[column_name]
def generation_dataset_load(name, path, dataset_names):
    """Download (if needed) and load a generation dataset, returning its
    ('input', 'target') columns."""
    resolved = download_wrapper(name, path, dataset_names)
    print_sys('Loading...')
    frame = pd_load(resolved, path)
    return frame['input'], frame['target']
def oracle_load(name, path = './oracle', oracle_names = oracle_names):
    """Download (if needed) an oracle by (fuzzy-matched) name and return
    the resolved name."""
    return oracle_download_wrapper(name, path, oracle_names)
def bm_group_load(name, path):
    """Download (if needed) a benchmark group archive and return the
    resolved group name."""
    return bm_download_wrapper(name, path)
def get_label_map(name, path = './data', target = None, file_format = 'csv', output_format = 'dict', task = 'DDI'):
    """Return the mapping between label values and their meanings for a
    multi-class dataset of the given *task*.

    output_format: 'dict' -> {label: meaning}, 'df' -> full frame,
    'array' -> meanings only.
    """
    name = fuzzy_search(name, dataset_names[task])
    target = 'Y' if target is None else target
    frame = pd_load(name, path)
    if output_format == 'dict':
        return dict(zip(frame[target].values, frame['Map'].values))
    if output_format == 'df':
        return frame
    if output_format == 'array':
        return frame['Map'].values
    raise ValueError("Please use the correct output format, select from dict, df, array.")
def get_reaction_type(name, path = './data', output_format = 'array'):
    """Return reaction-category labels for a retrosynthesis dataset,
    either as an array ('array') or the full frame ('df')."""
    name = fuzzy_search(name, dataset_names['RetroSyn'])
    frame = pd_load(name, path)
    if output_format == 'df':
        return frame
    if output_format == 'array':
        return frame['category'].values
    raise ValueError("Please use the correct output format, select from df, array.")
def dataverse_download(url, path, name, types):
    """Stream-download *url* into path/name.<ext> with a tqdm progress bar.

    *types* maps name -> file extension (e.g. name2type / oracle2type).
    """
    save_path = os.path.join(path, name + '.' + types[name])
    response = requests.get(url, stream=True)
    # content-length may be absent; tqdm then shows an unbounded bar.
    total_size_in_bytes= int(response.headers.get('content-length', 0))
    block_size = 1024  # stream in 1 KiB chunks
    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open(save_path, 'wb') as file:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
def convert_y_unit(y, from_, to_):
    """Convert binding-affinity labels between nM and p (= -log10 M) scales.

    Arguments:
        y: labels (scalar or numpy array)
        from_: 'nM' or 'p' — unit of the input
        to_: 'nM' or 'p' — unit of the output

    Returns:
        labels expressed in the requested unit
    """
    # Normalise the input to nM first.
    y_nm = 10 ** (-y) / 1e-9 if from_ == 'p' else y
    # Then convert to the target unit; the 1e-10 guards against log10(0).
    if to_ == 'p':
        return -np.log10(y_nm * 1e-9 + 1e-10)
    return y_nm
def label_transform(y, binary, threshold, convert_to_log, verbose = True, order = 'descending'):
    """Transform raw labels: binarize continuous values at a threshold, or
    map them (e.g. Kd in nM) to log (p) space.

    Arguments:
        y: list/array of labels
        binary: if True and y is continuous, binarize y at `threshold`
        threshold: cutoff used for binarization
        convert_to_log: if True and y is continuous (and binary is False),
            convert from nM to p scale
        verbose: print the applied transformation to stderr
        order: 'descending' labels values below the threshold as 1,
            'ascending' labels values above the threshold as 1

    Returns:
        numpy array of transformed labels (input returned unchanged when
        it is already binary and no conversion applies)
    """
    if (len(np.unique(y)) > 2) and binary:
        if verbose:
            # Bug fix: the original message had broken quoting, so the
            # threshold value was never interpolated into the string.
            print('Binarization using threshold ' + str(threshold) +
                  ', you can specify your threshold values by threshold = X',
                  flush = True, file = sys.stderr)
        if order == 'descending':
            y = np.array([1 if i else 0 for i in np.array(y) < threshold])
        elif order == 'ascending':
            y = np.array([1 if i else 0 for i in np.array(y) > threshold])
        else:
            raise ValueError("Please select order from 'descending or ascending!")
    else:
        if (len(np.unique(y)) > 2) and convert_to_log:
            if verbose:
                print('To log space...', flush = True, file = sys.stderr)
            y = convert_y_unit(np.array(y), 'nM', 'p')
        else:
            y = y
    return y
def convert_to_log(y):
    """Convert labels from nM to the p (= -log10 M) scale."""
    return convert_y_unit(np.array(y), 'nM', 'p')
def convert_back_log(y):
    """Convert labels from the p (= -log10 M) scale back to nM."""
    return convert_y_unit(np.array(y), 'p', 'nM')
def binarize(y, threshold, order = 'ascending'):
    """Binarize labels *y* at *threshold*.

    order='ascending' labels values strictly above the threshold as 1;
    order='descending' labels values strictly below it as 1.

    Raises:
        AttributeError: for any other *order* value.
    """
    values = np.array(y)
    if order == 'ascending':
        return np.array([1 if v > threshold else 0 for v in values])
    if order == 'descending':
        return np.array([1 if v < threshold else 0 for v in values])
    raise AttributeError("'order' must be either ascending or descending")
def label_dist(y, name = None):
    """Plot the label distribution of *y* as a box plot over a histogram,
    with the median (blue, dashed) and mean (green, dashed) marked.

    seaborn/matplotlib are pip-installed on the fly if missing.
    """
    try:
        import seaborn as sns
        import matplotlib.pyplot as plt
    except:
        # Bug fix: the fallback called `utils.install`, but no `utils` name
        # exists in this module — the helper is the module-level `install`.
        install("seaborn")
        install("matplotlib")
        import seaborn as sns
        import matplotlib.pyplot as plt
    median = np.median(y)
    mean = np.mean(y)
    f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {"height_ratios": (0.15, 1)})
    if name is None:
        sns.boxplot(y, ax=ax_box).set_title("Label Distribution")
    else:
        sns.boxplot(y, ax=ax_box).set_title("Label Distribution of " + str(name) + " Dataset")
    ax_box.axvline(median, color='b', linestyle='--')
    ax_box.axvline(mean, color='g', linestyle='--')
    sns.distplot(y, ax = ax_hist)
    ax_hist.axvline(median, color='b', linestyle='--')
    ax_hist.axvline(mean, color='g', linestyle='--')
    ax_hist.legend({'Median':median,'Mean':mean})
    ax_box.set(xlabel='')
    plt.show()
# random split
def create_fold(df, fold_seed, frac):
    """Random train/valid/test split of *df* with fractions
    frac = (train_frac, val_frac, test_frac); *fold_seed* fixes the test
    draw (the validation draw uses random_state=1, as before)."""
    _, val_frac, test_frac = frac
    test_df = df.sample(frac = test_frac, replace = False, random_state = fold_seed)
    remaining = df[~df.index.isin(test_df.index)]
    # Rescale val_frac because validation is drawn from the remainder,
    # not from the full frame.
    val_df = remaining.sample(frac = val_frac / (1 - test_frac), replace = False, random_state = 1)
    train_df = remaining[~remaining.index.isin(val_df.index)]
    return {'train': train_df.reset_index(drop = True),
            'valid': val_df.reset_index(drop = True),
            'test': test_df.reset_index(drop = True)}
# cold setting
def create_fold_setting_cold(df, fold_seed, frac, entity):
    """Cold-entity split: every unique value of column *entity* lands in
    exactly one of train/valid/test, so evaluation is on unseen entities.
    frac = (train_frac, val_frac, test_frac)."""
    _, val_frac, test_frac = frac
    held_out_test = df[entity].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
    test_df = df[df[entity].isin(held_out_test)]
    remaining = df[~df[entity].isin(held_out_test)]
    # val_frac is rescaled because it is drawn from the remaining entities.
    held_out_val = remaining[entity].drop_duplicates().sample(frac = val_frac / (1 - test_frac), replace = False, random_state = fold_seed).values
    val_df = remaining[remaining[entity].isin(held_out_val)]
    train_df = remaining[~remaining[entity].isin(held_out_val)]
    return {'train': train_df.reset_index(drop = True),
            'valid': val_df.reset_index(drop = True),
            'test': test_df.reset_index(drop = True)}
# scaffold split
def create_scaffold_split(df, seed, frac, entity):
    """Scaffold split of *df* by Bemis-Murcko scaffold of the SMILES
    column *entity*: molecules sharing a scaffold land in the same
    partition, so test molecules are structurally unseen.
    frac = (train, valid, test); unparseable SMILES are dropped.
    Requires rdkit.
    """
    # reference: https://github.com/chemprop/chemprop/blob/master/chemprop/data/scaffold.py
    try:
        from rdkit import Chem
        from rdkit.Chem.Scaffolds import MurckoScaffold
    except:
        raise ImportError("Please install rdkit by 'conda install -c conda-forge rdkit'! ")
    from tqdm import tqdm
    from random import Random
    from collections import defaultdict
    random = Random(seed)
    s = df[entity].values
    scaffolds = defaultdict(set)
    idx2mol = dict(zip(list(range(len(s))),s))
    error_smiles = 0
    # Bucket row indices by their Murcko scaffold SMILES.
    for i, smiles in tqdm(enumerate(s), total=len(s)):
        try:
            scaffold = MurckoScaffold.MurckoScaffoldSmiles(mol = Chem.MolFromSmiles(smiles), includeChirality = False)
            scaffolds[scaffold].add(i)
        except:
            print_sys(smiles + ' returns RDKit error and is thus omitted...')
            error_smiles += 1
    train, val, test = [], [], []
    # Budgets are computed over the parseable molecules only.
    train_size = int((len(df) - error_smiles) * frac[0])
    val_size = int((len(df) - error_smiles) * frac[1])
    test_size = (len(df) - error_smiles) - train_size - val_size
    train_scaffold_count, val_scaffold_count, test_scaffold_count = 0, 0, 0
    #index_sets = sorted(list(scaffolds.values()), key=lambda i: len(i), reverse=True)
    # Shuffle scaffold buckets, but list "big" buckets (larger than half
    # the valid/test budget) first so they tend to fill train.
    index_sets = list(scaffolds.values())
    big_index_sets = []
    small_index_sets = []
    for index_set in index_sets:
        if len(index_set) > val_size / 2 or len(index_set) > test_size / 2:
            big_index_sets.append(index_set)
        else:
            small_index_sets.append(index_set)
    random.seed(seed)
    random.shuffle(big_index_sets)
    random.shuffle(small_index_sets)
    index_sets = big_index_sets + small_index_sets
    # Greedy fill: train first, then valid; remainder goes to test
    # (or to valid when no test fraction is requested).
    if frac[2] == 0:
        for index_set in index_sets:
            if len(train) + len(index_set) <= train_size:
                train += index_set
                train_scaffold_count += 1
            else:
                val += index_set
                val_scaffold_count += 1
    else:
        for index_set in index_sets:
            if len(train) + len(index_set) <= train_size:
                train += index_set
                train_scaffold_count += 1
            elif len(val) + len(index_set) <= val_size:
                val += index_set
                val_scaffold_count += 1
            else:
                test += index_set
                test_scaffold_count += 1
    return {'train': df.iloc[train].reset_index(drop = True),
            'valid': df.iloc[val].reset_index(drop = True),
            'test': df.iloc[test].reset_index(drop = True)}
def train_val_test_split(len_data, frac, seed):
    """Shuffle indices 0..len_data-1 and split them into train/valid/test
    index arrays according to frac = (train_frac, val_frac, test_frac).

    Returns:
        (train_idx, valid_idx, test_idx) numpy arrays; the validation
        split absorbs any rounding remainder.
    """
    test_size = int(len_data * frac[2])
    train_size = int(len_data * frac[0])
    val_size = len_data - train_size - test_size
    np.random.seed(seed)
    x = np.array(list(range(len_data)))
    np.random.shuffle(x)
    # Bug fix: the original returned x[-test_size:], which yields the
    # WHOLE array (not an empty one) when test_size == 0.
    return (x[:train_size],
            x[train_size:(train_size + val_size)],
            x[(train_size + val_size):])
def install(package):
    """pip-install *package* into the current interpreter (used to pull in
    optional plotting deps on demand); raises CalledProcessError on failure."""
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def print_sys(s):
    """Log *s* to stderr immediately (keeps stdout clean for data output)."""
    print(s, file = sys.stderr, flush = True)
def _parse_prop(search, proplist):
"""Extract property value from record using the given urn search filter."""
props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]
if len(props) > 0:
return props[0]['value'][list(props[0]['value'].keys())[0]]
def request(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None):
    """
    copied from https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L238
    Construct API request from parameters and return the response.
    Full specification at http://pubchem.ncbi.nlm.nih.gov/pug_rest/PUG_REST.html
    """
    API_BASE = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug'
    text_types = str, bytes
    if not identifier:
        raise ValueError('identifier/cid cannot be None')
    # If identifier is a list, join with commas into string
    if isinstance(identifier, int):
        identifier = str(identifier)
    if not isinstance(identifier, text_types):
        identifier = ','.join(str(x) for x in identifier)
    # Build API URL
    urlid, postdata = None, None
    if namespace == 'sourceid':
        # Source ids may contain '/', which would break the URL path.
        identifier = identifier.replace('/', '.')
    # Some namespaces embed the identifier in the URL path; all others
    # send it as POST form data.
    if namespace in ['listkey', 'formula', 'sourceid'] \
            or searchtype == 'xref' \
            or (searchtype and namespace == 'cid') or domain == 'sources':
        urlid = quote(identifier.encode('utf8'))
    else:
        postdata = urlencode([(namespace, identifier)]).encode('utf8')
    comps = filter(None, [API_BASE, domain, searchtype, namespace, urlid, operation, output])
    apiurl = '/'.join(comps)
    # Make request (blocking; raises HTTPError on API-level errors)
    response = urlopen(apiurl, postdata)
    return response
def NegSample(df, column_names, frac, two_types):
    """Negative sampling for a binary interaction dataset.

    Appends randomly drawn non-interacting pairs with label Y = 0:
    `int(len(df) * frac)` of them when both entities come from the same
    pool, or `len(df)` of them when `two_types` is True.

    Parameters
    ----------
    df : pandas.DataFrame
        positive interaction records
    column_names : list
        column names in the order [id1, x1, id2, x2]
    frac : float
        number of negatives as a fraction of len(df); ignored when
        two_types is True (which always draws len(df) negatives)
    two_types : bool
        True when the two entities come from distinct pools (e.g.
        drug-target), so ids are sampled per column

    Returns
    -------
    pandas.DataFrame
        df with the negative rows appended and the index reset

    Notes
    -----
    Sampling is seeded (np.random.seed(1234)) for reproducibility.  The
    rejection loop assumes enough non-positive pairs exist; otherwise it
    would not terminate.
    """
    x = int(len(df) * frac)
    id1, x1, id2, x2 = column_names
    # Normalise ids to strings so tuple membership tests are consistent.
    df[id1] = df[id1].apply(lambda x: str(x))
    df[id2] = df[id2].apply(lambda x: str(x))
    if not two_types:
        df_unique = np.unique(df[[id1, id2]].values.reshape(-1))
        pos = df[[id1, id2]].values
        pos_set = set([tuple([i[0], i[1]]) for i in pos])
        np.random.seed(1234)
        # Draw a batch first, then top up one pair at a time.
        samples = np.random.choice(df_unique, size=(x, 2), replace=True)
        neg_set = set([tuple([i[0], i[1]]) for i in samples if i[0] != i[1]]) - pos_set
        while len(neg_set) < x:
            sample = np.random.choice(df_unique, 2, replace=False)
            sample = tuple([sample[0], sample[1]])
            if sample not in pos_set:
                neg_set.add(sample)
        neg_list = [list(i) for i in neg_set]
        # Both columns share one id -> sequence map.
        id2seq = dict(df[[id1, x1]].values)
        id2seq.update(df[[id2, x2]].values)
        neg_list_val = []
        for i in neg_list:
            neg_list_val.append([i[0], id2seq[i[0]], i[1], id2seq[i[1]], 0])
        neg_df = pd.DataFrame(neg_list_val).rename(columns = {0: id1, 1: x1, 2: id2, 3: x2, 4: 'Y'})
        # pd.concat replaces DataFrame.append, removed in pandas 2.0.
        return pd.concat([df, neg_df]).reset_index(drop = True)
    else:
        df_unique_id1 = np.unique(df[id1].values.reshape(-1))
        df_unique_id2 = np.unique(df[id2].values.reshape(-1))
        pos = df[[id1, id2]].values
        pos_set = set([tuple([i[0], i[1]]) for i in pos])
        np.random.seed(1234)
        sample_id1 = np.random.choice(df_unique_id1, size=len(df), replace=True)
        sample_id2 = np.random.choice(df_unique_id2, size=len(df), replace=True)
        neg_set = set([tuple([sample_id1[i], sample_id2[i]]) for i in range(len(df)) if sample_id1[i] != sample_id2[i]]) - pos_set
        while len(neg_set) < len(df):
            sample_id1 = np.random.choice(df_unique_id1, size=1, replace=True)
            sample_id2 = np.random.choice(df_unique_id2, size=1, replace=True)
            sample = tuple([sample_id1[0], sample_id2[0]])
            if sample not in pos_set:
                neg_set.add(sample)
        neg_list = [list(i) for i in neg_set]
        # Bug fix: the original read from an undefined `df_temp` and looked
        # sequences up in an undefined `id2seq`; use `df` and the two
        # per-entity-type maps instead.
        id2seq1 = dict(df[[id1, x1]].values)
        id2seq2 = dict(df[[id2, x2]].values)
        neg_list_val = []
        for i in neg_list:
            neg_list_val.append([i[0], id2seq1[i[0]], i[1], id2seq2[i[1]], 0])
        neg_df = pd.DataFrame(neg_list_val).rename(columns = {0: id1, 1: x1, 2: id2, 3: x2, 4: 'Y'})
        return pd.concat([df, neg_df]).reset_index(drop = True)
def uniprot2seq(ProteinID):
    """Get protein sequence from Uniprot ID

    Parameters
    ----------
    ProteinID : str
        Uniprot ID

    Returns
    -------
    str
        Amino acid sequence of input uniprot ID
    """
    import urllib
    import string
    import urllib.request as ur
    ID = str(ProteinID)
    # Fetch the FASTA record; skip the first (header) line and join the
    # remaining sequence lines.
    localfile = ur.urlopen('http://www.uniprot.org/uniprot/' + ID + '.fasta')
    temp = localfile.readlines()
    res = ''
    for i in range(1, len(temp)):
        res = res + temp[i].strip().decode("utf-8")
    return res
def cid2smiles(cid):
    """Resolve a PubChem CID to its canonical SMILES string.

    Falls back to the literal string 'NULL' (and prints a warning) when
    the PubChem lookup fails, so batch callers are not interrupted.
    """
    try:
        smiles = _parse_prop({'label': 'SMILES', 'name': 'Canonical'}, json.loads(request(cid).read().decode())['PC_Compounds'][0]['props'])
    except:
        print('cid ' + str(cid) + ' failed, use NULL string')
        smiles = 'NULL'
    return smiles
def get_closet_match(predefined_tokens, test_token, threshold=0.8):
    """Get the closest match by Levenshtein Distance.

    (The 'closet' spelling is a historical typo kept for API
    compatibility with external callers.)

    Parameters
    ----------
    predefined_tokens : list of string
        Predefined string tokens.
    test_token : string
        User input that needs matching to existing tokens.
    threshold : float in (0, 1), optional (default=0.8)
        The lowest match score to raise errors.

    Returns
    -------
    tuple
        (best matching token, match score in [0, 1]).

    Raises
    ------
    ValueError
        If the best score falls below *threshold*.
    """
    prob_list = []
    # Case-insensitive fuzzy ratio against every candidate token.
    for token in predefined_tokens:
        prob_list.append(
            fuzz.ratio(str(token).lower(), str(test_token).lower()))
    assert (len(prob_list) == len(predefined_tokens))
    prob_max = np.nanmax(prob_list)
    token_max = predefined_tokens[np.nanargmax(prob_list)]
    # match similarity is low
    if prob_max / 100 < threshold:
        print_sys(predefined_tokens)
        raise ValueError(test_token,
                         "does not match to available values. "
                         "Please double check.")
    return token_max, prob_max / 100
def save_dict(path, obj):
    """Pickle *obj* to *path* using the highest available protocol."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_dict(path):
    """Load and return a pickled object from *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def retrieve_label_name_list(name):
    """Return the list of label (target) names available for dataset
    *name*, fuzzy-matched against the known dataset list."""
    return dataset2target_lists[fuzzy_search(name, dataset_list)]
def retrieve_dataset_names(name):
    """Return the dataset names registered under task *name*."""
    return dataset_names[name]
def retrieve_all_benchmarks():
    """Return the names of all available benchmark groups."""
    return [*benchmark_names]
def retrieve_benchmark_names(name):
    """Return every dataset name contained in benchmark group *name*
    (fuzzy-matched), flattened across its tasks."""
    group = benchmark_names[fuzzy_search(name, list(benchmark_names.keys()))]
    names = []
    # group maps task -> list of dataset names; flatten in task order.
    for task_datasets in group.values():
        names.extend(task_datasets)
    return names
def to_submission_format(results):
    """Aggregate per-run benchmark results into the leaderboard submission
    format: {row_index: [mean, std]} with both values rounded to three
    decimals."""
    frame = pd.DataFrame(results)
    def summarize(row):
        # Each cell is a single-entry {metric_name: value} dict.
        values = [list(entry.values())[0] for entry in row]
        return [round(np.mean(values), 3), round(np.std(values), 3)]
    return dict(frame.apply(summarize, axis = 1))
| 33.083086 | 166 | 0.681989 | import requests
from zipfile import ZipFile
import os, sys
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
import json
import warnings
warnings.filterwarnings("ignore")
import subprocess
import pickle
from fuzzywuzzy import fuzz
from tqdm import tqdm
from .metadata import name2type, name2id, dataset_list, dataset_names, benchmark_names, benchmark2id, benchmark2type
from .metadata import property_names, paired_dataset_names, single_molecule_dataset_names
from .metadata import retrosyn_dataset_names, forwardsyn_dataset_names, molgenpaired_dataset_names, generation_datasets
from .metadata import oracle2id, download_oracle_names, trivial_oracle_names, oracle_names, oracle2type
from .label_name_list import dataset2target_lists
try:
from urllib.error import HTTPError
from urllib.parse import quote, urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode
from urllib2 import quote, urlopen, HTTPError
def fuzzy_search(name, dataset_names):
name = name.lower()
if name in dataset_names:
s = name
else:
s = get_closet_match(dataset_names, name)[0]
if s in dataset_names:
return s
else:
raise ValueError(s + " does not belong to this task, please refer to the correct task name!")
def download_wrapper(name, path, dataset_names):
name = fuzzy_search(name, dataset_names)
server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
dataset_path = server_path + str(name2id[name])
if not os.path.exists(path):
os.mkdir(path)
if os.path.exists(os.path.join(path, name + '.' + name2type[name])):
print_sys('Found local copy...')
else:
print_sys("Downloading...")
dataverse_download(dataset_path, path, name, name2type)
return name
def oracle_download_wrapper(name, path, oracle_names):
name = fuzzy_search(name, oracle_names)
if name in trivial_oracle_names:
return name
server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
dataset_path = server_path + str(oracle2id[name])
if not os.path.exists(path):
os.mkdir(path)
if os.path.exists(os.path.join(path, name + '.' + oracle2type[name])):
print_sys('Found local copy...')
else:
print_sys("Downloading Oracle...")
dataverse_download(dataset_path, path, name, oracle2type) ne!")
return name
def bm_download_wrapper(name, path):
name = fuzzy_search(name, list(benchmark_names.keys()))
server_path = 'https://dataverse.harvard.edu/api/access/datafile/'
dataset_path = server_path + str(benchmark2id[name])
if not os.path.exists(path):
os.mkdir(path)
if os.path.exists(os.path.join(path, name)):
print_sys('Found local copy...')
else:
print_sys('Downloading Benchmark Group...')
dataverse_download(dataset_path, path, name, benchmark2type)
print_sys('Extracting zip file...')
with ZipFile(os.path.join(path, name + '.zip'), 'r') as zip:
zip.extractall(path = os.path.join(path))
print_sys("Done!")
return name
def pd_load(name, path):
try:
if name2type[name] == 'tab':
df = pd.read_csv(os.path.join(path, name + '.' + name2type[name]), sep = '\t')
elif name2type[name] == 'csv':
df = pd.read_csv(os.path.join(path, name + '.' + name2type[name]))
elif name2type[name] == 'pkl':
df = pd.read_pickle(os.path.join(path, name + '.' + name2type[name]))
else:
raise ValueError("The file type must be one of tab/csv/pickle.")
try:
df = df.drop_duplicates()
except:
pass
return df
except (EmptyDataError, EOFError) as e:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
def property_dataset_load(name, path, target, dataset_names):
if target is None:
target = 'Y'
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
try:
if target is not None:
target = fuzzy_search(target, df.columns.values)
df = df[df[target].notnull()].reset_index(drop = True)
except:
with open(os.path.join(path, name + '.' + name2type[name]), 'r') as f:
flag = 'Service Unavailable' in ' '.join(f.readlines())
if flag:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
else:
sys.exit("Please report this error to cosamhkx@gmail.com, thanks!")
try:
return df['X'], df[target], df['ID']
except:
return df['Drug'], df[target], df['Drug_ID']
def molpair_process(name, path, dataset_names):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
return df['input'], df['output']
def interaction_dataset_load(name, path, target, dataset_names):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
try:
if target is None:
target = 'Y'
if target not in df.columns.values:
df[target] = 1
if target is not None:
target = fuzzy_search(target, df.columns.values)
df = df[df[target].notnull()].reset_index(drop = True)
return df['X1'], df['X2'], df[target], df['ID1'], df['ID2']
except:
with open(os.path.join(path, name + '.' + name2type[name]), 'r') as f:
flag = 'Service Unavailable' in ' '.join(f.readlines())
if flag:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
else:
sys.exit("Please report this error to cosamhkx@gmail.com, thanks!")
def multi_dataset_load(name, path, dataset_names):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
return df
def generation_paired_dataset_load(name, path, dataset_names, input_name, output_name):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
return df[input_name], df[output_name]
def distribution_dataset_load(name, path, dataset_names, column_name):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
return df[column_name]
def generation_dataset_load(name, path, dataset_names):
name = download_wrapper(name, path, dataset_names)
print_sys('Loading...')
df = pd_load(name, path)
return df['input'], df['target']
def oracle_load(name, path = './oracle', oracle_names = oracle_names):
name = oracle_download_wrapper(name, path, oracle_names)
return name
def bm_group_load(name, path):
name = bm_download_wrapper(name, path)
return name
def get_label_map(name, path = './data', target = None, file_format = 'csv', output_format = 'dict', task = 'DDI'):
name = fuzzy_search(name, dataset_names[task])
if target is None:
target = 'Y'
df = pd_load(name, path)
if output_format == 'dict':
return dict(zip(df[target].values, df['Map'].values))
elif output_format == 'df':
return df
elif output_format == 'array':
return df['Map'].values
else:
raise ValueError("Please use the correct output format, select from dict, df, array.")
def get_reaction_type(name, path = './data', output_format = 'array'):
name = fuzzy_search(name, dataset_names['RetroSyn'])
df = pd_load(name, path)
if output_format == 'df':
return df
elif output_format == 'array':
return df['category'].values
else:
raise ValueError("Please use the correct output format, select from df, array.")
def dataverse_download(url, path, name, types):
save_path = os.path.join(path, name + '.' + types[name])
response = requests.get(url, stream=True)
total_size_in_bytes= int(response.headers.get('content-length', 0))
block_size = 1024
progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(save_path, 'wb') as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
def convert_y_unit(y, from_, to_):
if from_ == 'nM':
y = y
elif from_ == 'p':
y = 10**(-y) / 1e-9
if to_ == 'p':
y = -np.log10(y*1e-9 + 1e-10)
elif to_ == 'nM':
y = y
return y
def label_transform(y, binary, threshold, convert_to_log, verbose = True, order = 'descending'):
if (len(np.unique(y)) > 2) and binary:
if verbose:
print("Binariztion using threshold' + str(threshold) + ', you use specify your threhsold values by threshold = X)", flush = True, file = sys.stderr)
if order == 'descending':
y = np.array([1 if i else 0 for i in np.array(y) < threshold])
elif order == 'ascending':
y = np.array([1 if i else 0 for i in np.array(y) > threshold])
else:
raise ValueError("Please select order from 'descending or ascending!")
else:
if (len(np.unique(y)) > 2) and convert_to_log:
if verbose:
print('To log space...', flush = True, file = sys.stderr)
y = convert_y_unit(np.array(y), 'nM', 'p')
else:
y = y
return y
def convert_to_log(y):
y = convert_y_unit(np.array(y), 'nM', 'p')
return y
def convert_back_log(y):
y = convert_y_unit(np.array(y), 'p', 'nM')
return y
def binarize(y, threshold, order = 'ascending'):
if order == 'ascending':
y = np.array([1 if i else 0 for i in np.array(y) > threshold])
elif order == 'descending':
y = np.array([1 if i else 0 for i in np.array(y) < threshold])
else:
raise AttributeError("'order' must be either ascending or descending")
return y
def label_dist(y, name = None):
try:
import seaborn as sns
import matplotlib.pyplot as plt
except:
utils.install("seaborn")
utils.install("matplotlib")
import seaborn as sns
import matplotlib.pyplot as plt
median = np.median(y)
mean = np.mean(y)
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {"height_ratios": (0.15, 1)})
if name is None:
sns.boxplot(y, ax=ax_box).set_title("Label Distribution")
else:
sns.boxplot(y, ax=ax_box).set_title("Label Distribution of " + str(name) + " Dataset")
ax_box.axvline(median, color='b', linestyle='--')
ax_box.axvline(mean, color='g', linestyle='--')
sns.distplot(y, ax = ax_hist)
ax_hist.axvline(median, color='b', linestyle='--')
ax_hist.axvline(mean, color='g', linestyle='--')
ax_hist.legend({'Median':median,'Mean':mean})
ax_box.set(xlabel='')
plt.show()
#print("The median is " + str(median), flush = True, file = sys.stderr)
#print("The mean is " + str(mean), flush = True, file = sys.stderr)
# random split
def create_fold(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
test = df.sample(frac = test_frac, replace = False, random_state = fold_seed)
train_val = df[~df.index.isin(test.index)]
val = train_val.sample(frac = val_frac/(1-test_frac), replace = False, random_state = 1)
train = train_val[~train_val.index.isin(val.index)]
return {'train': train.reset_index(drop = True),
'valid': val.reset_index(drop = True),
'test': test.reset_index(drop = True)}
# cold setting
def create_fold_setting_cold(df, fold_seed, frac, entity):
train_frac, val_frac, test_frac = frac
gene_drop = df[entity].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
test = df[df[entity].isin(gene_drop)]
train_val = df[~df[entity].isin(gene_drop)]
gene_drop_val = train_val[entity].drop_duplicates().sample(frac = val_frac/(1-test_frac), replace = False, random_state = fold_seed).values
val = train_val[train_val[entity].isin(gene_drop_val)]
train = train_val[~train_val[entity].isin(gene_drop_val)]
return {'train': train.reset_index(drop = True),
'valid': val.reset_index(drop = True),
'test': test.reset_index(drop = True)}
# scaffold split
def create_scaffold_split(df, seed, frac, entity):
    """Scaffold split: group molecules by Bemis-Murcko scaffold and assign
    whole scaffold groups to train/valid/test so structurally similar
    molecules never straddle splits.

    ``entity`` is the SMILES column of ``df``; ``frac`` is a
    (train, valid, test) fraction triple. Requires rdkit. SMILES that RDKit
    cannot parse are dropped (and excluded from the split-size arithmetic).
    Returns {'train', 'valid', 'test'} DataFrames, reindexed.
    """
    # reference: https://github.com/chemprop/chemprop/blob/master/chemprop/data/scaffold.py
    try:
        from rdkit import Chem
        from rdkit.Chem.Scaffolds import MurckoScaffold
    except:
        raise ImportError("Please install rdkit by 'conda install -c conda-forge rdkit'! ")
    from tqdm import tqdm
    from random import Random
    from collections import defaultdict
    random = Random(seed)
    s = df[entity].values
    # scaffold SMILES -> set of row positions sharing that scaffold
    scaffolds = defaultdict(set)
    # NOTE(review): idx2mol is built but never used afterwards.
    idx2mol = dict(zip(list(range(len(s))),s))
    error_smiles = 0
    for i, smiles in tqdm(enumerate(s), total=len(s)):
        try:
            scaffold = MurckoScaffold.MurckoScaffoldSmiles(mol = Chem.MolFromSmiles(smiles), includeChirality = False)
            scaffolds[scaffold].add(i)
        except:
            print_sys(smiles + ' returns RDKit error and is thus omitted...')
            error_smiles += 1
    train, val, test = [], [], []
    # Split sizes are computed over the molecules that actually parsed.
    train_size = int((len(df) - error_smiles) * frac[0])
    val_size = int((len(df) - error_smiles) * frac[1])
    test_size = (len(df) - error_smiles) - train_size - val_size
    train_scaffold_count, val_scaffold_count, test_scaffold_count = 0, 0, 0
    #index_sets = sorted(list(scaffolds.values()), key=lambda i: len(i), reverse=True)
    index_sets = list(scaffolds.values())
    big_index_sets = []
    small_index_sets = []
    # Scaffolds too big to fit comfortably in valid/test go to "big";
    # big groups are placed first so they land in train when possible.
    for index_set in index_sets:
        if len(index_set) > val_size / 2 or len(index_set) > test_size / 2:
            big_index_sets.append(index_set)
        else:
            small_index_sets.append(index_set)
    random.seed(seed)
    random.shuffle(big_index_sets)
    random.shuffle(small_index_sets)
    index_sets = big_index_sets + small_index_sets
    if frac[2] == 0:
        # No test split requested: greedily fill train, overflow to valid.
        for index_set in index_sets:
            if len(train) + len(index_set) <= train_size:
                train += index_set
                train_scaffold_count += 1
            else:
                val += index_set
                val_scaffold_count += 1
    else:
        # Greedily fill train, then valid; everything else goes to test.
        for index_set in index_sets:
            if len(train) + len(index_set) <= train_size:
                train += index_set
                train_scaffold_count += 1
            elif len(val) + len(index_set) <= val_size:
                val += index_set
                val_scaffold_count += 1
            else:
                test += index_set
                test_scaffold_count += 1
    return {'train': df.iloc[train].reset_index(drop = True),
            'valid': df.iloc[val].reset_index(drop = True),
            'test': df.iloc[test].reset_index(drop = True)}
def train_val_test_split(len_data, frac, seed):
    """Randomly partition the indices ``range(len_data)`` into three arrays.

    ``frac`` is a (train, valid, test) fraction triple; valid absorbs the
    rounding remainder. Deterministic given ``seed``.

    Returns (train_idx, valid_idx, test_idx) numpy arrays.

    Fix: the original returned ``x[-test_size:]`` for the test indices, which
    yields the ENTIRE shuffled array when ``test_size == 0`` (``x[-0:]`` is
    ``x[0:]``). Slicing forward from the split point is correct in all cases.
    """
    test_size = int(len_data * frac[2])
    train_size = int(len_data * frac[0])
    val_size = len_data - train_size - test_size
    np.random.seed(seed)
    idx = np.arange(len_data)
    np.random.shuffle(idx)
    val_end = train_size + val_size
    return idx[:train_size], idx[train_size:val_end], idx[val_end:]
def install(package):
    """pip-install ``package`` into the currently running interpreter's
    environment; raises CalledProcessError if pip fails.
    """
    cmd = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(cmd)
def print_sys(s):
    """Write ``s`` to stderr and flush immediately (progress/diagnostics)."""
    print(s, file=sys.stderr, flush=True)
def _parse_prop(search, proplist):
props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]
if len(props) > 0:
return props[0]['value'][list(props[0]['value'].keys())[0]]
def request(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None):
    """Issue a PubChem PUG REST request and return the raw urllib response.

    Depending on the namespace/searchtype, the identifier is either embedded
    in the URL path (GET) or sent as POST form data.
    """
    api_base = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug'
    if not identifier:
        raise ValueError('identifier/cid cannot be None')
    # Normalize the identifier to a string; lists become comma-joined.
    if isinstance(identifier, int):
        identifier = str(identifier)
    if not isinstance(identifier, (str, bytes)):
        identifier = ','.join(str(part) for part in identifier)
    # Source ids may contain '/', which would break the URL path.
    if namespace == 'sourceid':
        identifier = identifier.replace('/', '.')
    urlid = postdata = None
    path_encoded = (namespace in ('listkey', 'formula', 'sourceid')
                    or searchtype == 'xref'
                    or (searchtype and namespace == 'cid')
                    or domain == 'sources')
    if path_encoded:
        urlid = quote(identifier.encode('utf8'))
    else:
        postdata = urlencode([(namespace, identifier)]).encode('utf8')
    segments = [api_base, domain, searchtype, namespace, urlid, operation, output]
    apiurl = '/'.join(segment for segment in segments if segment)
    # POST when postdata is set, otherwise a plain GET.
    return urlopen(apiurl, postdata)
def NegSample(df, column_names, frac, two_types):
    """Negative sampling for pairwise interaction data.

    ``column_names`` is (id1, seq1, id2, seq2). Positive pairs are the rows
    of ``df``; sampled negatives get label Y = 0 and are appended to ``df``.
    When ``two_types`` is False the two id columns share one entity pool and
    ``int(len(df) * frac)`` negatives are drawn; when True each column keeps
    its own pool and ``len(df)`` negatives are drawn. Deterministic
    (seed 1234). Note: the id columns of the input ``df`` are coerced to str
    in place, matching the original behavior.

    Fixes vs. original: the two_types branch referenced undefined names
    ``df_temp`` and ``id2seq`` (NameError at runtime), and
    ``DataFrame.append`` (removed in pandas 2.0) is replaced by pd.concat.
    """
    x = int(len(df) * frac)
    id1, x1, id2, x2 = column_names
    df[id1] = df[id1].apply(lambda v: str(v))
    df[id2] = df[id2].apply(lambda v: str(v))
    pos_set = set(tuple(pair) for pair in df[[id1, id2]].values)
    np.random.seed(1234)
    if not two_types:
        # Single entity pool drawn from both id columns.
        pool = np.unique(df[[id1, id2]].values.reshape(-1))
        samples = np.random.choice(pool, size=(x, 2), replace=True)
        neg_set = set(tuple(pair) for pair in samples
                      if pair[0] != pair[1]) - pos_set
        # Top up one pair at a time until we have enough negatives.
        while len(neg_set) < x:
            sample = np.random.choice(pool, 2, replace=False)
            candidate = (sample[0], sample[1])
            if candidate not in pos_set:
                neg_set.add(candidate)
        id2seq = dict(df[[id1, x1]].values)
        id2seq.update(df[[id2, x2]].values)
        neg_rows = [[a, id2seq[a], b, id2seq[b], 0] for a, b in neg_set]
    else:
        # Separate pools: id1 entities only pair with id2 entities.
        pool1 = np.unique(df[id1].values.reshape(-1))
        pool2 = np.unique(df[id2].values.reshape(-1))
        sample_id1 = np.random.choice(pool1, size=len(df), replace=True)
        sample_id2 = np.random.choice(pool2, size=len(df), replace=True)
        neg_set = set((sample_id1[i], sample_id2[i]) for i in range(len(df))
                      if sample_id1[i] != sample_id2[i]) - pos_set
        while len(neg_set) < len(df):
            s1 = np.random.choice(pool1, size=1, replace=True)
            s2 = np.random.choice(pool2, size=1, replace=True)
            candidate = (s1[0], s2[0])
            if candidate not in pos_set:
                neg_set.add(candidate)
        id2seq1 = dict(df[[id1, x1]].values)    # was: dict(df_temp[...]) -> NameError
        id2seq2 = dict(df[[id2, x2]].values)
        neg_rows = [[a, id2seq1[a], b, id2seq2[b], 0] for a, b in neg_set]
    neg_df = pd.DataFrame(neg_rows).rename(
        columns={0: id1, 1: x1, 2: id2, 3: x2, 4: 'Y'})
    return pd.concat([df, neg_df], ignore_index=True)
def uniprot2seq(ProteinID):
    """Download the FASTA record for a UniProt accession and return the
    amino-acid sequence as a single string (header line discarded).
    """
    import urllib.request as ur
    url = 'http://www.uniprot.org/uniprot/' + str(ProteinID) + '.fasta'
    fasta_lines = ur.urlopen(url).readlines()
    # Line 0 is the '>' FASTA header; the remainder is the sequence.
    return ''.join(line.strip().decode("utf-8") for line in fasta_lines[1:])
def cid2smiles(cid):
    """Resolve a PubChem CID to its canonical SMILES via the PUG REST API.

    Returns the SMILES string, or 'NULL' if the lookup or parsing fails
    (a message is printed in that case).
    """
    try:
        props = json.loads(request(cid).read().decode())['PC_Compounds'][0]['props']
        smiles = _parse_prop({'label': 'SMILES', 'name': 'Canonical'}, props)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        print('cid ' + str(cid) + ' failed, use NULL string')
        smiles = 'NULL'
    return smiles
def get_closet_match(predefined_tokens, test_token, threshold=0.8):
    """Fuzzy-match ``test_token`` against ``predefined_tokens``.

    Returns (best_token, similarity) with similarity in [0, 1]; raises
    ValueError (after printing the available tokens) when the best match
    falls below ``threshold``.
    """
    scores = [fuzz.ratio(str(token).lower(), str(test_token).lower())
              for token in predefined_tokens]
    assert (len(scores) == len(predefined_tokens))
    best_score = np.nanmax(scores)
    best_token = predefined_tokens[np.nanargmax(scores)]
    # fuzz.ratio is a percentage; normalize before comparing to threshold.
    if best_score / 100 < threshold:
        print_sys(predefined_tokens)
        raise ValueError(test_token,
                         "does not match to available values. "
                         "Please double check.")
    return best_token, best_score / 100
def save_dict(path, obj):
    """Pickle ``obj`` to ``path`` using the highest available protocol."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_dict(path):
    """Unpickle and return the object stored at ``path``."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def retrieve_label_name_list(name):
    """Fuzzy-resolve a dataset name and return its list of label/target names."""
    return dataset2target_lists[fuzzy_search(name, dataset_list)]
def retrieve_dataset_names(name):
    """Return the dataset names registered under the given task key.

    NOTE(review): ``name`` must be an exact key of the module-level
    ``dataset_names`` mapping -- unlike its siblings, no fuzzy matching
    is applied here.
    """
    return dataset_names[name]
def retrieve_all_benchmarks():
    """Return the names of every registered benchmark group."""
    return [benchmark for benchmark in benchmark_names]
def retrieve_benchmark_names(name):
    """Fuzzy-resolve a benchmark group name and return the flat list of all
    dataset names it contains (across every task in the group).
    """
    group = benchmark_names[fuzzy_search(name, list(benchmark_names.keys()))]
    return [dataset
            for task_datasets in group.values()
            for dataset in task_datasets]
def to_submission_format(results):
    """Collapse per-seed benchmark results into {benchmark: [mean, std]}.

    ``results`` maps each run/seed to a list of single-entry metric dicts;
    each row (benchmark) is reduced to its rounded mean and population std.
    """
    frame = pd.DataFrame(results)

    def summarize(row):
        # Each cell is a one-entry {metric_name: value} dict.
        values = [list(entry.values())[0] for entry in row]
        return [round(np.mean(values), 3), round(np.std(values), 3)]

    return dict(frame.apply(summarize, axis=1))
| true | true |
f7f7980bfe545a5add10a60ef3d0423dc009e1e5 | 2,021 | py | Python | leetcode/roman_to_integer.py | dmitrvk/solutions | 3831e8f8ca1e71ff158873593c7f7643341d3344 | [
"MIT"
] | null | null | null | leetcode/roman_to_integer.py | dmitrvk/solutions | 3831e8f8ca1e71ff158873593c7f7643341d3344 | [
"MIT"
] | null | null | null | leetcode/roman_to_integer.py | dmitrvk/solutions | 3831e8f8ca1e71ff158873593c7f7643341d3344 | [
"MIT"
] | null | null | null | import collections
class Solution:
    """Convert Roman numerals to integers by greedy prefix matching."""

    # Token values ordered largest-first so the scan always consumes the
    # highest-valued prefix (subtractive pairs like 'CM' included).
    ROMAN = collections.OrderedDict((
        (1000, 'M'),
        (900, 'CM'),
        (500, 'D'),
        (400, 'CD'),
        (100, 'C'),
        (90, 'XC'),
        (50, 'L'),
        (40, 'XL'),
        (10, 'X'),
        (9, 'IX'),
        (5, 'V'),
        (4, 'IV'),
        (3, 'III'),
        (2, 'II'),
        (1, 'I'),
    ))

    def romanToInt(self, roman: str) -> int:
        """Return the integer value of the Roman numeral ``roman``."""
        total = 0
        remaining = roman
        while remaining:
            for value, symbol in self.ROMAN.items():
                if remaining.startswith(symbol):
                    remaining = remaining[len(symbol):]
                    total += value
                    break
        return total
def test_roman_to_int():
    """Table-driven check of romanToInt over representative numerals."""
    solution = Solution()
    cases = {
        'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5,
        'VI': 6, 'VII': 7, 'VIII': 8, 'IX': 9, 'X': 10,
        'XI': 11, 'XIV': 14, 'XV': 15, 'XIX': 19, 'XXXIX': 39,
        'XL': 40, 'L': 50, 'LVIII': 58, 'LI': 51, 'XC': 90,
        'C': 100, 'CD': 400, 'D': 500, 'CM': 900, 'M': 1000,
        'MM': 2000, 'MMXXI': 2021, 'MCMXCIV': 1994, 'MMMCMXCIX': 3999,
    }
    for numeral, expected in cases.items():
        assert solution.romanToInt(numeral) == expected
| 30.164179 | 55 | 0.557645 | import collections
class Solution:
ROMAN = collections.OrderedDict((
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(3, 'III'),
(2, 'II'),
(1, 'I'),
))
def romanToInt(self, roman: str) -> int:
number = 0
while roman:
for key, value in self.ROMAN.items():
if roman.startswith(value):
roman = roman.replace(value, '', 1)
number += key
break
return number
def test_roman_to_int():
solution = Solution()
assert solution.romanToInt('I') == 1
assert solution.romanToInt('II') == 2
assert solution.romanToInt('III') == 3
assert solution.romanToInt('IV') == 4
assert solution.romanToInt('V') == 5
assert solution.romanToInt('VI') == 6
assert solution.romanToInt('VII') == 7
assert solution.romanToInt('VIII') == 8
assert solution.romanToInt('IX') == 9
assert solution.romanToInt('X') == 10
assert solution.romanToInt('XI') == 11
assert solution.romanToInt('XIV') == 14
assert solution.romanToInt('XV') == 15
assert solution.romanToInt('XIX') == 19
assert solution.romanToInt('XXXIX') == 39
assert solution.romanToInt('XL') == 40
assert solution.romanToInt('L') == 50
assert solution.romanToInt('LVIII') == 58
assert solution.romanToInt('LI') == 51
assert solution.romanToInt('XC') == 90
assert solution.romanToInt('C') == 100
assert solution.romanToInt('CD') == 400
assert solution.romanToInt('D') == 500
assert solution.romanToInt('CM') == 900
assert solution.romanToInt('M') == 1000
assert solution.romanToInt('MM') == 2000
assert solution.romanToInt('MMXXI') == 2021
assert solution.romanToInt('MCMXCIV') == 1994
assert solution.romanToInt('MMMCMXCIX') == 3999
| true | true |
f7f7989095ae300b2b9f3d29a6694dcc51a4760b | 2,073 | py | Python | gryphon/wizard/settings_states/add_remote_registry.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | gryphon/wizard/settings_states/add_remote_registry.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | 1 | 2022-03-08T14:54:26.000Z | 2022-03-08T15:02:52.000Z | gryphon/wizard/settings_states/add_remote_registry.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | import logging
from ..functions import erase_lines
from ..questions import SettingsQuestions
from ...fsm import State, Transition
from ...constants import NO
from ...constants import (YES, SUCCESS)
from ...core.settings import SettingsManager
logger = logging.getLogger('gryphon')
def back_to_previous(history, **display_kwargs):
    """Drop the most recent wizard history entry and erase its terminal lines.

    Extra keyword arguments are forwarded to ``erase_lines`` (e.g. n_lines).
    """
    del history[-1]
    erase_lines(**display_kwargs)
#####
def _condition_from_add_remote_registry_to_end(context: dict) -> bool:
    """True when the user confirmed adding the registry."""
    choice = context["confirmation_option"]
    return choice == YES
def _callback_from_add_remote_registry_to_end(context: dict) -> dict:
    """Persist the new git template registry, report success and reset the
    wizard history before returning to the options menu.
    """
    SettingsManager().add_git_template_registry(
        registry_name=context["registry_name"],
        registry_repo=context["url"],
    )
    logger.log(SUCCESS, f'Successfully added registry {context["registry_name"]}.')
    context["history"] = []
    print("\n")
    return context
####
def _condition_from_add_remote_registry_to_ask_option(context: dict) -> bool:
    """True when the user declined the registry addition."""
    choice = context["confirmation_option"]
    return choice == NO
def _callback_from_from_add_remote_registry_to_ask_option(context: dict) -> dict:
    """Rewind two wizard prompts (two terminal lines each) and return to the
    options menu without saving anything.
    """
    for _ in range(2):
        back_to_previous(context["history"], n_lines=2)
    return context
class AddRemoteRegistry(State):
    """Wizard state that collects a new remote (git) template registry and
    asks for confirmation before saving it.
    """
    name = "add_remote_registry"

    # Both transitions return to "ask_option"; which one fires depends on
    # whether the user declined (rewind prompts) or confirmed (persist).
    transitions = [
        Transition(
            next_state="ask_option",
            condition=_condition_from_add_remote_registry_to_ask_option,
            callback=_callback_from_from_add_remote_registry_to_ask_option
        ),
        Transition(
            next_state="ask_option",
            condition=_condition_from_add_remote_registry_to_end,
            callback=_callback_from_add_remote_registry_to_end
        )
    ]

    def on_start(self, context: dict) -> dict:
        """Prompt for registry name, git URL and confirmation; record the
        answers in ``context`` for the transition callbacks above.
        """
        context["registry_name"] = SettingsQuestions.ask_registry_name()
        context["url"] = SettingsQuestions.ask_git_url()
        context["confirmation_option"] = SettingsQuestions.confirm_registry_addition(context["registry_name"])
        return context
| 30.485294 | 110 | 0.718765 | import logging
from ..functions import erase_lines
from ..questions import SettingsQuestions
from ...fsm import State, Transition
from ...constants import NO
from ...constants import (YES, SUCCESS)
from ...core.settings import SettingsManager
logger = logging.getLogger('gryphon')
def back_to_previous(history, **kwargs):
history.pop()
erase_lines(**kwargs)
ition_from_add_remote_registry_to_end(context: dict) -> bool:
return context["confirmation_option"] == YES
def _callback_from_add_remote_registry_to_end(context: dict) -> dict:
manager = SettingsManager()
manager.add_git_template_registry(
registry_name=context["registry_name"],
registry_repo=context["url"]
)
logger.log(SUCCESS, f'Successfully added registry {context["registry_name"]}.')
context["history"] = []
print("\n")
return context
condition_from_add_remote_registry_to_ask_option(context: dict) -> bool:
return context["confirmation_option"] == NO
def _callback_from_from_add_remote_registry_to_ask_option(context: dict) -> dict:
back_to_previous(context["history"], n_lines=2)
back_to_previous(context["history"], n_lines=2)
return context
class AddRemoteRegistry(State):
name = "add_remote_registry"
transitions = [
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_ask_option,
callback=_callback_from_from_add_remote_registry_to_ask_option
),
Transition(
next_state="ask_option",
condition=_condition_from_add_remote_registry_to_end,
callback=_callback_from_add_remote_registry_to_end
)
]
def on_start(self, context: dict) -> dict:
context["registry_name"] = SettingsQuestions.ask_registry_name()
context["url"] = SettingsQuestions.ask_git_url()
context["confirmation_option"] = SettingsQuestions.confirm_registry_addition(context["registry_name"])
return context
| true | true |
f7f7995a3815e724ade1aa3347110ab4861d1a7c | 3,079 | py | Python | strainer/widgets/reference.py | jmbreuer/strainer | cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927 | [
"Unlicense"
] | 2 | 2020-04-10T22:20:14.000Z | 2020-05-14T21:35:12.000Z | strainer/widgets/reference.py | jmbreuer/strainer | cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927 | [
"Unlicense"
] | 31 | 2020-05-21T14:03:53.000Z | 2022-03-11T12:04:50.000Z | strainer/widgets/reference.py | jmbreuer/strainer | cf8d5fbb0782ca9d9148107c28cdcd66ac2d6927 | [
"Unlicense"
] | 1 | 2022-03-09T18:19:55.000Z | 2022-03-09T18:19:55.000Z | from PyQt5.QtCore import QSize, QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtWidgets import QApplication
from ..actions import HomePage, PreviousPage, NextPage, ReloadPage, CopyUrl, FindInPage
from ..controls import NavigateMenu
from ..types import FindDirection, FindOptions, FindQuery
from .base import Menu, MenuMixin, Find, FindMixin
class Reference(MenuMixin, FindMixin, QWebEngineView):
    """Embedded web view for browsing the hosted Sieve language reference.

    Adds navigation actions (home/back/forward/stop-or-reload), URL copying
    and in-page search on top of QWebEngineView.
    """

    _menu = Menu(
        NavigateMenu,
        {
            HomePage: 'home', PreviousPage: 'back', NextPage: 'forward',
            ReloadPage: 'stopOrReload', CopyUrl: 'copyUrl',
        },
        ('urlChanged',)
    )
    _find = Find(FindInPage, FindOptions(True))

    # URL template for pages of the hosted reference site.
    __make_url = 'https://thsmi.github.io/sieve-reference/en/{}.html'.format

    def __init__(self, parent):
        super().__init__(parent)
        self._isLoading = False
        # Fix: the original also defined a local `setLoading` helper here
        # that was never called or connected to anything (dead code); removed.
        self.loadStarted.connect(self.onLoadStarted)
        self.loadFinished.connect(self.onLoadFinished)
        self.home()

    def onLoadStarted(self):
        """Mark the view busy and show a progress message in the status bar."""
        self._isLoading = True
        self.window().statusBar().showMessage(f'Communicating with {self.url().host()}, please wait...')
        self.updateMenu()

    def onLoadFinished(self):
        """Clear the busy flag and the status-bar message."""
        self._isLoading = False
        self.window().statusBar().clearMessage()
        self.updateMenu()

    def isHome(self):
        """True when the current URL is the reference index page."""
        return self.url().matches(self._make_url(), QUrl.StripTrailingSlash | QUrl.NormalizePathSegments)

    def isLoading(self):
        """True while a page load is in progress."""
        return self._isLoading

    def home(self):
        """Navigate to the reference index page."""
        self.setUrl(self._make_url())

    def browse(self, category, page):
        """Open a specific reference page under the given category."""
        self.setUrl(self._make_url(category, 'core', page))

    def stopOrReload(self):
        """Stop the current load if busy, otherwise reload the page."""
        (self.stop if self._isLoading else self.reload)()

    def copyUrl(self):
        """Copy the active link's URL to the clipboard, falling back to the
        current page URL when no link is active.
        """
        QApplication.clipboard().setText(self.url().toString())
        # The WebAction doesn't say it's disabled if no link is active,
        # but it just doesn't copy anything in this case.
        # So we first copy the page URL, then overwrite it with the
        # currently selected link's URL (if any).
        self.page().triggerAction(QWebEnginePage.CopyLinkToClipboard)
        # (If the user first right-clicks a link, then clicks the
        # "copy link" somewhere else, it will still copy that.
        # I didn't find much we could do about it.)

    def sizeHint(self):
        """Preferred widget size (narrow sidebar-style panel)."""
        return QSize(300, 600)

    def _make_url(self, *components):
        """Build a QUrl for the given path components (index by default)."""
        if not components:
            components = ('index',)
        return QUrl(self.__make_url('/'.join(components)))

    def _findNext(self, query: FindQuery):
        """Run an in-page text search honoring direction and case options."""
        flags = 0
        if query.direction == FindDirection.Backward:
            flags |= QWebEnginePage.FindBackward
        if query.options.caseSensitive:
            flags |= QWebEnginePage.FindCaseSensitively
        self.findText(query.expression, QWebEnginePage.FindFlags(flags), query.callback)

    def _cancelFind(self):
        # An empty search string clears the current highlight.
        self.findText('')
| 34.988636 | 105 | 0.658006 | from PyQt5.QtCore import QSize, QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtWidgets import QApplication
from ..actions import HomePage, PreviousPage, NextPage, ReloadPage, CopyUrl, FindInPage
from ..controls import NavigateMenu
from ..types import FindDirection, FindOptions, FindQuery
from .base import Menu, MenuMixin, Find, FindMixin
class Reference(MenuMixin, FindMixin, QWebEngineView):
_menu = Menu(
NavigateMenu,
{
HomePage: 'home', PreviousPage: 'back', NextPage: 'forward',
ReloadPage: 'stopOrReload', CopyUrl: 'copyUrl',
},
('urlChanged',)
)
_find = Find(FindInPage, FindOptions(True))
__make_url = 'https://thsmi.github.io/sieve-reference/en/{}.html'.format
def __init__(self, parent):
super().__init__(parent)
self._isLoading = False
def setLoading(value):
self._isLoading = value
self.updateMenu()
self.loadStarted.connect(self.onLoadStarted)
self.loadFinished.connect(self.onLoadFinished)
self.home()
def onLoadStarted(self):
self._isLoading = True
self.window().statusBar().showMessage(f'Communicating with {self.url().host()}, please wait...')
self.updateMenu()
def onLoadFinished(self):
self._isLoading = False
self.window().statusBar().clearMessage()
self.updateMenu()
def isHome(self):
return self.url().matches(self._make_url(), QUrl.StripTrailingSlash | QUrl.NormalizePathSegments)
def isLoading(self):
return self._isLoading
def home(self):
self.setUrl(self._make_url())
def browse(self, category, page):
self.setUrl(self._make_url(category, 'core', page))
def stopOrReload(self):
(self.stop if self._isLoading else self.reload)()
def copyUrl(self):
QApplication.clipboard().setText(self.url().toString())
# So we first copy the page URL, then overwrite it with the
# currently selected link's URL (if any).
self.page().triggerAction(QWebEnginePage.CopyLinkToClipboard)
def sizeHint(self):
return QSize(300, 600)
def _make_url(self, *components):
if not components:
components = ('index',)
return QUrl(self.__make_url('/'.join(components)))
def _findNext(self, query: FindQuery):
flags = 0
if query.direction == FindDirection.Backward:
flags |= QWebEnginePage.FindBackward
if query.options.caseSensitive:
flags |= QWebEnginePage.FindCaseSensitively
self.findText(query.expression, QWebEnginePage.FindFlags(flags), query.callback)
def _cancelFind(self):
self.findText('')
| true | true |
f7f799eddfd72728b35bf7cd86a67b77514fdff5 | 12,804 | py | Python | tests/test_resources.py | JulianLiederer/restless | 660e77234d3336bae80e727351df44ade1dd67d4 | [
"BSD-3-Clause"
] | 1 | 2019-02-19T07:26:59.000Z | 2019-02-19T07:26:59.000Z | tests/test_resources.py | JulianLiederer/restless | 660e77234d3336bae80e727351df44ade1dd67d4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_resources.py | JulianLiederer/restless | 660e77234d3336bae80e727351df44ade1dd67d4 | [
"BSD-3-Clause"
] | null | null | null | import six
import unittest
from restless.exceptions import HttpError, NotFound, MethodNotImplemented
from restless.preparers import Preparer, FieldsPreparer
from restless.resources import Resource
from restless.utils import json
from .fakes import FakeHttpRequest, FakeHttpResponse
class GenericResource(Resource):
    """Concrete Resource used by the tests below.

    Supplies a framework-specific ``build_response`` (backed by
    FakeHttpResponse) and fakes per-endpoint authentication: the 'list'
    endpoint is always denied, everything else defers to the base class.
    """
    def build_response(self, data, status=200):
        response = FakeHttpResponse(data, content_type='application/json')
        response.status_code = status
        return response

    def is_authenticated(self):
        # Deny the 'list' endpoint unconditionally; defer otherwise.
        if self.endpoint == 'list':
            return False
        return super(GenericResource, self).is_authenticated()
class ResourceTestCase(unittest.TestCase):
    """Exercises the framework-agnostic behavior of ``Resource`` through
    ``GenericResource``: request introspection, (de)serialization,
    preparation, error building, authentication and endpoint dispatch.
    """
    resource_class = GenericResource

    def setUp(self):
        super(ResourceTestCase, self).setUp()
        self.res = self.resource_class()

        # Assign here, since we typically won't be entering through
        # ``as_list/as_detail`` methods like normal flow.
        self.res.request = FakeHttpRequest()

    def test_init(self):
        # Constructor args/kwargs are stored; everything else starts unset.
        res = self.resource_class('abc', test=True)
        self.assertEqual(res.init_args, ('abc',))
        self.assertEqual(res.init_kwargs, {'test': True})
        self.assertIsNone(res.request)
        self.assertIsNone(res.data)
        self.assertIsNone(res.endpoint)
        self.assertEqual(res.status, 200)

    def test_request_method(self):
        # Method is read from the wrapped request for each HTTP verb.
        self.assertEqual(self.res.request_method(), 'GET')

        self.res.request = FakeHttpRequest('POST', '{"hello": "world"}')
        self.assertEqual(self.res.request_method(), 'POST')

        self.res.request = FakeHttpRequest('PUT', '{"hello": "world"}')
        self.assertEqual(self.res.request_method(), 'PUT')

        self.res.request = FakeHttpRequest('DELETE', '')
        self.assertEqual(self.res.request_method(), 'DELETE')

    def test_request_body(self):
        # Bodies come back as bytes on Py3, str on Py2.
        if six.PY3:
            self.assertEqual(self.res.request_body(), b'')
        else:
            self.assertEqual(self.res.request_body(), '')

        self.res.request = FakeHttpRequest('POST', '{"hello": "world"}')
        if six.PY3:
            self.assertEqual(self.res.request_body(), b'{"hello": "world"}')
        else:
            self.assertEqual(self.res.request_body(), '{"hello": "world"}')

        self.res.request = FakeHttpRequest('PUT', '{"hello": "world"}')
        if six.PY3:
            self.assertEqual(self.res.request_body(), b'{"hello": "world"}')
        else:
            self.assertEqual(self.res.request_body(), '{"hello": "world"}')

        self.res.request = FakeHttpRequest('DELETE', '{}')
        if six.PY3:
            self.assertEqual(self.res.request_body(), b'{}')
        else:
            self.assertEqual(self.res.request_body(), '{}')

    def test_build_response(self):
        resp = self.res.build_response('Hello, world!')
        self.assertEqual(resp.body, 'Hello, world!')
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.status_code, 200)

        resp = self.res.build_response('{"hello": "world"}', status=302)
        self.assertEqual(resp.body, '{"hello": "world"}')
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.status_code, 302)

    def test_build_error(self):
        # Restless exceptions carry their message and status through.
        err = HttpError("Whoopsie")
        resp = self.res.build_error(err)
        resp_body = json.loads(resp.body)
        self.assertEqual(resp_body, {'error': 'Whoopsie'})
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.status_code, 500)

        nf_err = NotFound()
        resp = self.res.build_error(nf_err)
        resp_body = json.loads(resp.body)
        # Default error message.
        self.assertEqual(resp_body, {'error': 'Resource not found.'})
        self.assertEqual(resp.content_type, 'application/json')
        # Custom status code.
        self.assertEqual(resp.status_code, 404)

        # Non-restless exception.
        unknown_err = AttributeError("'something' not found on the object.")
        resp = self.res.build_error(unknown_err)
        resp_body = json.loads(resp.body)
        # Still gets the JSON treatment & an appropriate status code.
        self.assertEqual(resp_body, {'error': "'something' not found on the object."})
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.status_code, 500)

    def test_is_debug(self):
        self.assertFalse(self.res.is_debug())

    def test_bubble_exceptions(self):
        self.assertFalse(self.res.bubble_exceptions())

    def test_deserialize(self):
        # deserialize() dispatches to the list/detail variants by endpoint.
        list_body = '["one", "three", "two"]'
        self.assertEqual(self.res.deserialize('POST', 'list', list_body), [
            "one",
            "three",
            "two",
        ])

        # Should select list.
        self.assertEqual(self.res.deserialize('POST', 'list', ''), [])
        # Should select detail.
        self.assertEqual(self.res.deserialize('PUT', 'detail', ''), {})

    def test_deserialize_list(self):
        body = '["one", "three", "two"]'
        self.assertEqual(self.res.deserialize_list(body), [
            "one",
            "three",
            "two",
        ])

        self.assertEqual(self.res.deserialize_list(''), [])

    def test_deserialize_detail(self):
        body = '{"title": "Hitchhiker\'s Guide To The Galaxy", "author": "Douglas Adams"}'
        self.assertEqual(self.res.deserialize_detail(body), {
            'author': 'Douglas Adams',
            'title': "Hitchhiker's Guide To The Galaxy",
        })

        self.assertEqual(self.res.deserialize_detail(''), {})

    def test_serialize(self):
        list_data = ['a', 'c', 'b']
        detail_data = {'hello': 'world'}

        # Normal calls.
        self.assertEqual(self.res.serialize('GET', 'list', list_data), '{"objects": ["a", "c", "b"]}')
        self.assertEqual(self.res.serialize('GET', 'detail', detail_data), '{"hello": "world"}')
        # The create special-case.
        self.assertEqual(self.res.serialize('POST', 'list', detail_data), '{"hello": "world"}')
        # Make sure other methods aren't special-cased.
        self.assertEqual(self.res.serialize('PUT', 'list', list_data), '{"objects": ["a", "c", "b"]}')

    def test_serialize_list(self):
        data = [
            {
                'title': 'Cosmos',
                'author': 'Carl Sagan',
                'short_desc': 'A journey through the stars by an emminent astrophysist.',
                'pub_date': '1980',
            },
            {
                'title': "Hitchhiker's Guide To The Galaxy",
                'author': 'Douglas Adams',
                'short_desc': "Don't forget your towel.",
                'pub_date': '1979',
            }
        ]

        self.res.preparer = FieldsPreparer(fields={
            'title': 'title',
            'author': 'author',
            'synopsis': 'short_desc',
        })
        res = self.res.serialize_list(data)
        self.assertEqual(json.loads(res), {
            'objects': [
                {
                    'author': 'Carl Sagan',
                    'synopsis': 'A journey through the stars by an emminent astrophysist.',
                    'title': 'Cosmos'
                },
                {
                    'title': "Hitchhiker's Guide To The Galaxy",
                    'author': 'Douglas Adams',
                    'synopsis': "Don't forget your towel.",
                },
            ],
        })

        # Make sure we don't try to serialize a ``None``, which would fail.
        self.assertEqual(self.res.serialize_list(None), '')

    def test_serialize_detail(self):
        # This isn't very unit-y, but we're also testing that we're using the
        # right JSON encoder & that it can handle other data types.
        data = {
            'title': 'Cosmos',
            'author': 'Carl Sagan',
            'short_desc': 'A journey through the stars by an emminent astrophysist.',
        }

        self.res.preparer = FieldsPreparer(fields={
            'title': 'title',
            'author': 'author',
            'synopsis': 'short_desc',
        })
        res = self.res.serialize_detail(data)
        self.assertEqual(json.loads(res), {
            'author': 'Carl Sagan',
            'synopsis': 'A journey through the stars by an emminent astrophysist.',
            'title': 'Cosmos'
        })

        # Make sure we don't try to serialize a ``None``, which would fail.
        self.assertEqual(self.res.serialize_detail(None), '')

    def test_prepare(self):
        # Without fields.
        data = {
            'title': 'Cosmos',
            'author': 'Carl Sagan',
            'short_desc': 'A journey through the stars by an emminent astrophysist.',
            'pub_date': '1980'
        }

        # Should be unmodified.
        self.assertIsInstance(self.res.preparer, Preparer)
        self.assertEqual(self.res.prepare(data), data)

        self.res.preparer = FieldsPreparer(fields={
            'title': 'title',
            'author': 'author',
            'synopsis': 'short_desc',
        })
        self.assertEqual(self.res.prepare(data), {
            'author': 'Carl Sagan',
            'synopsis': 'A journey through the stars by an emminent astrophysist.',
            'title': 'Cosmos'
        })

    def test_prepare_list(self):
        # An endpoint-specific preparer (list_preparer) takes precedence
        # once the 'list' endpoint is active.
        data = {
            'title': 'Cosmos',
            'author': 'Carl Sagan',
            'short_desc': 'A journey through the stars by an emminent astrophysist.',
            'pub_date': '1980',
            'index': '7',
        }
        self.res.handle('list')
        self.res.list_preparer = FieldsPreparer(fields={
            'title': 'title',
            'author': 'author',
            'index': 'index',
        })
        self.assertEqual(self.res.prepare(data), {
            'title': 'Cosmos',
            'author': 'Carl Sagan',
            'index': '7',
        })

    def test_wrap_list_response(self):
        data = ['one', 'three', 'two']
        self.assertEqual(self.res.wrap_list_response(data), {
            'objects': [
                'one',
                'three',
                'two',
            ],
        })

    def test_is_authenticated(self):
        # By default, only GETs are allowed.
        self.assertTrue(self.res.is_authenticated())

        self.res.request = FakeHttpRequest('POST')
        self.assertFalse(self.res.is_authenticated())

        self.res.request = FakeHttpRequest('PUT')
        self.assertFalse(self.res.is_authenticated())

        self.res.request = FakeHttpRequest('DELETE')
        self.assertFalse(self.res.is_authenticated())

        self.res.handle('list')
        self.assertFalse(self.res.is_authenticated())

    # Every base CRUD hook must raise MethodNotImplemented until overridden.
    def test_list(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.list()

    def test_detail(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.detail()

    def test_create(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.create()

    def test_update(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.update()

    def test_delete(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.delete()

    def test_update_list(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.update_list()

    def test_create_detail(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.create_detail()

    def test_delete_list(self):
        with self.assertRaises(MethodNotImplemented):
            self.res.delete_list()

    # handle(<name>) must record which endpoint is being serviced.
    def test_endpoint_list(self):
        self.res.handle('list')
        self.assertEqual(self.res.endpoint, 'list')

    def test_endpoint_detail(self):
        self.res.handle('detail')
        self.assertEqual(self.res.endpoint, 'detail')

    def test_endpoint_create(self):
        self.res.handle('create')
        self.assertEqual(self.res.endpoint, 'create')

    def test_endpoint_update(self):
        self.res.handle('update')
        self.assertEqual(self.res.endpoint, 'update')

    def test_endpoint_delete(self):
        self.res.handle('delete')
        self.assertEqual(self.res.endpoint, 'delete')

    def test_endpoint_update_list(self):
        self.res.handle('update_list')
        self.assertEqual(self.res.endpoint, 'update_list')

    def test_endpoint_create_detail(self):
        self.res.handle('create_detail')
        self.assertEqual(self.res.endpoint, 'create_detail')

    def test_endpoint_delete_list(self):
        self.res.handle('delete_list')
        self.assertEqual(self.res.endpoint, 'delete_list')
| 34.983607 | 102 | 0.586848 | import six
import unittest
from restless.exceptions import HttpError, NotFound, MethodNotImplemented
from restless.preparers import Preparer, FieldsPreparer
from restless.resources import Resource
from restless.utils import json
from .fakes import FakeHttpRequest, FakeHttpResponse
class GenericResource(Resource):
def build_response(self, data, status=200):
resp = FakeHttpResponse(data, content_type='application/json')
resp.status_code = status
return resp
def is_authenticated(self):
if self.endpoint == 'list':
return False
return super(GenericResource, self).is_authenticated()
class ResourceTestCase(unittest.TestCase):
resource_class = GenericResource
def setUp(self):
super(ResourceTestCase, self).setUp()
self.res = self.resource_class()
# ``as_list/as_detail`` methods like normal flow.
self.res.request = FakeHttpRequest()
def test_init(self):
res = self.resource_class('abc', test=True)
self.assertEqual(res.init_args, ('abc',))
self.assertEqual(res.init_kwargs, {'test': True})
self.assertIsNone(res.request)
self.assertIsNone(res.data)
self.assertIsNone(res.endpoint)
self.assertEqual(res.status, 200)
def test_request_method(self):
self.assertEqual(self.res.request_method(), 'GET')
self.res.request = FakeHttpRequest('POST', '{"hello": "world"}')
self.assertEqual(self.res.request_method(), 'POST')
self.res.request = FakeHttpRequest('PUT', '{"hello": "world"}')
self.assertEqual(self.res.request_method(), 'PUT')
self.res.request = FakeHttpRequest('DELETE', '')
self.assertEqual(self.res.request_method(), 'DELETE')
def test_request_body(self):
if six.PY3:
self.assertEqual(self.res.request_body(), b'')
else:
self.assertEqual(self.res.request_body(), '')
self.res.request = FakeHttpRequest('POST', '{"hello": "world"}')
if six.PY3:
self.assertEqual(self.res.request_body(), b'{"hello": "world"}')
else:
self.assertEqual(self.res.request_body(), '{"hello": "world"}')
self.res.request = FakeHttpRequest('PUT', '{"hello": "world"}')
if six.PY3:
self.assertEqual(self.res.request_body(), b'{"hello": "world"}')
else:
self.assertEqual(self.res.request_body(), '{"hello": "world"}')
self.res.request = FakeHttpRequest('DELETE', '{}')
if six.PY3:
self.assertEqual(self.res.request_body(), b'{}')
else:
self.assertEqual(self.res.request_body(), '{}')
def test_build_response(self):
resp = self.res.build_response('Hello, world!')
self.assertEqual(resp.body, 'Hello, world!')
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.status_code, 200)
resp = self.res.build_response('{"hello": "world"}', status=302)
self.assertEqual(resp.body, '{"hello": "world"}')
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.status_code, 302)
def test_build_error(self):
err = HttpError("Whoopsie")
resp = self.res.build_error(err)
resp_body = json.loads(resp.body)
self.assertEqual(resp_body, {'error': 'Whoopsie'})
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.status_code, 500)
nf_err = NotFound()
resp = self.res.build_error(nf_err)
resp_body = json.loads(resp.body)
# Default error message.
self.assertEqual(resp_body, {'error': 'Resource not found.'})
self.assertEqual(resp.content_type, 'application/json')
# Custom status code.
self.assertEqual(resp.status_code, 404)
# Non-restless exception.
unknown_err = AttributeError("'something' not found on the object.")
resp = self.res.build_error(unknown_err)
resp_body = json.loads(resp.body)
# Still gets the JSON treatment & an appropriate status code.
self.assertEqual(resp_body, {'error': "'something' not found on the object."})
self.assertEqual(resp.content_type, 'application/json')
self.assertEqual(resp.status_code, 500)
def test_is_debug(self):
self.assertFalse(self.res.is_debug())
def test_bubble_exceptions(self):
self.assertFalse(self.res.bubble_exceptions())
def test_deserialize(self):
list_body = '["one", "three", "two"]'
self.assertEqual(self.res.deserialize('POST', 'list', list_body), [
"one",
"three",
"two",
])
# Should select list.
self.assertEqual(self.res.deserialize('POST', 'list', ''), [])
# Should select detail.
self.assertEqual(self.res.deserialize('PUT', 'detail', ''), {})
def test_deserialize_list(self):
body = '["one", "three", "two"]'
self.assertEqual(self.res.deserialize_list(body), [
"one",
"three",
"two",
])
self.assertEqual(self.res.deserialize_list(''), [])
def test_deserialize_detail(self):
body = '{"title": "Hitchhiker\'s Guide To The Galaxy", "author": "Douglas Adams"}'
self.assertEqual(self.res.deserialize_detail(body), {
'author': 'Douglas Adams',
'title': "Hitchhiker's Guide To The Galaxy",
})
self.assertEqual(self.res.deserialize_detail(''), {})
def test_serialize(self):
list_data = ['a', 'c', 'b']
detail_data = {'hello': 'world'}
# Normal calls.
self.assertEqual(self.res.serialize('GET', 'list', list_data), '{"objects": ["a", "c", "b"]}')
self.assertEqual(self.res.serialize('GET', 'detail', detail_data), '{"hello": "world"}')
# The create special-case.
self.assertEqual(self.res.serialize('POST', 'list', detail_data), '{"hello": "world"}')
# Make sure other methods aren't special-cased.
self.assertEqual(self.res.serialize('PUT', 'list', list_data), '{"objects": ["a", "c", "b"]}')
def test_serialize_list(self):
data = [
{
'title': 'Cosmos',
'author': 'Carl Sagan',
'short_desc': 'A journey through the stars by an emminent astrophysist.',
'pub_date': '1980',
},
{
'title': "Hitchhiker's Guide To The Galaxy",
'author': 'Douglas Adams',
'short_desc': "Don't forget your towel.",
'pub_date': '1979',
}
]
self.res.preparer = FieldsPreparer(fields={
'title': 'title',
'author': 'author',
'synopsis': 'short_desc',
})
res = self.res.serialize_list(data)
self.assertEqual(json.loads(res), {
'objects': [
{
'author': 'Carl Sagan',
'synopsis': 'A journey through the stars by an emminent astrophysist.',
'title': 'Cosmos'
},
{
'title': "Hitchhiker's Guide To The Galaxy",
'author': 'Douglas Adams',
'synopsis': "Don't forget your towel.",
},
],
})
self.assertEqual(self.res.serialize_list(None), '')
def test_serialize_detail(self):
# This isn't very unit-y, but we're also testing that we're using the
data = {
'title': 'Cosmos',
'author': 'Carl Sagan',
'short_desc': 'A journey through the stars by an emminent astrophysist.',
}
self.res.preparer = FieldsPreparer(fields={
'title': 'title',
'author': 'author',
'synopsis': 'short_desc',
})
res = self.res.serialize_detail(data)
self.assertEqual(json.loads(res), {
'author': 'Carl Sagan',
'synopsis': 'A journey through the stars by an emminent astrophysist.',
'title': 'Cosmos'
})
self.assertEqual(self.res.serialize_detail(None), '')
def test_prepare(self):
# Without fields.
data = {
'title': 'Cosmos',
'author': 'Carl Sagan',
'short_desc': 'A journey through the stars by an emminent astrophysist.',
'pub_date': '1980'
}
# Should be unmodified.
self.assertIsInstance(self.res.preparer, Preparer)
self.assertEqual(self.res.prepare(data), data)
self.res.preparer = FieldsPreparer(fields={
'title': 'title',
'author': 'author',
'synopsis': 'short_desc',
})
self.assertEqual(self.res.prepare(data), {
'author': 'Carl Sagan',
'synopsis': 'A journey through the stars by an emminent astrophysist.',
'title': 'Cosmos'
})
def test_prepare_list(self):
data = {
'title': 'Cosmos',
'author': 'Carl Sagan',
'short_desc': 'A journey through the stars by an emminent astrophysist.',
'pub_date': '1980',
'index': '7',
}
self.res.handle('list')
self.res.list_preparer = FieldsPreparer(fields={
'title': 'title',
'author': 'author',
'index': 'index',
})
self.assertEqual(self.res.prepare(data), {
'title': 'Cosmos',
'author': 'Carl Sagan',
'index': '7',
})
def test_wrap_list_response(self):
data = ['one', 'three', 'two']
self.assertEqual(self.res.wrap_list_response(data), {
'objects': [
'one',
'three',
'two',
],
})
def test_is_authenticated(self):
# By default, only GETs are allowed.
self.assertTrue(self.res.is_authenticated())
self.res.request = FakeHttpRequest('POST')
self.assertFalse(self.res.is_authenticated())
self.res.request = FakeHttpRequest('PUT')
self.assertFalse(self.res.is_authenticated())
self.res.request = FakeHttpRequest('DELETE')
self.assertFalse(self.res.is_authenticated())
self.res.handle('list')
self.assertFalse(self.res.is_authenticated())
def test_list(self):
with self.assertRaises(MethodNotImplemented):
self.res.list()
def test_detail(self):
with self.assertRaises(MethodNotImplemented):
self.res.detail()
def test_create(self):
with self.assertRaises(MethodNotImplemented):
self.res.create()
def test_update(self):
with self.assertRaises(MethodNotImplemented):
self.res.update()
def test_delete(self):
with self.assertRaises(MethodNotImplemented):
self.res.delete()
def test_update_list(self):
with self.assertRaises(MethodNotImplemented):
self.res.update_list()
def test_create_detail(self):
with self.assertRaises(MethodNotImplemented):
self.res.create_detail()
def test_delete_list(self):
with self.assertRaises(MethodNotImplemented):
self.res.delete_list()
def test_endpoint_list(self):
self.res.handle('list')
self.assertEqual(self.res.endpoint, 'list')
def test_endpoint_detail(self):
self.res.handle('detail')
self.assertEqual(self.res.endpoint, 'detail')
def test_endpoint_create(self):
self.res.handle('create')
self.assertEqual(self.res.endpoint, 'create')
def test_endpoint_update(self):
self.res.handle('update')
self.assertEqual(self.res.endpoint, 'update')
def test_endpoint_delete(self):
self.res.handle('delete')
self.assertEqual(self.res.endpoint, 'delete')
def test_endpoint_update_list(self):
self.res.handle('update_list')
self.assertEqual(self.res.endpoint, 'update_list')
def test_endpoint_create_detail(self):
self.res.handle('create_detail')
self.assertEqual(self.res.endpoint, 'create_detail')
def test_endpoint_delete_list(self):
self.res.handle('delete_list')
self.assertEqual(self.res.endpoint, 'delete_list')
| true | true |
f7f79a0e840973ca5e08241da89a77b61d9e27ea | 8,322 | py | Python | validator/sawtooth_validator/consensus/poet0/poet_transaction_block.py | jrineck/sawtooth-core | e3eb79f32c97a25993c87eda7f77a02fd2086c7c | [
"Apache-2.0"
] | null | null | null | validator/sawtooth_validator/consensus/poet0/poet_transaction_block.py | jrineck/sawtooth-core | e3eb79f32c97a25993c87eda7f77a02fd2086c7c | [
"Apache-2.0"
] | null | null | null | validator/sawtooth_validator/consensus/poet0/poet_transaction_block.py | jrineck/sawtooth-core | e3eb79f32c97a25993c87eda7f77a02fd2086c7c | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import hashlib
from threading import RLock
from journal import transaction_block
from journal.messages import transaction_block_message
from sawtooth_validator.consensus.poet0.wait_certificate \
import WaitCertificate, WaitTimer
from gossip.common import NullIdentifier
LOGGER = logging.getLogger(__name__)
def register_message_handlers(journal):
"""Registers transaction block message handlers with the journal.
Args:
journal (PoetJournal): The journal on which to register the
message handlers.
"""
journal.dispatcher.register_message_handler(
PoetTransactionBlockMessage,
transaction_block_message.transaction_block_message_handler)
class PoetTransactionBlockMessage(
transaction_block_message.TransactionBlockMessage):
"""Represents the message format for exchanging information about blocks.
Attributes:
PoetTransactionBlockMessage.MessageType (str): The class name of
the message.
"""
MessageType = "/Poet0/TransactionBlock"
def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(PoetTransactionBlockMessage, self).__init__(minfo)
tinfo = minfo.get('TransactionBlock', {})
self.TransactionBlock = PoetTransactionBlock(tinfo)
class PoetTransactionBlock(transaction_block.TransactionBlock):
"""A set of transactions to be applied to a ledger, and proof of wait data.
Attributes:
PoetTransactionBlock.TransactionBlockTypeName (str): The name of the
transaction block type.
PoetTransactionBlock.MessageType (type): The message class.
wait_timer (wait_timer.WaitTimer): The wait timer for the block.
wait_certificate (wait_certificate.WaitCertificate): The wait
certificate for the block.
"""
TransactionBlockTypeName = '/Poet/PoetTransactionBlock'
MessageType = PoetTransactionBlockMessage
def __init__(self, minfo=None):
"""Constructor for the PoetTransactionBlock class.
Args:
minfo (dict): A dict of values for initializing
PoetTransactionBlocks.
"""
if minfo is None:
minfo = {}
super(PoetTransactionBlock, self).__init__(minfo)
self._lock = RLock()
self.wait_timer = None
self.wait_certificate = None
if 'WaitCertificate' in minfo:
wc = minfo.get('WaitCertificate')
serialized = wc.get('SerializedCert')
signature = wc.get('Signature')
self.wait_certificate = \
WaitCertificate.deserialize_wait_certificate(
serialized, signature)
self.aggregate_local_mean = 0.0
def __getstate__(self):
state = self.__dict__.copy()
del state['_lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = RLock()
def __str__(self):
return "{0}, {1}, {2}, {3:0.2f}, {4}".format(
self.BlockNum, self.Identifier[:8], len(self.TransactionIDs),
self.CommitTime, self.wait_certificate)
def __cmp__(self, other):
"""
Compare two blocks, this will throw an error unless
both blocks are valid.
"""
if self.Status != transaction_block.Status.valid:
raise ValueError('block {0} must be valid for comparison'.format(
self.Identifier))
if other.Status != transaction_block.Status.valid:
raise ValueError('block {0} must be valid for comparison'.format(
other.Identifier))
# Criteria #1: if both blocks share the same previous block,
# then the block with the smallest duration wins
if self.PreviousBlockID == other.PreviousBlockID:
if self.wait_certificate.duration < \
other.wait_certificate.duration:
return 1
elif self.wait_certificate.duration > \
other.wait_certificate.duration:
return -1
# Criteria #2: if there is a difference between the immediate
# ancestors then pick the chain with the highest aggregate
# local mean, this will be the largest population (more or less)
else:
if self.aggregate_local_mean > other.aggregate_local_mean:
return 1
elif self.aggregate_local_mean < other.aggregate_local_mean:
return -1
# Criteria #3... use number of transactions as a tie breaker, this
# should not happen except in very rare cases
return super(PoetTransactionBlock, self).__cmp__(other)
def update_block_weight(self, journal):
with self._lock:
assert self.Status == transaction_block.Status.valid
super(PoetTransactionBlock, self).update_block_weight(journal)
assert self.wait_certificate
self.aggregate_local_mean = self.wait_certificate.local_mean
if self.PreviousBlockID != NullIdentifier:
assert self.PreviousBlockID in journal.block_store
self.aggregate_local_mean += \
journal.block_store[self.PreviousBlockID].\
aggregate_local_mean
def is_valid(self, journal):
"""Verifies that the block received is valid.
This includes checks for valid signature and a valid
WaitCertificate.
Args:
journal (PoetJorunal): Journal for pulling context.
"""
with self._lock:
if not super(PoetTransactionBlock, self).is_valid(journal):
return False
if not self.wait_certificate:
LOGGER.info('not a valid block, no wait certificate')
return False
return self.wait_certificate.is_valid_wait_certificate(
self.OriginatorID,
journal.consensus.build_certificate_list(
journal.block_store, self),
self.TransactionIDs)
def create_wait_timer(self, validator_address, certlist):
"""Creates a wait timer for the journal based on a list
of wait certificates.
Args:
certlist (list): A list of wait certificates.
"""
with self._lock:
self.wait_timer = WaitTimer.create_wait_timer(
validator_address,
certlist)
def create_wait_certificate(self):
"""Create a wait certificate for the journal based on the wait timer.
"""
with self._lock:
hasher = hashlib.sha256()
for tid in self.TransactionIDs:
hasher.update(tid)
block_hash = hasher.hexdigest()
self.wait_certificate = \
WaitCertificate.create_wait_certificate(
self.wait_timer,
block_hash)
if self.wait_certificate:
self.wait_timer = None
def wait_timer_is_expired(self, now):
"""Determines if the wait timer is expired.
Returns:
bool: Whether or not the wait timer is expired.
"""
with self._lock:
return self.wait_timer.is_expired(now)
def dump(self):
"""Returns a dict with information about the block.
Returns:
dict: A dict containing information about the block.
"""
with self._lock:
result = super(PoetTransactionBlock, self).dump()
result['WaitCertificate'] = self.wait_certificate.dump()
return result
| 35.564103 | 80 | 0.633261 |
import logging
import hashlib
from threading import RLock
from journal import transaction_block
from journal.messages import transaction_block_message
from sawtooth_validator.consensus.poet0.wait_certificate \
import WaitCertificate, WaitTimer
from gossip.common import NullIdentifier
LOGGER = logging.getLogger(__name__)
def register_message_handlers(journal):
journal.dispatcher.register_message_handler(
PoetTransactionBlockMessage,
transaction_block_message.transaction_block_message_handler)
class PoetTransactionBlockMessage(
transaction_block_message.TransactionBlockMessage):
MessageType = "/Poet0/TransactionBlock"
def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(PoetTransactionBlockMessage, self).__init__(minfo)
tinfo = minfo.get('TransactionBlock', {})
self.TransactionBlock = PoetTransactionBlock(tinfo)
class PoetTransactionBlock(transaction_block.TransactionBlock):
TransactionBlockTypeName = '/Poet/PoetTransactionBlock'
MessageType = PoetTransactionBlockMessage
def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(PoetTransactionBlock, self).__init__(minfo)
self._lock = RLock()
self.wait_timer = None
self.wait_certificate = None
if 'WaitCertificate' in minfo:
wc = minfo.get('WaitCertificate')
serialized = wc.get('SerializedCert')
signature = wc.get('Signature')
self.wait_certificate = \
WaitCertificate.deserialize_wait_certificate(
serialized, signature)
self.aggregate_local_mean = 0.0
def __getstate__(self):
state = self.__dict__.copy()
del state['_lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = RLock()
def __str__(self):
return "{0}, {1}, {2}, {3:0.2f}, {4}".format(
self.BlockNum, self.Identifier[:8], len(self.TransactionIDs),
self.CommitTime, self.wait_certificate)
def __cmp__(self, other):
if self.Status != transaction_block.Status.valid:
raise ValueError('block {0} must be valid for comparison'.format(
self.Identifier))
if other.Status != transaction_block.Status.valid:
raise ValueError('block {0} must be valid for comparison'.format(
other.Identifier))
r.PreviousBlockID:
if self.wait_certificate.duration < \
other.wait_certificate.duration:
return 1
elif self.wait_certificate.duration > \
other.wait_certificate.duration:
return -1
lf.aggregate_local_mean > other.aggregate_local_mean:
return 1
elif self.aggregate_local_mean < other.aggregate_local_mean:
return -1
lf).__cmp__(other)
def update_block_weight(self, journal):
with self._lock:
assert self.Status == transaction_block.Status.valid
super(PoetTransactionBlock, self).update_block_weight(journal)
assert self.wait_certificate
self.aggregate_local_mean = self.wait_certificate.local_mean
if self.PreviousBlockID != NullIdentifier:
assert self.PreviousBlockID in journal.block_store
self.aggregate_local_mean += \
journal.block_store[self.PreviousBlockID].\
aggregate_local_mean
def is_valid(self, journal):
with self._lock:
if not super(PoetTransactionBlock, self).is_valid(journal):
return False
if not self.wait_certificate:
LOGGER.info('not a valid block, no wait certificate')
return False
return self.wait_certificate.is_valid_wait_certificate(
self.OriginatorID,
journal.consensus.build_certificate_list(
journal.block_store, self),
self.TransactionIDs)
def create_wait_timer(self, validator_address, certlist):
with self._lock:
self.wait_timer = WaitTimer.create_wait_timer(
validator_address,
certlist)
def create_wait_certificate(self):
with self._lock:
hasher = hashlib.sha256()
for tid in self.TransactionIDs:
hasher.update(tid)
block_hash = hasher.hexdigest()
self.wait_certificate = \
WaitCertificate.create_wait_certificate(
self.wait_timer,
block_hash)
if self.wait_certificate:
self.wait_timer = None
def wait_timer_is_expired(self, now):
with self._lock:
return self.wait_timer.is_expired(now)
def dump(self):
with self._lock:
result = super(PoetTransactionBlock, self).dump()
result['WaitCertificate'] = self.wait_certificate.dump()
return result
| true | true |
f7f79a3d84b224a60560d7b6f7878e89af50729e | 815 | py | Python | yatube/yatube/urls.py | AlexeyTikhonchuk/yatube_project | 1e05f9ceffeaebcea2b690e02844d15b89d80a8f | [
"MIT"
] | null | null | null | yatube/yatube/urls.py | AlexeyTikhonchuk/yatube_project | 1e05f9ceffeaebcea2b690e02844d15b89d80a8f | [
"MIT"
] | null | null | null | yatube/yatube/urls.py | AlexeyTikhonchuk/yatube_project | 1e05f9ceffeaebcea2b690e02844d15b89d80a8f | [
"MIT"
] | null | null | null | """yatube URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('posts.urls', namespace='posts')),
path('admin/', admin.site.urls),
]
| 32.6 | 77 | 0.700613 | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('posts.urls', namespace='posts')),
path('admin/', admin.site.urls),
]
| true | true |
f7f79b07899dc658f29ebe55eb4b6ce81224488c | 13,780 | py | Python | client-py/src/client_example.py | SailVR/incubator-iotdb | f158fe1c96575ba7737b0d1e69c2c075735b8b58 | [
"Apache-2.0"
] | 2 | 2020-06-01T13:58:51.000Z | 2020-12-23T08:16:53.000Z | client-py/src/client_example.py | SailVR/incubator-iotdb | f158fe1c96575ba7737b0d1e69c2c075735b8b58 | [
"Apache-2.0"
] | null | null | null | client-py/src/client_example.py | SailVR/incubator-iotdb | f158fe1c96575ba7737b0d1e69c2c075735b8b58 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import struct
# If you generate IoTDB python library manually, add it to your python path
#for example, if you run compile.sh, you can use the following code:
# sys.path.append("../target")
#if you use maven to compile the thrift api, just use the follwoing code:
sys.path.append("../../service-rpc/target/generated-sources-python")
from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.transport import TSocket, TTransport
from iotdb.rpc.TSIService import Client, TSCreateTimeseriesReq, TSInsertRecordReq, \
TSInsertTabletReq, TSExecuteStatementReq, TSOpenSessionReq, TSQueryDataSet, \
TSFetchResultsReq, TSCloseOperationReq, \
TSCloseSessionReq
from iotdb.rpc.ttypes import TSProtocolVersion, TSFetchMetadataReq
TSDataType = {
'BOOLEAN': 0,
'INT32': 1,
'INT64': 2,
'FLOAT': 3,
'DOUBLE': 4,
'TEXT': 5
}
TSEncoding = {
'PLAIN': 0,
'PLAIN_DICTIONARY': 1,
'RLE': 2,
'DIFF': 3,
'TS_2DIFF': 4,
'BITMAP': 5,
'GORILLA': 6,
'REGULAR': 7
}
Compressor = {
'UNCOMPRESSED': 0,
'SNAPPY': 1,
'GZIP': 2,
'LZO': 3,
'SDT': 4,
'PAA': 5,
'PLA': 6
}
class Enum:
def __init__(self):
pass
MetaQueryTypes = Enum()
MetaQueryTypes.CATALOG_COLUMN = "COLUMN"
MetaQueryTypes.CATALOG_TIMESERIES = "SHOW_TIMESERIES"
MetaQueryTypes.CATALOG_STORAGE_GROUP = "SHOW_STORAGE_GROUP"
MetaQueryTypes.CATALOG_DEVICES = "SHOW_DEVICES"
MetaQueryTypes.CATALOG_CHILD_PATHS = "SHOW_CHILD_PATHS"
# used to do `and` operation with bitmap to judge whether the value is null
flag = 0x80
INT32_BYTE_LEN = 4
BOOL_BYTE_LEN = 1
INT64_BYTE_LEN = 8
FLOAT_BYTE_LEN = 4
DOUBLE_BYTE_LEN = 8
def is_null(bitmap_bytes, rowNum):
bitmap = bitmap_bytes[rowNum // 8]
shift = rowNum % 8
return ((flag >> shift) & bitmap) == 0
def convertQueryDataSet(queryDataSet, dataTypeList):
time_bytes = queryDataSet.time
value_bytes_list = queryDataSet.valueList
bitmap_list = queryDataSet.bitmapList
row_count = len(time_bytes) // 8
time_unpack_str = '>' + str(row_count) + 'q'
records = []
times = struct.unpack(time_unpack_str, time_bytes)
for i in range(row_count):
records.append([times[i]])
for i in range(len(dataTypeList)):
value_type = dataTypeList[i]
value_bytes = value_bytes_list[i]
bitmap = bitmap_list[i]
# the actual number of value
if value_type == 'BOOLEAN':
value_count = len(value_bytes) // BOOL_BYTE_LEN
values_list = struct.unpack('>' + str(value_count) + '?', value_bytes)
elif value_type == 'INT32':
value_count = len(value_bytes) // INT32_BYTE_LEN
values_list = struct.unpack('>' + str(value_count) + 'i', value_bytes)
elif value_type == 'INT64':
value_count = len(value_bytes) // INT64_BYTE_LEN
values_list = struct.unpack('>' + str(value_count) + 'q', value_bytes)
elif value_type == 'FLOAT':
value_count = len(value_bytes) // FLOAT_BYTE_LEN
values_list = struct.unpack('>' + str(value_count) + 'f', value_bytes)
elif value_type == 'DOUBLE':
value_count = len(value_bytes) // DOUBLE_BYTE_LEN
values_list = struct.unpack('>' + str(value_count) + 'd', value_bytes)
elif value_type == 'TEXT':
values_list = []
# current index for value in values_list
value_index = 0
for j in range(row_count):
if is_null(bitmap, j):
records[j].append('null')
else:
if value_type != 'TEXT':
records[j].append(values_list[value_index])
else:
size = value_bytes[:4]
value_bytes = value_bytes[4:]
size = struct.unpack('>i', size)[0]
records[j].append(value_bytes[:size])
value_bytes = value_bytes[size:]
value_index += 1
return records
def valueListToBytes(values, dataTypes):
valueByte = bytearray();
for value, dataType in enumerate(values, dataTypes):
pass
return valueByte
if __name__ == '__main__':
ip = "127.0.0.1"
port = "6667"
username = 'root'
password = 'root'
# Make socket
transport = TSocket.TSocket(ip, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
# use TCompactProtocol if the server enable thrift compression,
# otherwise use TBinaryProtocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = Client(protocol)
# Connect!
transport.open()
# Authentication
clientProtocol = TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3
resp = client.openSession(TSOpenSessionReq(client_protocol=clientProtocol,
username=username,
password=password))
if resp.serverProtocolVersion != clientProtocol:
print('Inconsistent protocol, server version: %d, client version: %d'
% (resp.serverProtocolVersion, clientProtocol))
if resp.serverProtocolVersion > clientProtocol:
exit()
sessionId = resp.sessionId
# This is necessary for resource control
stmtId = client.requestStatementId(sessionId)
# create a storage group
status = client.setStorageGroup(sessionId, "root.group1")
print(status.message)
# create timeseries
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s1",
TSDataType['INT64'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s2",
TSDataType['INT32'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s3",
TSDataType['DOUBLE'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s4",
TSDataType['FLOAT'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s5",
TSDataType['BOOLEAN'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
"root.group1.s6",
TSDataType['TEXT'],
TSEncoding['PLAIN'],
Compressor['UNCOMPRESSED']))
print(status.message)
deviceId = "root.group1"
measurements = ["s1", "s2", "s3", "s4", "s5", "s6"]
# insert a single row
values = [1, 11, 1.1, 11.1, True, "\'text0\'"]
dataTypes = [TSDataType['INT64'], TSDataType['INT32'], TSDataType['DOUBLE'],
TSDataType['FLOAT'], TSDataType['BOOLEAN'], TSDataType['TEXT']]
value_pack_str = '>hqhihdhfh?hi' + str(len(values[5])) + 's'
encoding = 'utf-8'
valueByte = bytearray()
valueByte.extend(struct.pack(value_pack_str,dataTypes[0], values[0],
dataTypes[1], values[1],
dataTypes[2], values[2],
dataTypes[3], values[3],
dataTypes[4], values[4],
dataTypes[5], len(values[5]), bytes(values[5], encoding)))
timestamp = 1
status = client.insertRecord(TSInsertRecordReq(sessionId, deviceId, measurements, valueByte, timestamp))
print(status.message)
# insert multiple rows, this interface is more efficient
values = bytearray()
times = bytearray()
rowCnt = 3
# the first 3 belong to 's1', the second 3 belong to 's2'... the last 3
# belong to 's6'
# to transfer a string, you must first send its length and then its bytes
# (like the last 3 'i7s'). Text values should start and end with ' or ".
# IoTDB use big endian in rpc
value_pack_str = '>3q3i3d3f3bi7si7si7s'
time_pack_str = '>3q'
encoding = 'utf-8'
values.extend(struct.pack(value_pack_str, 2, 3, 4, 22, 33, 44, 2.2, 3.3,
4.4, 22.2, 33.3, 44.4, True, True, False,
len(bytes('\'text1\'', encoding)),
bytes('\'text1\'', encoding),
len(bytes('\'text2\'', encoding)),
bytes('\'text2\'', encoding),
len(bytes('\'text3\'', encoding)),
bytes('\'text3\'', encoding)))
# warning: the data in batch must be sorted by time
times.extend(struct.pack(time_pack_str, 2, 3, 4))
resp = client.insertTablet(TSInsertTabletReq(sessionId,deviceId,
measurements, values,
times, dataTypes, rowCnt))
status = resp.code
print(status)
# execute deletion (or other statements)
resp = client.executeStatement(TSExecuteStatementReq(sessionId, "DELETE FROM "
"root.group1 where time < 2", stmtId))
status = resp.status
print(status.message)
# query the data
stmt = "SELECT * FROM root.group1"
fetchSize = 2
# this is also for resource control, make sure different queries will not use the same id at the same time
resp = client.executeQueryStatement(TSExecuteStatementReq(sessionId, stmt, stmtId))
# headers
dataTypeList = resp.dataTypeList
print(resp.columns)
print(dataTypeList)
status = resp.status
print(status.message)
queryId = resp.queryId
while True:
rst = client.fetchResults(TSFetchResultsReq(sessionId, stmt, fetchSize,
queryId, True)).queryDataSet
records = convertQueryDataSet(rst, dataTypeList)
if len(records) == 0:
break
for record in records:
print(record)
# do not forget to close it when a query is over
closeReq = TSCloseOperationReq(sessionId)
closeReq.queryId = queryId
client.closeOperation(closeReq)
# query metadata
metaReq = TSFetchMetadataReq(sessionId=sessionId, type=MetaQueryTypes.CATALOG_DEVICES)
print(client.fetchMetadata(metaReq).status)
metaReq = TSFetchMetadataReq(sessionId=sessionId,
type=MetaQueryTypes.CATALOG_TIMESERIES,
columnPath='root')
#print(client.fetchMetadata(metaReq).timeseriesList)
metaReq = TSFetchMetadataReq(sessionId=sessionId,
type=MetaQueryTypes.CATALOG_CHILD_PATHS,
columnPath='root')
#print(client.fetchMetadata(metaReq).childPaths)
metaReq = TSFetchMetadataReq(sessionId=sessionId, type=MetaQueryTypes.CATALOG_STORAGE_GROUP)
#print(client.fetchMetadata(metaReq).storageGroups)
metaReq = TSFetchMetadataReq(sessionId=sessionId,
type=MetaQueryTypes.CATALOG_COLUMN,
columnPath='root.group1.s1')
print(client.fetchMetadata(metaReq).dataType)
# and do not forget to close the session before exiting
client.closeSession(TSCloseSessionReq(sessionId))
| 39.597701 | 110 | 0.567054 |
import sys
import struct
sys.path.append("../../service-rpc/target/generated-sources-python")
from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.transport import TSocket, TTransport
from iotdb.rpc.TSIService import Client, TSCreateTimeseriesReq, TSInsertRecordReq, \
TSInsertTabletReq, TSExecuteStatementReq, TSOpenSessionReq, TSQueryDataSet, \
TSFetchResultsReq, TSCloseOperationReq, \
TSCloseSessionReq
from iotdb.rpc.ttypes import TSProtocolVersion, TSFetchMetadataReq
# Numeric codes for IoTDB column data types; the values must match the
# server-side TSDataType enum, since they are packed verbatim into the
# thrift RPC payloads (see value_pack_str in __main__).
TSDataType = {
    'BOOLEAN': 0,
    'INT32': 1,
    'INT64': 2,
    'FLOAT': 3,
    'DOUBLE': 4,
    'TEXT': 5
}
# Encoding schemes accepted by createTimeseries; codes mirror the server's
# TSEncoding enum.
TSEncoding = {
    'PLAIN': 0,
    'PLAIN_DICTIONARY': 1,
    'RLE': 2,
    'DIFF': 3,
    'TS_2DIFF': 4,
    'BITMAP': 5,
    'GORILLA': 6,
    'REGULAR': 7
}
# Compression schemes accepted by createTimeseries; codes mirror the
# server's CompressionType enum.
Compressor = {
    'UNCOMPRESSED': 0,
    'SNAPPY': 1,
    'GZIP': 2,
    'LZO': 3,
    'SDT': 4,
    'PAA': 5,
    'PLA': 6
}
# Minimal attribute bag used as a poor-man's enum container below.
class Enum:
    def __init__(self):
        pass
# Metadata query type strings understood by TSFetchMetadataReq.type.
MetaQueryTypes = Enum()
MetaQueryTypes.CATALOG_COLUMN = "COLUMN"
MetaQueryTypes.CATALOG_TIMESERIES = "SHOW_TIMESERIES"
MetaQueryTypes.CATALOG_STORAGE_GROUP = "SHOW_STORAGE_GROUP"
MetaQueryTypes.CATALOG_DEVICES = "SHOW_DEVICES"
MetaQueryTypes.CATALOG_CHILD_PATHS = "SHOW_CHILD_PATHS"
# Bit mask for the most significant bit: IoTDB bitmaps store presence
# flags MSB-first, one byte per 8 rows.
flag = 0x80
# Wire sizes (bytes) of the fixed-width IoTDB column types.
INT32_BYTE_LEN = 4
BOOL_BYTE_LEN = 1
INT64_BYTE_LEN = 8
FLOAT_BYTE_LEN = 4
DOUBLE_BYTE_LEN = 8
def is_null(bitmap_bytes, rowNum):
    """Return True when row *rowNum* is marked null in *bitmap_bytes*.

    Each bitmap byte covers 8 consecutive rows, most significant bit
    first; a cleared bit means the value is null.
    """
    byte_index, bit_index = divmod(rowNum, 8)
    mask = flag >> bit_index
    return (bitmap_bytes[byte_index] & mask) == 0
def convertQueryDataSet(queryDataSet, dataTypeList):
    """Decode a TSQueryDataSet into a list of row records.

    Each record is ``[timestamp, col0, col1, ...]``; values that are null
    according to the per-column bitmap are rendered as the string 'null'.
    All payloads are big-endian, per the IoTDB rpc convention.
    """
    time_bytes = queryDataSet.time
    value_bytes_list = queryDataSet.valueList
    bitmap_list = queryDataSet.bitmapList
    # Timestamps are int64, hence 8 bytes per row.
    row_count = len(time_bytes) // 8
    time_unpack_str = '>' + str(row_count) + 'q'
    records = []
    times = struct.unpack(time_unpack_str, time_bytes)
    for i in range(row_count):
        records.append([times[i]])
    for i in range(len(dataTypeList)):
        value_type = dataTypeList[i]
        value_bytes = value_bytes_list[i]
        bitmap = bitmap_list[i]
        # Fixed-width columns are bulk-unpacked up front. The value buffer
        # appears to hold only the non-null entries, so it is indexed by
        # the separate value_index counter in the row loop below.
        if value_type == 'BOOLEAN':
            value_count = len(value_bytes) // BOOL_BYTE_LEN
            values_list = struct.unpack('>' + str(value_count) + '?', value_bytes)
        elif value_type == 'INT32':
            value_count = len(value_bytes) // INT32_BYTE_LEN
            values_list = struct.unpack('>' + str(value_count) + 'i', value_bytes)
        elif value_type == 'INT64':
            value_count = len(value_bytes) // INT64_BYTE_LEN
            values_list = struct.unpack('>' + str(value_count) + 'q', value_bytes)
        elif value_type == 'FLOAT':
            value_count = len(value_bytes) // FLOAT_BYTE_LEN
            values_list = struct.unpack('>' + str(value_count) + 'f', value_bytes)
        elif value_type == 'DOUBLE':
            value_count = len(value_bytes) // DOUBLE_BYTE_LEN
            values_list = struct.unpack('>' + str(value_count) + 'd', value_bytes)
        elif value_type == 'TEXT':
            # TEXT values are variable length; they are sliced off
            # value_bytes lazily in the row loop below.
            values_list = []
        # NOTE(review): an unrecognized value_type leaves values_list
        # undefined (NameError on the first non-null row). The server
        # should only send the six types above -- confirm before relying
        # on this.
        value_index = 0
        for j in range(row_count):
            if is_null(bitmap, j):
                records[j].append('null')
            else:
                if value_type != 'TEXT':
                    records[j].append(values_list[value_index])
                else:
                    # TEXT wire format: 4-byte big-endian length prefix,
                    # then the raw bytes.
                    size = value_bytes[:4]
                    value_bytes = value_bytes[4:]
                    size = struct.unpack('>i', size)[0]
                    records[j].append(value_bytes[:size])
                    value_bytes = value_bytes[size:]
                value_index += 1
    return records
def valueListToBytes(values, dataTypes):
    """Serialize *values* (typed by the parallel *dataTypes* list) into the
    byte layout expected by TSInsertRecordReq.

    NOTE(review): this helper is an unfinished stub in the original file --
    the loop body is still TODO and an empty buffer is returned. The
    previous version also crashed with a TypeError on every call because it
    used ``enumerate(values, dataTypes)``; enumerate's second argument must
    be an int start index, so ``zip`` is the intended pairing.
    """
    valueByte = bytearray()
    for value, dataType in zip(values, dataTypes):
        # TODO: pack each (dataType, value) pair, mirroring the
        # value_pack_str layout built in __main__.
        pass
    return valueByte
if __name__ == '__main__':
    # End-to-end demo of the raw thrift RPC interface to a local IoTDB
    # server: open a session, create a storage group and six timeseries,
    # insert one record and one tablet, run a deletion, query data and
    # metadata, then close the session.
    ip = "127.0.0.1"
    port = "6667"
    username = 'root'
    password = 'root'
    # Thrift stack: framed transport over TCP, binary protocol (must match
    # the server's rpc configuration).
    transport = TSocket.TSocket(ip, port)
    transport = TTransport.TFramedTransport(transport)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    clientProtocol = TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3
    resp = client.openSession(TSOpenSessionReq(client_protocol=clientProtocol,
                                               username=username,
                                               password=password))
    # Bail out only when the server speaks a NEWER protocol than us.
    if resp.serverProtocolVersion != clientProtocol:
        print('Inconsistent protocol, server version: %d, client version: %d'
              % (resp.serverProtocolVersion, clientProtocol))
        if resp.serverProtocolVersion > clientProtocol:
            exit()
    sessionId = resp.sessionId
    stmtId = client.requestStatementId(sessionId)
    status = client.setStorageGroup(sessionId, "root.group1")
    print(status.message)
    # One timeseries per sensor s1..s6, covering all six data types.
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s1",
                                                           TSDataType['INT64'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s2",
                                                           TSDataType['INT32'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s3",
                                                           TSDataType['DOUBLE'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s4",
                                                           TSDataType['FLOAT'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s5",
                                                           TSDataType['BOOLEAN'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    status = client.createTimeseries(TSCreateTimeseriesReq(sessionId,
                                                           "root.group1.s6",
                                                           TSDataType['TEXT'],
                                                           TSEncoding['PLAIN'],
                                                           Compressor['UNCOMPRESSED']))
    print(status.message)
    deviceId = "root.group1"
    measurements = ["s1", "s2", "s3", "s4", "s5", "s6"]
    # Single-record insert: each value is preceded by its int16 type code
    # ('h' in the pack string); TEXT is an int32 length plus raw bytes.
    values = [1, 11, 1.1, 11.1, True, "\'text0\'"]
    dataTypes = [TSDataType['INT64'], TSDataType['INT32'], TSDataType['DOUBLE'],
                 TSDataType['FLOAT'], TSDataType['BOOLEAN'], TSDataType['TEXT']]
    value_pack_str = '>hqhihdhfh?hi' + str(len(values[5])) + 's'
    encoding = 'utf-8'
    valueByte = bytearray()
    valueByte.extend(struct.pack(value_pack_str,dataTypes[0], values[0],
                                 dataTypes[1], values[1],
                                 dataTypes[2], values[2],
                                 dataTypes[3], values[3],
                                 dataTypes[4], values[4],
                                 dataTypes[5], len(values[5]), bytes(values[5], encoding)))
    timestamp = 1
    status = client.insertRecord(TSInsertRecordReq(sessionId, deviceId, measurements, valueByte, timestamp))
    print(status.message)
    # Tablet insert: 3 rows, values laid out column-by-column
    # (3 int64, 3 int32, 3 double, 3 float, 3 bool, 3 length-prefixed texts).
    values = bytearray()
    times = bytearray()
    rowCnt = 3
    # IoTDB use big endian in rpc
    value_pack_str = '>3q3i3d3f3bi7si7si7s'
    time_pack_str = '>3q'
    encoding = 'utf-8'
    values.extend(struct.pack(value_pack_str, 2, 3, 4, 22, 33, 44, 2.2, 3.3,
                              4.4, 22.2, 33.3, 44.4, True, True, False,
                              len(bytes('\'text1\'', encoding)),
                              bytes('\'text1\'', encoding),
                              len(bytes('\'text2\'', encoding)),
                              bytes('\'text2\'', encoding),
                              len(bytes('\'text3\'', encoding)),
                              bytes('\'text3\'', encoding)))
    # warning: the data in batch must be sorted by time
    times.extend(struct.pack(time_pack_str, 2, 3, 4))
    resp = client.insertTablet(TSInsertTabletReq(sessionId,deviceId,
                                                 measurements, values,
                                                 times, dataTypes, rowCnt))
    status = resp.code
    print(status)
    # execute deletion (or other statements)
    resp = client.executeStatement(TSExecuteStatementReq(sessionId, "DELETE FROM "
                                                                    "root.group1 where time < 2", stmtId))
    status = resp.status
    print(status.message)
    # query the data
    stmt = "SELECT * FROM root.group1"
    fetchSize = 2
    # this is also for resource control, make sure different queries will not use the same id at the same time
    resp = client.executeQueryStatement(TSExecuteStatementReq(sessionId, stmt, stmtId))
    # headers
    dataTypeList = resp.dataTypeList
    print(resp.columns)
    print(dataTypeList)
    status = resp.status
    print(status.message)
    queryId = resp.queryId
    # Page through the result set fetchSize rows at a time until empty.
    while True:
        rst = client.fetchResults(TSFetchResultsReq(sessionId, stmt, fetchSize,
                                                    queryId, True)).queryDataSet
        records = convertQueryDataSet(rst, dataTypeList)
        if len(records) == 0:
            break
        for record in records:
            print(record)
    # do not forget to close it when a query is over
    closeReq = TSCloseOperationReq(sessionId)
    closeReq.queryId = queryId
    client.closeOperation(closeReq)
    # query metadata
    metaReq = TSFetchMetadataReq(sessionId=sessionId, type=MetaQueryTypes.CATALOG_DEVICES)
    print(client.fetchMetadata(metaReq).status)
    metaReq = TSFetchMetadataReq(sessionId=sessionId,
                                 type=MetaQueryTypes.CATALOG_TIMESERIES,
                                 columnPath='root')
    #print(client.fetchMetadata(metaReq).timeseriesList)
    metaReq = TSFetchMetadataReq(sessionId=sessionId,
                                 type=MetaQueryTypes.CATALOG_CHILD_PATHS,
                                 columnPath='root')
    #print(client.fetchMetadata(metaReq).childPaths)
    metaReq = TSFetchMetadataReq(sessionId=sessionId, type=MetaQueryTypes.CATALOG_STORAGE_GROUP)
    #print(client.fetchMetadata(metaReq).storageGroups)
    metaReq = TSFetchMetadataReq(sessionId=sessionId,
                                 type=MetaQueryTypes.CATALOG_COLUMN,
                                 columnPath='root.group1.s1')
    print(client.fetchMetadata(metaReq).dataType)
    # and do not forget to close the session before exiting
    client.closeSession(TSCloseSessionReq(sessionId))
f7f79b5f7aadea4a945b5bb452ec9cb961364a6e | 9,532 | py | Python | leo/plugins/datenodes.py | gchiu/leo-editor | 9de55692d724fe8f12d0809540956327d131dbc5 | [
"MIT"
] | null | null | null | leo/plugins/datenodes.py | gchiu/leo-editor | 9de55692d724fe8f12d0809540956327d131dbc5 | [
"MIT"
] | null | null | null | leo/plugins/datenodes.py | gchiu/leo-editor | 9de55692d724fe8f12d0809540956327d131dbc5 | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:ekr.20060807103814.1: * @file datenodes.py
#@+<< docstring >>
#@+node:bobjack.20080615065747.4: ** << docstring >>
""" Allows users to insert headlines containing dates.
'Date nodes' are nodes that have dates in their headlines. They may be added to
the outline one at a time, a month's-worth at a time, or a year's-worth at a
time. The format of the labels (headlines) is configurable.
There are options to omit Saturdays and Sundays.
An 'Insert Date Nodes ...' submenu will be created (by default) in the 'Outline'
menu. This menu can be suppressed by using either of the following settings:
- @bool suppress-datenodes-menus
- @bool suppress-all-plugins-menus
The following commands are available for use via the minibuffer or in
@menu/@popup settings.
- datenodes-today
- datenodes-this-month
- datenodes-this-year
"""
#@-<< docstring >>
#@@language python
#@@tabwidth -4
__version__ = "0.7"
#@+<< version history >>
#@+node:gfunch.20041207100416.2: ** << version history >>
#@@nocolor
#@+at
#
# 0.1: Initial version.
# 0.2: Improved menu structure. Added ini file.
# 0.3: Changed docstring slightly.
# 0.4: Added event=None to insert_xxx_node.
# 0.5: Added options to omit saturdays and sundays. Use leoSettings.leo instead of datenodes.ini for storing options.
# 0.6: Removed @c from most nodes: this is not needed. Also removed .ini file from cvs.
# 0.7 bobjack:
# - added plugin init method
# - exposed the pluginController as c.theDateNodesController
# - added support for settings:
# - @bool suppress-datenodes-menus
# - added minibuffer commands
# - datenodes-today
# - datenodes-this-month
# - datenodes-this-year
#@-<< version history >>
#@+<< todo >>
#@+node:bobjack.20080615065747.5: ** << todo >>
#@@nocolor
#@+at
#
# - add commands to allow day, month, year to be input via minibuffer
#
# - add a calendar widget to allow dates to be entered via gui
#
# - add extra methods to controller to make it easier to use the plugin from scripts
#
# - allow date ranges to be specified
#
# - add a dialog that allows all parameters to be slected prior to insertion
#@-<< todo >>
#@+<< imports >>
#@+node:gfunch.20041207100416.3: ** << imports >>
import leo.core.leoGlobals as g
import calendar
import codecs
import datetime
#@-<< imports >>
#@+others
#@+node:bobjack.20080615065747.2: ** init
def init():
'''Return True if the plugin has loaded successfully.'''
g.registerHandler("after-create-leo-frame", on_create)
g.plugin_signon(__name__)
return True # OK for unit testing.
#@+node:gfunch.20041207100416.5: ** class DateNodes
class DateNodes(object):
    """Main DateNodes class"""
    # The defaults for all possible settings.
    default_settings = {
        "datenodes_body_text": "To do...",
        "datenodes_day_node_headline": "%Y-%m-%d",
        "datenodes_month_node_day_headline": "%d: %A",
        "datenodes_month_node_month_headline": "%B %Y",
        "datenodes_month_node_omit_saturdays": True,
        "datenodes_month_node_omit_sundays": True,
        "datenodes_year_node_day_headline": "%d: %A",
        "datenodes_year_node_month_headline": "%B",
        "datenodes_year_node_year_headline": "%Y",
        "datenodes_year_node_omit_saturdays": True,
        "datenodes_year_node_omit_sundays": True
    }
    # Names of settings that have to be read with getBool()
    boolean_settings = [
        "datenodes_month_node_omit_saturdays",
        "datenodes_month_node_omit_sundays",
        "datenodes_year_node_omit_saturdays",
        "datenodes_year_node_omit_sundays"
    ]
    ascii_encoder = codecs.getencoder("ASCII")
    #@+others
    #@+node:gfunch.20041207100416.6: *3* __init__(DateNodes, datenodes.py)
    def __init__(self, c):
        """Bind to commander c, read settings and register minibuffer commands."""
        self.c = c
        self._get_settings()
        for commandName, method in (
            ('datenodes-today', self.insert_day_node),
            ('datenodes-this-month', self.insert_month_node),
            ('datenodes-this-year', self.insert_year_node),
        ):
            c.k.registerCommand(commandName, method)
    #@+node:gfunch.20041209073652: *3* _get_settings
    def _get_settings(self):
        """Get any configuration options."""
        settings = {}
        for setting in DateNodes.default_settings:
            if setting in DateNodes.boolean_settings:
                getter = self.c.config.getBool
            else:
                getter = self.c.config.getString
            value = getter(setting)
            if value is None:
                value = DateNodes.default_settings[setting]
            settings[setting[10:]] = value # Omit datenodes_ prefix
        self.settings = settings
    #@+node:dcb.20060806185031: *3* _insert_date_node
    def _insert_date_node(self, parent, date, format):
        """Insert a child of parent whose headline is date rendered with format."""
        p = parent.insertAsLastChild()
        p.h = date.strftime(g.toUnicode(format))
        return p
    #@+node:dcb.20060806183810: *3* _insert_day_node
    def _insert_day_node(self, parent, date, day_fmt):
        """Insert a single day node, seeding its body with the configured text."""
        p = self._insert_date_node(parent, date, day_fmt)
        p.b = self.settings.get("body_text", '')
        return p
    #@+node:gfunch.20041207100416.11: *3* _insert_month_node
    def _insert_month_node(self, parent, date, day_fmt, month_fmt, omit_saturdays, omit_sundays):
        """Insert a months-worth of date nodes into the outline ."""
        month_node = self._insert_date_node(parent, date, month_fmt)
        year, month = date.timetuple()[:2]
        # monthrange() returns (weekday of the 1st, number of days);
        # only the day count is needed here.
        num_days = calendar.monthrange(year, month)[1]
        for day in range(1, num_days + 1):
            day_date = datetime.date(year, month, day)
            isoweekday = day_date.isoweekday()
            if (
                (isoweekday == 6 and omit_saturdays) or
                (isoweekday == 7 and omit_sundays)
            ):
                continue
            self._insert_day_node(
                parent = month_node,
                date = day_date,
                day_fmt = day_fmt)
        return month_node
    #@+node:gfunch.20041207100416.12: *3* _insert_year_node
    def _insert_year_node(self,
        parent,
        date,
        day_fmt,
        month_fmt,
        year_fmt,
        omit_saturdays,
        omit_sundays,
    ):
        """Insert a years-worth of date nodes into the outline."""
        year_node = self._insert_date_node(parent, date, year_fmt)
        year = date.year
        for month in range(1, 13):
            # Anchor each month at day 1. Reusing today's day-of-month
            # raised ValueError for short months (e.g. running on Jan 31
            # built datetime.date(year, 2, 31)); the default month
            # headline formats render only month/year fields anyway.
            month_date = datetime.date(year, month, 1)
            self._insert_month_node(
                parent = year_node,
                date = month_date,
                day_fmt = day_fmt,
                month_fmt = month_fmt,
                omit_saturdays = omit_saturdays,
                omit_sundays = omit_sundays)
        return year_node
    #@+node:gfunch.20041208074734: *3* insert_day_node
    def insert_day_node(self, event = None):
        """Insert a node for today under the selected node and select it."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["day_node_headline"]
        day_node = self._insert_day_node(self.c.p, today, day_fmt)
        c.selectPosition(day_node)
        c.redraw()
    #@+node:dcb.20060806183928: *3* insert_month_node
    def insert_month_node(self, event = None):
        """Insert the current month's nodes under the selected node."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["month_node_day_headline"]
        month_fmt = self.settings["month_node_month_headline"]
        omit_saturdays = self.settings["month_node_omit_saturdays"]
        omit_sundays = self.settings["month_node_omit_sundays"]
        month_node = self._insert_month_node(
            c.p, today, day_fmt, month_fmt, omit_saturdays, omit_sundays)
        c.selectPosition(month_node)
        c.redraw()
    #@+node:dcb.20060806184117: *3* insert_year_node
    def insert_year_node(self, event = None):
        """Insert the current year's nodes under the selected node."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["year_node_day_headline"]
        month_fmt = self.settings["year_node_month_headline"]
        year_fmt = self.settings["year_node_year_headline"]
        omit_saturdays = self.settings["year_node_omit_saturdays"]
        omit_sundays = self.settings["year_node_omit_sundays"]
        year_node = self._insert_year_node(
            c.p, today, day_fmt, month_fmt, year_fmt, omit_saturdays, omit_sundays)
        c.selectPosition(year_node)
        c.redraw()
    #@-others
#@+node:gfunch.20041207100654: ** on_create
def on_create(tag, keywords):
    """Leo 'after-create-leo-frame' handler: attach a DateNodes controller
    to the new commander and (optionally) build the Outline submenu."""
    c = keywords.get("c")
    if not (c and c.exists):
        return
    # Rewrite to eliminate a pylint complaint.
    if hasattr(c,'theDateNodesController'):
        return
    # establish a class instance
    c.theDateNodesController = instance = DateNodes(c)
    #@+<< Create the plug-in menu. >>
    #@+node:bobjack.20080615065747.3: *3* << Create the plug-in menu. >>
    # Honors @bool suppress-datenodes-menus (see module docstring).
    if not c.config.getBool('suppress-datenodes-menus'):
        # create a menu separator
        c.frame.menu.createMenuItemsFromTable("Outline", [("-", None, None),])
        # create an expandable menu
        table = [("Single Day", None, instance.insert_day_node),
                 ("Full Month", None, instance.insert_month_node),
                 ("Full Year", None, instance.insert_year_node)]
        expandMenu = c.frame.menu.createNewMenu("Insert Date Nodes...", "Outline")
        c.frame.menu.createMenuEntries(expandMenu, table, dynamicMenu = True)
    #@-<< Create the plug-in menu. >>
| 35.700375 | 117 | 0.644985 |
__version__ = "0.7"
import leo.core.leoGlobals as g
import calendar
import codecs
import datetime
def init():
g.registerHandler("after-create-leo-frame", on_create)
g.plugin_signon(__name__)
return True
class DateNodes(object):
    """Insert 'date nodes' (nodes whose headlines are dates) into a Leo
    outline: one day, one month's worth, or one year's worth at a time."""
    # Defaults used when a setting is absent from the Leo configuration.
    default_settings = {
        "datenodes_body_text": "To do...",
        "datenodes_day_node_headline": "%Y-%m-%d",
        "datenodes_month_node_day_headline": "%d: %A",
        "datenodes_month_node_month_headline": "%B %Y",
        "datenodes_month_node_omit_saturdays": True,
        "datenodes_month_node_omit_sundays": True,
        "datenodes_year_node_day_headline": "%d: %A",
        "datenodes_year_node_month_headline": "%B",
        "datenodes_year_node_year_headline": "%Y",
        "datenodes_year_node_omit_saturdays": True,
        "datenodes_year_node_omit_sundays": True
    }
    # Settings that must be read with c.config.getBool().
    boolean_settings = [
        "datenodes_month_node_omit_saturdays",
        "datenodes_month_node_omit_sundays",
        "datenodes_year_node_omit_saturdays",
        "datenodes_year_node_omit_sundays"
    ]
    ascii_encoder = codecs.getencoder("ASCII")
    def __init__(self, c):
        """Bind to commander c, read settings and register minibuffer commands."""
        self.c = c
        self._get_settings()
        for commandName, method in (
            ('datenodes-today', self.insert_day_node),
            ('datenodes-this-month', self.insert_month_node),
            ('datenodes-this-year', self.insert_year_node),
        ):
            c.k.registerCommand(commandName, method)
    def _get_settings(self):
        """Read configuration, falling back to default_settings."""
        settings = {}
        for setting in DateNodes.default_settings:
            if setting in DateNodes.boolean_settings:
                getter = self.c.config.getBool
            else:
                getter = self.c.config.getString
            value = getter(setting)
            if value is None:
                value = DateNodes.default_settings[setting]
            settings[setting[10:]] = value  # Omit the 'datenodes_' prefix.
        self.settings = settings
    def _insert_date_node(self, parent, date, format):
        """Insert a child of parent whose headline is date rendered with format."""
        p = parent.insertAsLastChild()
        p.h = date.strftime(g.toUnicode(format))
        return p
    def _insert_day_node(self, parent, date, day_fmt):
        """Insert a single day node, seeding its body with the configured text."""
        p = self._insert_date_node(parent, date, day_fmt)
        p.b = self.settings.get("body_text", '')
        return p
    def _insert_month_node(self, parent, date, day_fmt, month_fmt, omit_saturdays, omit_sundays):
        """Insert a month's worth of day nodes under a new month node."""
        month_node = self._insert_date_node(parent, date, month_fmt)
        year, month = date.timetuple()[:2]
        # monthrange() returns (weekday of the 1st, number of days);
        # only the day count is needed here.
        num_days = calendar.monthrange(year, month)[1]
        for day in range(1, num_days + 1):
            day_date = datetime.date(year, month, day)
            isoweekday = day_date.isoweekday()
            if (
                (isoweekday == 6 and omit_saturdays) or
                (isoweekday == 7 and omit_sundays)
            ):
                continue
            self._insert_day_node(
                parent = month_node,
                date = day_date,
                day_fmt = day_fmt)
        return month_node
    def _insert_year_node(self,
        parent,
        date,
        day_fmt,
        month_fmt,
        year_fmt,
        omit_saturdays,
        omit_sundays,
    ):
        """Insert a year's worth of month/day nodes under a new year node."""
        year_node = self._insert_date_node(parent, date, year_fmt)
        year = date.year
        for month in range(1, 13):
            # Anchor each month at day 1. Reusing today's day-of-month
            # raised ValueError for short months (e.g. running on Jan 31
            # built datetime.date(year, 2, 31)); the default month
            # headline formats render only month/year fields anyway.
            month_date = datetime.date(year, month, 1)
            self._insert_month_node(
                parent = year_node,
                date = month_date,
                day_fmt = day_fmt,
                month_fmt = month_fmt,
                omit_saturdays = omit_saturdays,
                omit_sundays = omit_sundays)
        return year_node
    def insert_day_node(self, event = None):
        """Insert a node for today under the selected node and select it."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["day_node_headline"]
        day_node = self._insert_day_node(self.c.p, today, day_fmt)
        c.selectPosition(day_node)
        c.redraw()
    def insert_month_node(self, event = None):
        """Insert the current month's nodes under the selected node."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["month_node_day_headline"]
        month_fmt = self.settings["month_node_month_headline"]
        omit_saturdays = self.settings["month_node_omit_saturdays"]
        omit_sundays = self.settings["month_node_omit_sundays"]
        month_node = self._insert_month_node(
            c.p, today, day_fmt, month_fmt, omit_saturdays, omit_sundays)
        c.selectPosition(month_node)
        c.redraw()
    def insert_year_node(self, event = None):
        """Insert the current year's nodes under the selected node."""
        c = self.c
        today = datetime.date.today()
        day_fmt = self.settings["year_node_day_headline"]
        month_fmt = self.settings["year_node_month_headline"]
        year_fmt = self.settings["year_node_year_headline"]
        omit_saturdays = self.settings["year_node_omit_saturdays"]
        omit_sundays = self.settings["year_node_omit_sundays"]
        year_node = self._insert_year_node(
            c.p, today, day_fmt, month_fmt, year_fmt, omit_saturdays, omit_sundays)
        c.selectPosition(year_node)
        c.redraw()
def on_create(tag, keywords):
    """Leo 'after-create-leo-frame' handler: attach a DateNodes controller
    to the new commander and (optionally) build the Outline submenu."""
    c = keywords.get("c")
    if not c or not c.exists:
        return
    if hasattr(c, 'theDateNodesController'):
        # Already installed on this commander.
        return
    instance = DateNodes(c)
    c.theDateNodesController = instance
    if c.config.getBool('suppress-datenodes-menus'):
        return
    menu = c.frame.menu
    # Separator, then an expandable "Insert Date Nodes..." submenu.
    menu.createMenuItemsFromTable("Outline", [("-", None, None),])
    entries = [
        ("Single Day", None, instance.insert_day_node),
        ("Full Month", None, instance.insert_month_node),
        ("Full Year", None, instance.insert_year_node),
    ]
    submenu = menu.createNewMenu("Insert Date Nodes...", "Outline")
    menu.createMenuEntries(submenu, entries, dynamicMenu=True)
| true | true |
f7f79bd302825ef9b8335c829fc0ae2fe1e601b9 | 10,807 | py | Python | detectron2/utils/events.py | newUtkarsh/detectron2 | e1c055abff34513f347a767f43bfe60e275b136c | [
"Apache-2.0"
] | 221 | 2019-11-04T02:43:51.000Z | 2022-03-23T09:16:41.000Z | detectron2/utils/events.py | newUtkarsh/detectron2 | e1c055abff34513f347a767f43bfe60e275b136c | [
"Apache-2.0"
] | 56 | 2021-03-18T12:02:28.000Z | 2022-03-24T15:12:57.000Z | detectron2/utils/events.py | newUtkarsh/detectron2 | e1c055abff34513f347a767f43bfe60e275b136c | [
"Apache-2.0"
] | 65 | 2019-11-06T06:23:04.000Z | 2021-11-29T15:32:55.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
import json
import logging
import os
from collections import defaultdict
from contextlib import contextmanager
import torch
from fvcore.common.file_io import PathManager
from fvcore.common.history_buffer import HistoryBuffer
# Stack of active EventStorage objects; EventStorage.__enter__/__exit__
# push and pop entries here.
_CURRENT_STORAGE_STACK = []


def get_event_storage():
    """Return the :class:`EventStorage` that is currently in scope.

    Raises AssertionError when called outside any ``with EventStorage(...)``
    context.
    """
    stack = _CURRENT_STORAGE_STACK
    assert stack, "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
    return stack[-1]
class EventWriter:
    """
    Base class for writers that obtain events from :class:`EventStorage` and process them.
    """

    def write(self):
        # Subclasses must implement this to consume the storage contents.
        raise NotImplementedError

    def close(self):
        # Optional cleanup hook; a no-op by default.
        pass
class JSONWriter(EventWriter):
    """
    Append scalars to a file as newline-delimited JSON.

    One JSON object is emitted per call to :meth:`write` (one per line,
    instead of a single big document), so the file is trivially parseable
    incrementally, e.g. with ``jq -s`` or a line-by-line reader. Each
    record carries the iteration number plus the latest (optionally
    median-smoothed) scalar values.
    """

    def __init__(self, json_file, window_size=20):
        """
        Args:
            json_file (str): path to the json file. New data will be appended if the file exists.
            window_size (int): the window size of median smoothing for the scalars whose
                `smoothing_hint` are True.
        """
        self._file_handle = PathManager.open(json_file, "a")
        self._window_size = window_size

    def write(self):
        storage = get_event_storage()
        scalars = storage.latest_with_smoothing_hint(self._window_size)
        # Scalar values take precedence over the synthetic "iteration" key,
        # matching the dict.update() semantics of the original code.
        record = {"iteration": storage.iter, **scalars}
        self._file_handle.write(json.dumps(record, sort_keys=True) + "\n")
        self._file_handle.flush()
        try:
            os.fsync(self._file_handle.fileno())
        except AttributeError:
            # Not every PathManager handle is a real OS file; skip the sync.
            pass

    def close(self):
        self._file_handle.close()
class TensorboardXWriter(EventWriter):
    """
    Dump all scalars into a tensorboard event file.
    """

    def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
        """
        Args:
            log_dir (str): The directory to save the output events
            window_size (int): the scalars will be median-smoothed by this window size
            kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`
        """
        self._window_size = window_size
        # Imported lazily so tensorboard is only required when this writer
        # is actually constructed.
        from torch.utils.tensorboard import SummaryWriter

        self._writer = SummaryWriter(log_dir, **kwargs)

    def write(self):
        storage = get_event_storage()
        step = storage.iter
        for name, value in storage.latest_with_smoothing_hint(self._window_size).items():
            self._writer.add_scalar(name, value, step)

    def close(self):
        # _writer never got assigned if __init__ failed at the import above.
        if hasattr(self, "_writer"):
            self._writer.close()
class CommonMetricPrinter(EventWriter):
    """
    Print **common** metrics to the terminal, including
    iteration time, ETA, memory, all losses, and the learning rate.

    To print something different, please implement a similar printer by yourself.
    """

    def __init__(self, max_iter):
        """
        Args:
            max_iter (int): the maximum number of iterations to train.
                Used to compute ETA.
        """
        self.logger = logging.getLogger(__name__)
        self._max_iter = max_iter

    def write(self):
        storage = get_event_storage()
        iteration = storage.iter

        # Defaults shown while the time histories do not exist yet.
        data_time, time = None, None
        eta_string = "N/A"
        try:
            data_time = storage.history("data_time").avg(20)
            time = storage.history("time").global_avg()
            # ETA from the median per-iteration time over the last 1000 iters.
            eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration)
            storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        except KeyError:  # they may not exist in the first few iterations (due to warmup)
            pass

        try:
            lr = "{:.6f}".format(storage.history("lr").latest())
        except KeyError:
            # "lr" is absent until the scheduler puts it at least once.
            lr = "N/A"

        if torch.cuda.is_available():
            max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
        else:
            max_mem_mb = None

        # NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
        self.logger.info(
            """\
eta: {eta} iter: {iter} {losses} \
{time} {data_time} \
lr: {lr} {memory}\
""".format(
                eta=eta_string,
                iter=iteration,
                losses=" ".join(
                    [
                        "{}: {:.3f}".format(k, v.median(20))
                        for k, v in storage.histories().items()
                        if "loss" in k
                    ]
                ),
                time="time: {:.4f}".format(time) if time is not None else "",
                data_time="data_time: {:.4f}".format(data_time) if data_time is not None else "",
                lr=lr,
                memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
            )
        )
class EventStorage:
    """
    The user-facing class that provides metric storage functionalities.

    In the future we may add support for storing / logging other types of data if needed.
    """

    def __init__(self, start_iter=0):
        """
        Args:
            start_iter (int): the iteration number to start with
        """
        self._iter = start_iter
        self._current_prefix = ""
        self._history = defaultdict(HistoryBuffer)
        self._latest_scalars = {}
        self._smoothing_hints = {}

    def put_scalar(self, name, value, smoothing_hint=True):
        """
        Add a scalar `value` to the `HistoryBuffer` associated with `name`.

        Args:
            smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be
                smoothed when logged. The hint will be accessible through
                :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint
                and apply custom smoothing rule. It defaults to True because most
                scalars we save need to be smoothed to provide any useful signal.
        """
        full_name = self._current_prefix + name
        scalar = float(value)
        self._history[full_name].update(scalar, self._iter)
        self._latest_scalars[full_name] = scalar

        prior_hint = self._smoothing_hints.get(full_name)
        if prior_hint is None:
            self._smoothing_hints[full_name] = smoothing_hint
        else:
            # A scalar must keep the same hint for its whole lifetime.
            assert (
                prior_hint == smoothing_hint
            ), "Scalar {} was put with a different smoothing_hint!".format(full_name)

    def put_scalars(self, *, smoothing_hint=True, **kwargs):
        """
        Put multiple scalars from keyword arguments.

        Examples:
            storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)
        """
        for key, val in kwargs.items():
            self.put_scalar(key, val, smoothing_hint=smoothing_hint)

    def history(self, name):
        """
        Returns:
            HistoryBuffer: the scalar history for name
        """
        buf = self._history.get(name, None)
        if buf is None:
            raise KeyError("No history metric available for {}!".format(name))
        return buf

    def histories(self):
        """
        Returns:
            dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars
        """
        return self._history

    def latest(self):
        """
        Returns:
            dict[name -> number]: the scalars that's added in the current iteration.
        """
        return self._latest_scalars

    def latest_with_smoothing_hint(self, window_size=20):
        """
        Similar to :meth:`latest`, but each returned value is the median of the
        last `window_size` entries when its smoothing hint is True, and the raw
        latest value otherwise. This provides a default behavior that other
        writers can use.
        """
        return {
            key: self._history[key].median(window_size) if self._smoothing_hints[key] else val
            for key, val in self._latest_scalars.items()
        }

    def smoothing_hints(self):
        """
        Returns:
            dict[name -> bool]: the user-provided hint on whether the scalar
                is noisy and needs smoothing.
        """
        return self._smoothing_hints

    def step(self):
        """
        Advance to the next iteration: bump the counter and start a fresh set
        of "latest" scalars so new data is associated with the new iteration.
        """
        self._iter += 1
        self._latest_scalars = {}

    @property
    def iter(self):
        return self._iter

    @property
    def iteration(self):
        # for backward compatibility
        return self._iter

    def __enter__(self):
        _CURRENT_STORAGE_STACK.append(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert _CURRENT_STORAGE_STACK[-1] == self
        _CURRENT_STORAGE_STACK.pop()

    @contextmanager
    def name_scope(self, name):
        """
        Yields:
            A context within which all the events added to this storage
            will be prefixed by the name scope.
        """
        previous_prefix = self._current_prefix
        self._current_prefix = name.rstrip("/") + "/"
        yield
        # NOTE(review): no try/finally, matching the original behavior -- an
        # exception in the body leaves the prefix in place.
        self._current_prefix = previous_prefix
| 31.973373 | 97 | 0.593689 |
import datetime
import json
import logging
import os
from collections import defaultdict
from contextlib import contextmanager
import torch
from fvcore.common.file_io import PathManager
from fvcore.common.history_buffer import HistoryBuffer
# Stack of active EventStorage objects, maintained by EventStorage's
# __enter__/__exit__ methods.
_CURRENT_STORAGE_STACK = []


def get_event_storage():
    """Return the innermost active :class:`EventStorage`.

    Raises AssertionError when no ``with EventStorage(...)`` block is active.
    """
    stack = _CURRENT_STORAGE_STACK
    assert stack, "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
    return stack[-1]
class EventWriter:
    """Abstract base for objects that consume events from an EventStorage."""

    def write(self):
        """Process the current contents of the active storage (abstract)."""
        raise NotImplementedError

    def close(self):
        """Release any resources; a no-op by default."""
        pass
class JSONWriter(EventWriter):
    """Write scalars as one JSON object per line (newline-delimited JSON),
    which keeps the output file easy to parse incrementally."""
    def __init__(self, json_file, window_size=20):
        """
        Args:
            json_file (str): path to the json file; appended to if it exists.
            window_size (int): median-smoothing window for scalars whose
                smoothing_hint is True.
        """
        self._file_handle = PathManager.open(json_file, "a")
        self._window_size = window_size
    def write(self):
        # Emit one record: the iteration number plus the latest
        # (possibly median-smoothed) scalar values.
        storage = get_event_storage()
        to_save = {"iteration": storage.iter}
        to_save.update(storage.latest_with_smoothing_hint(self._window_size))
        self._file_handle.write(json.dumps(to_save, sort_keys=True) + "\n")
        self._file_handle.flush()
        try:
            # Force the line to disk when the handle is a real OS file.
            os.fsync(self._file_handle.fileno())
        except AttributeError:
            # Handles without fileno() (non-local storage) cannot be fsynced.
            pass
    def close(self):
        self._file_handle.close()
class TensorboardXWriter(EventWriter):
def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
self._window_size = window_size
from torch.utils.tensorboard import SummaryWriter
self._writer = SummaryWriter(log_dir, **kwargs)
def write(self):
storage = get_event_storage()
for k, v in storage.latest_with_smoothing_hint(self._window_size).items():
self._writer.add_scalar(k, v, storage.iter)
def close(self):
if hasattr(self, "_writer"):
self._writer.close()
class CommonMetricPrinter(EventWriter):
def __init__(self, max_iter):
self.logger = logging.getLogger(__name__)
self._max_iter = max_iter
def write(self):
storage = get_event_storage()
iteration = storage.iter
data_time, time = None, None
eta_string = "N/A"
try:
data_time = storage.history("data_time").avg(20)
time = storage.history("time").global_avg()
eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration)
storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
except KeyError: # they may not exist in the first few iterations (due to warmup)
pass
try:
lr = "{:.6f}".format(storage.history("lr").latest())
except KeyError:
lr = "N/A"
if torch.cuda.is_available():
max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
else:
max_mem_mb = None
# NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
self.logger.info(
"""\
eta: {eta} iter: {iter} {losses} \
{time} {data_time} \
lr: {lr} {memory}\
""".format(
eta=eta_string,
iter=iteration,
losses=" ".join(
[
"{}: {:.3f}".format(k, v.median(20))
for k, v in storage.histories().items()
if "loss" in k
]
),
time="time: {:.4f}".format(time) if time is not None else "",
data_time="data_time: {:.4f}".format(data_time) if data_time is not None else "",
lr=lr,
memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
)
)
class EventStorage:
def __init__(self, start_iter=0):
self._history = defaultdict(HistoryBuffer)
self._smoothing_hints = {}
self._latest_scalars = {}
self._iter = start_iter
self._current_prefix = ""
def put_scalar(self, name, value, smoothing_hint=True):
name = self._current_prefix + name
history = self._history[name]
value = float(value)
history.update(value, self._iter)
self._latest_scalars[name] = value
existing_hint = self._smoothing_hints.get(name)
if existing_hint is not None:
assert (
existing_hint == smoothing_hint
), "Scalar {} was put with a different smoothing_hint!".format(name)
else:
self._smoothing_hints[name] = smoothing_hint
def put_scalars(self, *, smoothing_hint=True, **kwargs):
for k, v in kwargs.items():
self.put_scalar(k, v, smoothing_hint=smoothing_hint)
def history(self, name):
ret = self._history.get(name, None)
if ret is None:
raise KeyError("No history metric available for {}!".format(name))
return ret
def histories(self):
return self._history
def latest(self):
return self._latest_scalars
def latest_with_smoothing_hint(self, window_size=20):
result = {}
for k, v in self._latest_scalars.items():
result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v
return result
def smoothing_hints(self):
return self._smoothing_hints
def step(self):
self._iter += 1
self._latest_scalars = {}
@property
def iter(self):
return self._iter
@property
def iteration(self):
# for backward compatibility
return self._iter
def __enter__(self):
_CURRENT_STORAGE_STACK.append(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert _CURRENT_STORAGE_STACK[-1] == self
_CURRENT_STORAGE_STACK.pop()
@contextmanager
def name_scope(self, name):
old_prefix = self._current_prefix
self._current_prefix = name.rstrip("/") + "/"
yield
self._current_prefix = old_prefix
| true | true |
f7f79be385a11dd622266b06537df6b536a385e7 | 14,634 | py | Python | model/pytorch/dffml_model_pytorch/pytorch_base.py | SGeetansh/dffml | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | [
"MIT"
] | 1 | 2019-03-11T17:24:17.000Z | 2019-03-11T17:24:17.000Z | model/pytorch/dffml_model_pytorch/pytorch_base.py | SGeetansh/dffml | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | [
"MIT"
] | 24 | 2020-05-20T23:29:57.000Z | 2021-04-14T04:18:21.000Z | model/pytorch/dffml_model_pytorch/pytorch_base.py | SGeetansh/dffml | 04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b | [
"MIT"
] | 1 | 2020-05-06T19:07:02.000Z | 2020-05-06T19:07:02.000Z | import os
import pathlib
from typing import Any, Tuple, AsyncIterator, List, Type, Dict
import copy
import time
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
from dffml.record import Record
from dffml.model.accuracy import Accuracy
from dffml.base import config, field
from dffml.feature.feature import Feature, Features
from dffml.source.source import Sources, SourcesContext
from dffml.model.model import ModelContext, ModelNotTrained
from .utils import NumpyToTensor, PyTorchLoss, CrossEntropyLossFunction
@config
class PyTorchModelConfig:
predict: Feature = field("Feature name holding classification value")
features: Features = field("Features to train on")
directory: pathlib.Path = field("Directory where state should be saved")
classifications: List[str] = field(
"Options for value of classification", default=None
)
clstype: Type = field("Data type of classifications values", default=str)
imageSize: int = field(
"Common size for all images to resize and crop to", default=None
)
enableGPU: bool = field("Utilize GPUs for processing", default=False)
epochs: int = field(
"Number of iterations to pass over all records in a source", default=20
)
batch_size: int = field("Batch size", default=32)
validation_split: float = field(
"Split training data for Validation", default=0.0
)
patience: int = field(
"Early stops the training if validation loss doesn't improve after a given patience",
default=5,
)
loss: PyTorchLoss = field(
"Loss Functions available in PyTorch",
default=CrossEntropyLossFunction,
)
optimizer: str = field(
"Optimizer Algorithms available in PyTorch", default="SGD"
)
normalize_mean: List[float] = field(
"Mean values for normalizing Tensor image", default=None
)
normalize_std: List[float] = field(
"Standard Deviation values for normalizing Tensor image", default=None
)
def __post_init__(self):
if self.classifications is not None:
self.classifications = list(
map(self.clstype, self.classifications)
)
class PyTorchModelContext(ModelContext):
def __init__(self, parent):
super().__init__(parent)
if self.parent.config.classifications:
self.cids = self._mkcids(self.parent.config.classifications)
self.classifications = self._classifications(self.cids)
self.features = self._applicable_features()
self.model_path = self._model_path()
self._model = None
self.counter = 0
if self.parent.config.enableGPU and torch.cuda.is_available():
self.device = torch.device("cuda:0")
self.logger.info("Using CUDA")
else:
self.device = torch.device("cpu")
async def __aenter__(self):
if os.path.isfile(self.model_path):
self.logger.info(f"Using saved model from {self.model_path}")
self._model = torch.load(self.model_path)
else:
self._model = self.createModel()
self.set_model_parameters()
self.criterion = self.parent.config.loss.function
self.optimizer = getattr(optim, self.parent.config.optimizer)(
self.model_parameters, lr=0.001
)
self.exp_lr_scheduler = lr_scheduler.StepLR(
self.optimizer, step_size=5, gamma=0.1
)
return self
async def __aexit__(self, exc_type, exc_value, traceback):
pass
def set_model_parameters(self):
self.model_parameters = self._model.parameters()
def _classifications(self, cids):
"""
Map classifications to numeric values
"""
classifications = {value: key for key, value in cids.items()}
self.logger.debug(
"classifications(%d): %r", len(classifications), classifications
)
return classifications
def _applicable_features(self):
return [name for name in self.parent.config.features.names()]
def _model_path(self):
if self.parent.config.directory is None:
return None
if not os.path.isdir(self.parent.config.directory):
raise NotADirectoryError(
"%s is not a directory" % (self.parent.config.directory)
)
os.makedirs(self.parent.config.directory, exist_ok=True)
return os.path.join(self.parent.config.directory, "model.pt")
def _mkcids(self, classifications):
"""
Create an index, possible classification mapping and sort the list of
classifications first.
"""
cids = dict(
zip(range(0, len(classifications)), sorted(classifications))
)
self.logger.debug("cids(%d): %r", len(cids), cids)
return cids
async def dataset_generator(self, sources: Sources):
self.logger.debug("Training on features: %r", self.features)
x_cols: Dict[str, Any] = {feature: [] for feature in self.features}
y_cols = []
all_records = []
all_sources = sources.with_features(
self.features + [self.parent.config.predict.name]
)
async for record in all_sources:
for feature, results in record.features(self.features).items():
x_cols[feature].append(np.array(results))
y_cols.append(
self.classifications[
record.feature(self.parent.config.predict.name)
]
if self.classifications
else record.feature(self.parent.config.predict.name)
)
if (len(self.features)) > 1:
self.logger.critical(
"Found more than one feature to train on. Only first feature will be used"
)
if not y_cols:
raise ValueError("No records to train on")
y_cols = np.array(y_cols)
for feature in x_cols:
x_cols[feature] = np.array(x_cols[feature])
self.logger.info("------ Record Data ------")
self.logger.info("x_cols: %d", len(list(x_cols.values())[0]))
self.logger.info("y_cols: %d", len(y_cols))
self.logger.info("-----------------------")
x_cols = x_cols[self.features[0]]
dataset = NumpyToTensor(
x_cols,
y_cols,
size=self.parent.config.imageSize,
norm_mean=self.parent.config.normalize_mean,
norm_std=self.parent.config.normalize_std,
)
return dataset, len(dataset)
async def prediction_data_generator(self, data):
dataset = NumpyToTensor(
[data],
size=self.parent.config.imageSize,
norm_mean=self.parent.config.normalize_mean,
norm_std=self.parent.config.normalize_std,
)
dataloader = torch.utils.data.DataLoader(dataset)
return dataloader
async def train(self, sources: Sources):
dataset, size = await self.dataset_generator(sources)
size = {
"Training": size - int(self.parent.config.validation_split * size),
"Validation": int(self.parent.config.validation_split * size),
}
if self.parent.config.validation_split:
data = dict(
zip(
["Training", "Validation"],
list(
torch.utils.data.random_split(
dataset, [size["Training"], size["Validation"]]
)
),
)
)
self.logger.info(
"Data split into Training samples: {} and Validation samples: {}".format(
size["Training"], size["Validation"]
)
)
dataloaders = {
x: torch.utils.data.DataLoader(
data[x],
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
for x in ["Training", "Validation"]
}
else:
dataloaders = {
"Training": torch.utils.data.DataLoader(
dataset,
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
}
since = time.time()
best_model_wts = copy.deepcopy(self._model.state_dict())
best_acc = 0.0
for epoch in range(self.parent.config.epochs):
self.logger.debug(
"Epoch {}/{}".format(epoch + 1, self.parent.config.epochs)
)
self.logger.debug("-" * 10)
for phase in dataloaders.keys():
if phase == "Training":
self._model.train()
else:
self._model.eval()
running_loss = 0.0
running_corrects = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
with torch.set_grad_enabled(phase == "Training"):
outputs = self._model(inputs)
if self.classifications:
_, preds = torch.max(outputs, 1)
loss = self.criterion(outputs, labels)
if phase == "Training":
loss.backward()
self.optimizer.step()
running_loss += loss.item() * inputs.size(0)
if self.classifications:
running_corrects += torch.sum(preds == labels.data)
if phase == "Training":
self.exp_lr_scheduler.step()
epoch_loss = running_loss / size[phase]
epoch_acc = (
running_corrects.double() / size[phase]
if self.classifications
else 1.0 - epoch_loss
)
self.logger.debug(
"{} Loss: {:.4f} Acc: {:.4f}".format(
phase, epoch_loss, epoch_acc
)
)
if phase == "Validation":
if epoch_acc >= best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(
self._model.state_dict()
)
self.counter = 0
else:
self.counter += 1
if best_acc == 1.0:
self.counter = self.parent.config.patience
self.logger.debug("")
if self.counter == self.parent.config.patience:
self.logger.info(
f"Early stopping: Validation Loss didn't improve for {self.counter} "
+ "consecutive epochs OR maximum accuracy attained."
)
break
time_elapsed = time.time() - since
self.logger.info(
"Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60
)
)
if self.parent.config.validation_split:
self.logger.info(
"Best Validation Accuracy: {:4f}".format(best_acc)
)
self._model.load_state_dict(best_model_wts)
torch.save(self._model, self.model_path)
async def accuracy(self, sources: Sources) -> Accuracy:
if not os.path.isfile(os.path.join(self.model_path)):
raise ModelNotTrained("Train model before assessing for accuracy.")
dataset, size = await self.dataset_generator(sources)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
self._model.eval()
if self.classifications:
running_corrects = 0
for inputs, labels in dataloader:
inputs = inputs.to(inputs)
labels = labels.to(inputs)
with torch.set_grad_enabled(False):
outputs = self._model(inputs)
_, preds = torch.max(outputs, 1)
running_corrects += torch.sum(preds == labels.data)
acc = running_corrects.double() / size
else:
running_loss = 0.0
for inputs, labels in dataloader:
inputs = inputs.to(inputs)
labels = labels.to(inputs)
with torch.set_grad_enabled(False):
outputs = self._model(inputs)
loss = self.criterion(inputs, outputs)
running_loss += loss.item() * inputs.size(0)
total_loss = running_loss / size
acc = 1.0 - total_loss
return Accuracy(acc)
async def predict(
self, sources: SourcesContext
) -> AsyncIterator[Tuple[Record, Any, float]]:
"""
Uses trained data to make a prediction about the quality of a record.
"""
if not os.path.isfile(os.path.join(self.model_path)):
raise ModelNotTrained("Train model before prediction.")
self._model.eval()
async for record in sources.with_features(self.features):
feature_data = record.features(self.features)[self.features[0]]
predict = await self.prediction_data_generator(feature_data)
target = self.parent.config.predict.name
with torch.no_grad():
for val in predict:
val = val.to(self.device)
output = self._model(val)
if self.classifications:
prob = torch.nn.functional.softmax(output, dim=1)
confidence, prediction_value = prob.topk(1, dim=1)
record.predicted(
target,
self.cids[prediction_value.item()],
confidence,
)
else:
confidence = 1.0 - self.criterion(val, output).item()
record.predicted(target, output, confidence)
yield record
| 35.605839 | 93 | 0.549747 | import os
import pathlib
from typing import Any, Tuple, AsyncIterator, List, Type, Dict
import copy
import time
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
from dffml.record import Record
from dffml.model.accuracy import Accuracy
from dffml.base import config, field
from dffml.feature.feature import Feature, Features
from dffml.source.source import Sources, SourcesContext
from dffml.model.model import ModelContext, ModelNotTrained
from .utils import NumpyToTensor, PyTorchLoss, CrossEntropyLossFunction
@config
class PyTorchModelConfig:
predict: Feature = field("Feature name holding classification value")
features: Features = field("Features to train on")
directory: pathlib.Path = field("Directory where state should be saved")
classifications: List[str] = field(
"Options for value of classification", default=None
)
clstype: Type = field("Data type of classifications values", default=str)
imageSize: int = field(
"Common size for all images to resize and crop to", default=None
)
enableGPU: bool = field("Utilize GPUs for processing", default=False)
epochs: int = field(
"Number of iterations to pass over all records in a source", default=20
)
batch_size: int = field("Batch size", default=32)
validation_split: float = field(
"Split training data for Validation", default=0.0
)
patience: int = field(
"Early stops the training if validation loss doesn't improve after a given patience",
default=5,
)
loss: PyTorchLoss = field(
"Loss Functions available in PyTorch",
default=CrossEntropyLossFunction,
)
optimizer: str = field(
"Optimizer Algorithms available in PyTorch", default="SGD"
)
normalize_mean: List[float] = field(
"Mean values for normalizing Tensor image", default=None
)
normalize_std: List[float] = field(
"Standard Deviation values for normalizing Tensor image", default=None
)
def __post_init__(self):
if self.classifications is not None:
self.classifications = list(
map(self.clstype, self.classifications)
)
class PyTorchModelContext(ModelContext):
def __init__(self, parent):
super().__init__(parent)
if self.parent.config.classifications:
self.cids = self._mkcids(self.parent.config.classifications)
self.classifications = self._classifications(self.cids)
self.features = self._applicable_features()
self.model_path = self._model_path()
self._model = None
self.counter = 0
if self.parent.config.enableGPU and torch.cuda.is_available():
self.device = torch.device("cuda:0")
self.logger.info("Using CUDA")
else:
self.device = torch.device("cpu")
async def __aenter__(self):
if os.path.isfile(self.model_path):
self.logger.info(f"Using saved model from {self.model_path}")
self._model = torch.load(self.model_path)
else:
self._model = self.createModel()
self.set_model_parameters()
self.criterion = self.parent.config.loss.function
self.optimizer = getattr(optim, self.parent.config.optimizer)(
self.model_parameters, lr=0.001
)
self.exp_lr_scheduler = lr_scheduler.StepLR(
self.optimizer, step_size=5, gamma=0.1
)
return self
async def __aexit__(self, exc_type, exc_value, traceback):
pass
def set_model_parameters(self):
self.model_parameters = self._model.parameters()
def _classifications(self, cids):
classifications = {value: key for key, value in cids.items()}
self.logger.debug(
"classifications(%d): %r", len(classifications), classifications
)
return classifications
def _applicable_features(self):
return [name for name in self.parent.config.features.names()]
def _model_path(self):
if self.parent.config.directory is None:
return None
if not os.path.isdir(self.parent.config.directory):
raise NotADirectoryError(
"%s is not a directory" % (self.parent.config.directory)
)
os.makedirs(self.parent.config.directory, exist_ok=True)
return os.path.join(self.parent.config.directory, "model.pt")
def _mkcids(self, classifications):
cids = dict(
zip(range(0, len(classifications)), sorted(classifications))
)
self.logger.debug("cids(%d): %r", len(cids), cids)
return cids
async def dataset_generator(self, sources: Sources):
self.logger.debug("Training on features: %r", self.features)
x_cols: Dict[str, Any] = {feature: [] for feature in self.features}
y_cols = []
all_records = []
all_sources = sources.with_features(
self.features + [self.parent.config.predict.name]
)
async for record in all_sources:
for feature, results in record.features(self.features).items():
x_cols[feature].append(np.array(results))
y_cols.append(
self.classifications[
record.feature(self.parent.config.predict.name)
]
if self.classifications
else record.feature(self.parent.config.predict.name)
)
if (len(self.features)) > 1:
self.logger.critical(
"Found more than one feature to train on. Only first feature will be used"
)
if not y_cols:
raise ValueError("No records to train on")
y_cols = np.array(y_cols)
for feature in x_cols:
x_cols[feature] = np.array(x_cols[feature])
self.logger.info("------ Record Data ------")
self.logger.info("x_cols: %d", len(list(x_cols.values())[0]))
self.logger.info("y_cols: %d", len(y_cols))
self.logger.info("-----------------------")
x_cols = x_cols[self.features[0]]
dataset = NumpyToTensor(
x_cols,
y_cols,
size=self.parent.config.imageSize,
norm_mean=self.parent.config.normalize_mean,
norm_std=self.parent.config.normalize_std,
)
return dataset, len(dataset)
async def prediction_data_generator(self, data):
dataset = NumpyToTensor(
[data],
size=self.parent.config.imageSize,
norm_mean=self.parent.config.normalize_mean,
norm_std=self.parent.config.normalize_std,
)
dataloader = torch.utils.data.DataLoader(dataset)
return dataloader
async def train(self, sources: Sources):
dataset, size = await self.dataset_generator(sources)
size = {
"Training": size - int(self.parent.config.validation_split * size),
"Validation": int(self.parent.config.validation_split * size),
}
if self.parent.config.validation_split:
data = dict(
zip(
["Training", "Validation"],
list(
torch.utils.data.random_split(
dataset, [size["Training"], size["Validation"]]
)
),
)
)
self.logger.info(
"Data split into Training samples: {} and Validation samples: {}".format(
size["Training"], size["Validation"]
)
)
dataloaders = {
x: torch.utils.data.DataLoader(
data[x],
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
for x in ["Training", "Validation"]
}
else:
dataloaders = {
"Training": torch.utils.data.DataLoader(
dataset,
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
}
since = time.time()
best_model_wts = copy.deepcopy(self._model.state_dict())
best_acc = 0.0
for epoch in range(self.parent.config.epochs):
self.logger.debug(
"Epoch {}/{}".format(epoch + 1, self.parent.config.epochs)
)
self.logger.debug("-" * 10)
for phase in dataloaders.keys():
if phase == "Training":
self._model.train()
else:
self._model.eval()
running_loss = 0.0
running_corrects = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
with torch.set_grad_enabled(phase == "Training"):
outputs = self._model(inputs)
if self.classifications:
_, preds = torch.max(outputs, 1)
loss = self.criterion(outputs, labels)
if phase == "Training":
loss.backward()
self.optimizer.step()
running_loss += loss.item() * inputs.size(0)
if self.classifications:
running_corrects += torch.sum(preds == labels.data)
if phase == "Training":
self.exp_lr_scheduler.step()
epoch_loss = running_loss / size[phase]
epoch_acc = (
running_corrects.double() / size[phase]
if self.classifications
else 1.0 - epoch_loss
)
self.logger.debug(
"{} Loss: {:.4f} Acc: {:.4f}".format(
phase, epoch_loss, epoch_acc
)
)
if phase == "Validation":
if epoch_acc >= best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(
self._model.state_dict()
)
self.counter = 0
else:
self.counter += 1
if best_acc == 1.0:
self.counter = self.parent.config.patience
self.logger.debug("")
if self.counter == self.parent.config.patience:
self.logger.info(
f"Early stopping: Validation Loss didn't improve for {self.counter} "
+ "consecutive epochs OR maximum accuracy attained."
)
break
time_elapsed = time.time() - since
self.logger.info(
"Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60
)
)
if self.parent.config.validation_split:
self.logger.info(
"Best Validation Accuracy: {:4f}".format(best_acc)
)
self._model.load_state_dict(best_model_wts)
torch.save(self._model, self.model_path)
async def accuracy(self, sources: Sources) -> Accuracy:
if not os.path.isfile(os.path.join(self.model_path)):
raise ModelNotTrained("Train model before assessing for accuracy.")
dataset, size = await self.dataset_generator(sources)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
self._model.eval()
if self.classifications:
running_corrects = 0
for inputs, labels in dataloader:
inputs = inputs.to(inputs)
labels = labels.to(inputs)
with torch.set_grad_enabled(False):
outputs = self._model(inputs)
_, preds = torch.max(outputs, 1)
running_corrects += torch.sum(preds == labels.data)
acc = running_corrects.double() / size
else:
running_loss = 0.0
for inputs, labels in dataloader:
inputs = inputs.to(inputs)
labels = labels.to(inputs)
with torch.set_grad_enabled(False):
outputs = self._model(inputs)
loss = self.criterion(inputs, outputs)
running_loss += loss.item() * inputs.size(0)
total_loss = running_loss / size
acc = 1.0 - total_loss
return Accuracy(acc)
async def predict(
self, sources: SourcesContext
) -> AsyncIterator[Tuple[Record, Any, float]]:
if not os.path.isfile(os.path.join(self.model_path)):
raise ModelNotTrained("Train model before prediction.")
self._model.eval()
async for record in sources.with_features(self.features):
feature_data = record.features(self.features)[self.features[0]]
predict = await self.prediction_data_generator(feature_data)
target = self.parent.config.predict.name
with torch.no_grad():
for val in predict:
val = val.to(self.device)
output = self._model(val)
if self.classifications:
prob = torch.nn.functional.softmax(output, dim=1)
confidence, prediction_value = prob.topk(1, dim=1)
record.predicted(
target,
self.cids[prediction_value.item()],
confidence,
)
else:
confidence = 1.0 - self.criterion(val, output).item()
record.predicted(target, output, confidence)
yield record
| true | true |
f7f79c6d2bf9bc408cdf7d41a9398a5b9215ea41 | 382 | py | Python | uniborg/__init__.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | 2 | 2019-10-23T12:30:32.000Z | 2019-10-23T20:33:26.000Z | uniborg/__init__.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | null | null | null | uniborg/__init__.py | Deepumad77/CartoonBot | eff24f40043e41feadc70a63fa655353c1783ee0 | [
"Apache-2.0"
] | 12 | 2019-10-25T18:48:16.000Z | 2020-04-26T11:31:21.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .uniborg import *
from sample_config import Config
# add modules to this list
MODULE = []
# add syntax to this dictionary using SYNTAX.update
SYNTAX = {}
BUILD = "USER-43x01"
| 27.285714 | 69 | 0.730366 |
from .uniborg import *
from sample_config import Config
MODULE = []
SYNTAX = {}
BUILD = "USER-43x01"
| true | true |
f7f79d6672610da6c279ab9bb7489aafbe904bfb | 760 | py | Python | Tools/collect_entropy.py | ElielGez/Malicious_URL_zdm_new_features_lee_eliel | 04935b5b1b280906d684be36977151d89a09b4c6 | [
"MIT"
] | null | null | null | Tools/collect_entropy.py | ElielGez/Malicious_URL_zdm_new_features_lee_eliel | 04935b5b1b280906d684be36977151d89a09b4c6 | [
"MIT"
] | null | null | null | Tools/collect_entropy.py | ElielGez/Malicious_URL_zdm_new_features_lee_eliel | 04935b5b1b280906d684be36977151d89a09b4c6 | [
"MIT"
] | null | null | null | import pandas as pd
from collections import defaultdict
from urllib.parse import urlparse
import math
df = pd.read_csv('Final_newData_withFeatures.csv')
urls = df['0']
entropies = []
for index, url in enumerate(urls):
domain=""
if url[:4] == 'http':
domain = urlparse(url).netloc
else:
domain = urlparse('http://'+url).netloc
entropy = 0
str_len = len(domain)
chars = defaultdict(int)
for char in domain:
chars[char] += 1
for char in domain:
pj = (chars[char]/str_len)
entropy += pj*math.log(pj,2)
entropies.append((-1)*entropy)
df['6'] = pd.Series(entropies)
#df.drop('Unnamed: 0', inplace=True, axis=1)
#df=df[df['length'] != -1]
df.to_csv('superFinal.csv') | 22.352941 | 50 | 0.614474 | import pandas as pd
from collections import defaultdict
from urllib.parse import urlparse
import math
df = pd.read_csv('Final_newData_withFeatures.csv')
urls = df['0']
entropies = []
for index, url in enumerate(urls):
domain=""
if url[:4] == 'http':
domain = urlparse(url).netloc
else:
domain = urlparse('http://'+url).netloc
entropy = 0
str_len = len(domain)
chars = defaultdict(int)
for char in domain:
chars[char] += 1
for char in domain:
pj = (chars[char]/str_len)
entropy += pj*math.log(pj,2)
entropies.append((-1)*entropy)
df['6'] = pd.Series(entropies)
df.to_csv('superFinal.csv') | true | true |
f7f79e6a898811403fa0a855310783e6325c6aa8 | 43,283 | py | Python | alpha_pipe/analyzer/utils.py | gayhub-wpp/alpha_pipe_oldversion | 877047442ae939df3f15611d48ea2df1476a940c | [
"MIT"
] | null | null | null | alpha_pipe/analyzer/utils.py | gayhub-wpp/alpha_pipe_oldversion | 877047442ae939df3f15611d48ea2df1476a940c | [
"MIT"
] | null | null | null | alpha_pipe/analyzer/utils.py | gayhub-wpp/alpha_pipe_oldversion | 877047442ae939df3f15611d48ea2df1476a940c | [
"MIT"
] | 1 | 2021-11-10T22:28:20.000Z | 2021-11-10T22:28:20.000Z | from numpy import sqrt, mean
# Standard library
import re
import warnings
from collections.abc import Iterable  # "collections.Iterable" alias was removed in Python 3.10
from functools import wraps

# Third-party
import six
import numpy as np
import pandas as pd
from IPython.display import display
from pandas.tseries.offsets import CustomBusinessDay, Day, BusinessDay
from scipy.stats import mode
class NonMatchingTimezoneError(Exception):
    """Signals that two datetime indexes do not share the same timezone."""
class MaxLossExceededError(Exception):
    """Signals that the allowed maximum data loss threshold was exceeded."""
def rethrow(exception, additional_message):
    """
    Re-raise ``exception`` with ``additional_message`` appended to its
    first argument, without losing the original stacktrace.

    The exception's ``args`` tuple is mutated in place (rather than a new
    exception being constructed) so the approach works on both
    python 2 and 3.
    """
    extra = additional_message
    if not exception.args:
        exception.args = (extra,)
    else:
        first, rest = exception.args[0], exception.args[1:]
        exception.args = (first + extra,) + rest
    raise exception
def non_unique_bin_edges_error(func):
    """
    Decorator that gives the user a more informative error in case it is
    not possible to properly calculate quantiles on the input dataframe
    (factor).

    Parameters
    ----------
    func : callable
        Function whose ``ValueError`` containing 'Bin edges must be unique'
        should be augmented with a troubleshooting message. Any other
        exception propagates unchanged.

    Returns
    -------
    callable
        Wrapped version of ``func`` with the same call signature.
    """
    message = """

    An error occurred while computing bins/quantiles on the input provided.
    This usually happens when the input contains too many identical
    values and they span more than one quantile. The quantiles are chosen
    to have the same number of records each, but the same value cannot span
    multiple quantiles. Possible workarounds are:
    1 - Decrease the number of quantiles
    2 - Specify a custom quantiles range, e.g. [0, .50, .75, 1.] to get unequal
        number of records per quantile
    3 - Use 'bins' option instead of 'quantiles', 'bins' chooses the
        buckets to be evenly spaced according to the values themselves, while
        'quantiles' forces the buckets to have the same number of records.
    4 - for factors with discrete values use the 'bins' option with custom
        ranges and create a range for each discrete value
    Please see utils.get_clean_factor_and_forward_returns documentation for
    full documentation of 'bins' and 'quantiles' options.

"""

    @wraps(func)  # keep func's __name__/__doc__ for debugging and introspection
    def dec(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ValueError as e:
            # Only the specific pandas "duplicate bin edges" failure gets the
            # troubleshooting message appended; everything else re-raises as-is.
            if 'Bin edges must be unique' in str(e):
                rethrow(e, message)
            raise
    return dec
@non_unique_bin_edges_error
def quantize_factor(factor_data,
                    quantiles=5,
                    bins=None,
                    by_group=False,
                    no_raise=False,
                    zero_aware=False):
    """
    Computes period wise factor quantiles.

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.

        - See full explanation in utils.get_clean_factor_and_forward_returns

    quantiles : int or sequence[float]
        Number of equal-sized quantile buckets to use in factor bucketing.
        Alternately sequence of quantiles, allowing non-equal-sized buckets
        e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
        Only one of 'quantiles' or 'bins' can be not-None
    bins : int or sequence[float]
        Number of equal-width (valuewise) bins to use in factor bucketing.
        Alternately sequence of bin edges allowing for non-uniform bin width
        e.g. [-4, -2, -0.5, 0, 10]
        Only one of 'quantiles' or 'bins' can be not-None
    by_group : bool, optional
        If True, compute quantile buckets separately for each group.
    no_raise: bool, optional
        If True, no exceptions are thrown and the values for which the
        exception would have been thrown are set to np.NaN
    zero_aware : bool, optional
        If True, compute quantile buckets separately for positive and negative
        signal values. This is useful if your signal is centered and zero is
        the separation between long and short signals, respectively.

    Returns
    -------
    factor_quantile : pd.Series
        Factor quantiles indexed by date and asset.
    """
    # Exactly one of 'quantiles'/'bins' must be supplied.
    if not ((quantiles is not None and bins is None) or
            (quantiles is None and bins is not None)):
        raise ValueError('Either quantiles or bins should be provided')

    if zero_aware and not (isinstance(quantiles, int)
                           or isinstance(bins, int)):
        msg = ("zero_aware should only be True when quantiles or bins is an"
               " integer")
        raise ValueError(msg)

    def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):
        # Bucket a single date's (or date+group's) factor values.
        # Labels are 1-based; in zero-aware mode, negative values fill the
        # lower half of the label range and non-negative values the upper half.
        try:
            if _quantiles is not None and _bins is None and not _zero_aware:
                return pd.qcut(x, _quantiles, labels=False) + 1
            elif _quantiles is not None and _bins is None and _zero_aware:
                pos_quantiles = pd.qcut(x[x >= 0], _quantiles // 2,
                                        labels=False) + _quantiles // 2 + 1
                neg_quantiles = pd.qcut(x[x < 0], _quantiles // 2,
                                        labels=False) + 1
                return pd.concat([pos_quantiles, neg_quantiles]).sort_index()
            elif _bins is not None and _quantiles is None and not _zero_aware:
                return pd.cut(x, _bins, labels=False) + 1
            elif _bins is not None and _quantiles is None and _zero_aware:
                pos_bins = pd.cut(x[x >= 0], _bins // 2,
                                  labels=False) + _bins // 2 + 1
                neg_bins = pd.cut(x[x < 0], _bins // 2,
                                  labels=False) + 1
                return pd.concat([pos_bins, neg_bins]).sort_index()
        except Exception as e:
            if _no_raise:
                # All-NaN placeholder that the final dropna() removes.
                # Explicit float dtype avoids pandas' deprecated object-dtype
                # default for value-less Series construction.
                return pd.Series(index=x.index, dtype=float)
            raise e

    grouper = [factor_data.index.get_level_values('date')]
    if by_group:
        grouper.append('group')

    factor_quantile = factor_data.groupby(grouper)['factor'] \
        .apply(quantile_calc, quantiles, bins, zero_aware, no_raise)
    factor_quantile.name = 'factor_quantile'

    # Rows where bucketing failed (and no_raise was set) are dropped here.
    return factor_quantile.dropna()
def infer_trading_calendar(factor_idx, prices_idx):
    """
    Infer the trading calendar from factor and price information.

    The weekmask keeps only weekdays that appear at least once in the
    combined index; dates of a traded weekday that are missing from the
    data are treated as holidays.

    Parameters
    ----------
    factor_idx : pd.DatetimeIndex
        The factor datetimes for which we are computing the forward returns
    prices_idx : pd.DatetimeIndex
        The prices datetimes associated with the factor data

    Returns
    -------
    calendar : pd.DateOffset
        A CustomBusinessDay encoding the inferred weekmask and holidays.
    """
    combined = factor_idx.union(prices_idx)

    day_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    traded = []
    holidays = []

    for weekday, name in enumerate(day_names):
        on_this_day = combined.dayofweek == weekday
        if not on_this_day.any():
            # this weekday is never traded at all: exclude it from the mask
            continue
        traded.append(name)

        # dates of this weekday actually present in the data
        seen = combined[on_this_day].normalize()
        # every occurrence of this weekday within the covered span
        expected = pd.date_range(combined.min(), combined.max(),
                                 freq=CustomBusinessDay(weekmask=name)
                                 ).normalize()
        # occurrences with no data are holidays
        holidays.extend(ts.date() for ts in expected.difference(seen))

    return CustomBusinessDay(weekmask=' '.join(traded), holidays=holidays)
def compute_forward_returns(factor,
                            prices,
                            periods=(1, 5, 10),
                            filter_zscore=None,
                            cumulative_returns=True):
    """
    Finds the N period forward returns (as percent change) for each asset
    provided.

    Parameters
    ----------
    factor : pd.Series - MultiIndex
        A MultiIndex Series indexed by timestamp (level 0) and asset
        (level 1), containing the values for a single alpha factor.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    prices : pd.DataFrame
        Pricing data to use in forward price calculation.
        Assets as columns, dates as index. Pricing data must
        span the factor analysis time period plus an additional buffer window
        that is greater than the maximum number of expected periods
        in the forward returns calculations.
    periods : sequence[int]
        periods to compute forward returns on.
    filter_zscore : int or float, optional
        Sets forward returns greater than X standard deviations
        from the mean to nan. Set it to 'None' to avoid filtering.
        Caution: this outlier filtering incorporates lookahead bias.
    cumulative_returns : bool, optional
        If True, forward returns columns will contain cumulative returns.
        Setting this to False is useful if you want to analyze how predictive
        a factor is for a single forward day.

    Returns
    -------
    forward_returns : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by timestamp (level 0) and asset
        (level 1), containing the forward returns for assets.
        Forward returns column names follow the format accepted by
        pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
        'date' index freq property (forward_returns.index.levels[0].freq)
        will be set to a trading calendar (pandas DateOffset) inferred
        from the input data (see infer_trading_calendar for more details).
    """
    factor_dateindex = factor.index.levels[0]
    # Mixing tz-aware and tz-naive (or differing-tz) indexes would silently
    # mis-align rows, so fail fast instead.
    if factor_dateindex.tz != prices.index.tz:
        raise NonMatchingTimezoneError("The timezone of 'factor' is not the "
                                       "same as the timezone of 'prices'. See "
                                       "the pandas methods tz_localize and "
                                       "tz_convert.")

    freq = infer_trading_calendar(factor_dateindex, prices.index)

    # Only keep factor dates that actually have price data.
    factor_dateindex = factor_dateindex.intersection(prices.index)

    if len(factor_dateindex) == 0:
        raise ValueError("Factor and prices indices don't match: make sure "
                         "they have the same convention in terms of datetimes "
                         "and symbol-names")

    # chop prices down to only the assets we care about (= unique assets in
    # `factor`). we could modify `prices` in place, but that might confuse
    # the caller.
    prices = prices.filter(items=factor.index.levels[1])

    raw_values_dict = {}
    column_list = []

    for period in sorted(periods):
        if cumulative_returns:
            # total return over the whole `period` window
            returns = prices.pct_change(period)
        else:
            # single-step return observed `period` steps ahead
            returns = prices.pct_change()

        # shift so that each row holds the *future* return seen from that date
        forward_returns = \
            returns.shift(-period).reindex(factor_dateindex)

        if filter_zscore is not None:
            mask = abs(
                forward_returns - forward_returns.mean()
            ) > (filter_zscore * forward_returns.std())
            forward_returns[mask] = np.nan

        #
        # Find the period length, which will be the column name. We'll test
        # several entries in order to find out the most likely period length
        # (in case the user passed inconsistent data)
        #
        days_diffs = []
        for i in range(30):
            if i >= len(forward_returns.index):
                break
            p_idx = prices.index.get_loc(forward_returns.index[i])
            if p_idx is None or p_idx < 0 or (
                    p_idx + period) >= len(prices.index):
                continue
            start = prices.index[p_idx]
            end = prices.index[p_idx + period]
            period_len = diff_custom_calendar_timedeltas(start, end, freq)
            days_diffs.append(period_len.components.days)

        # NOTE(review): `period_len` is the last value computed by the loop
        # above; if no sample was usable this raises NameError — assumes the
        # price data always covers at least one usable entry. TODO confirm.
        delta_days = period_len.components.days - mode(days_diffs).mode[0]
        period_len -= pd.Timedelta(days=delta_days)
        label = timedelta_to_string(period_len)

        column_list.append(label)

        raw_values_dict[label] = np.concatenate(forward_returns.values)

    df = pd.DataFrame.from_dict(raw_values_dict)
    df.set_index(
        pd.MultiIndex.from_product(
            [factor_dateindex, prices.columns],
            names=['date', 'asset']
        ),
        inplace=True
    )
    # align to the original factor index (drops dates filtered out above)
    df = df.reindex(factor.index)

    # now set the columns correctly
    df = df[column_list]

    df.index.levels[0].freq = freq
    df.index.set_names(['date', 'asset'], inplace=True)

    return df
def backshift_returns_series(series, N):
    """Shift a multi-indexed series backwards by N observations in
    the first level.

    This can be used to convert backward-looking returns into a
    forward-returns series.

    Parameters
    ----------
    series : pd.Series - MultiIndex
        Series indexed by (date, asset).
    N : int
        Number of observations (in the date level) to shift back by.

    Returns
    -------
    pd.Series - MultiIndex
        Same data with the first-level index shifted back by N dates;
        the rows belonging to the first N dates are dropped.
    """
    ix = series.index
    dates, sids = ix.levels
    # `MultiIndex.labels` was renamed to `.codes` in pandas 0.24 and removed
    # in pandas 1.0 — use the modern accessor.
    date_codes, sid_codes = map(np.array, ix.codes)

    # Output date labels will contain all but the last N dates.
    new_dates = dates[:-N]

    # Output data will remove the first M rows, where M is the index of the
    # last record with one of the first N dates.
    cutoff = date_codes.searchsorted(N)
    new_date_codes = date_codes[cutoff:] - N
    new_sid_codes = sid_codes[cutoff:]
    new_values = series.values[cutoff:]

    assert new_date_codes[0] == 0

    new_index = pd.MultiIndex(
        levels=[new_dates, sids],
        codes=[new_date_codes, new_sid_codes],
        sortorder=1,
        names=ix.names,
    )
    return pd.Series(data=new_values, index=new_index)
def demean_forward_returns(factor_data, grouper=None):
    """
    Convert forward returns to returns relative to mean
    period wise all-universe or group returns.
    group-wise normalization incorporates the assumption of a
    group neutral portfolio constraint and thus allows the
    factor to be evaluated across groups.

    For example, if AAPL 5 period return is 0.1% and mean 5 period
    return for the Technology stocks in our universe was 0.5% in the
    same period, the group adjusted 5 period return for AAPL in this
    period is -0.4%.

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        Forward returns indexed by date and asset.
        Separate column for each forward return window.
        Must also contain a 'weights' column (see note below).
    grouper : list
        If True, demean according to group.

    Returns
    -------
    adjusted_forward_returns : pd.DataFrame - MultiIndex
        DataFrame of the same format as the input, but with each
        security's returns normalized by group.
    """
    factor_data = factor_data.copy()

    if not grouper:
        # default: demean within each date across the whole universe
        grouper = factor_data.index.get_level_values('date')

    cols = get_forward_returns_columns(factor_data.columns)
    # NOTE(review): unlike a plain per-group mean subtraction, this is a
    # *weighted* demean — it requires a 'weights' column in factor_data
    # (NaN weights are treated as 0). Confirm that all callers provide it.
    factor_data[cols] = factor_data.groupby(
        grouper, as_index=False
    )[cols.append(pd.Index(['weights']))].apply(
        lambda x: x[cols].subtract(
            np.average(x[cols], axis=0,
                       weights=x['weights'].fillna(0.0).values),
            axis=1
        )
    )

    return factor_data
def print_table(table, name=None, fmt=None):
    """
    Pretty print a pandas DataFrame.

    Uses HTML output if running inside Jupyter Notebook, otherwise
    formatted text output.

    Parameters
    ----------
    table : pd.Series or pd.DataFrame
        Table to pretty-print. A Series is converted to a one-column frame.
    name : str, optional
        Table name to display in upper left corner.
    fmt : str, optional
        Formatter to use for displaying table elements.
        E.g. '{0:.2f}%' for displaying 100 as '100.00%'.
        Restores original setting after displaying.
    """
    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)

    if isinstance(table, pd.DataFrame):
        table.columns.name = name

    prev_option = pd.get_option('display.float_format')
    if fmt is not None:
        pd.set_option('display.float_format', lambda x: fmt.format(x))

    try:
        display(table)
    finally:
        # Restore the caller's float format even if rendering raises,
        # so a failed display cannot leak the temporary option.
        if fmt is not None:
            pd.set_option('display.float_format', prev_option)
def get_clean_factor(factor,
                     forward_returns,
                     groupby=None,
                     binning_by_group=False,
                     quantiles=5,
                     bins=None,
                     groupby_labels=None,
                     max_loss=0.35,
                     zero_aware=False):
    """
    Align factor values with pre-computed forward returns, attach optional
    group information, and bucket the factor into quantiles/bins, producing
    the aligned MultiIndex DataFrame the other Alphalens functions expect.

    It is safe to skip a call to this function and still make use of
    Alphalens functionalities as long as the factor data conforms to the
    format returned from get_clean_factor_and_forward_returns.

    Parameters
    ----------
    factor : pd.Series - MultiIndex
        Alpha factor values indexed by timestamp (level 0) and asset
        (level 1).
    forward_returns : pd.DataFrame - MultiIndex
        Forward returns indexed like ``factor``. Column names must follow
        the pd.Timedelta format (e.g. '1D', '30m', '3h15m') and the 'date'
        index level freq must be set to a trading calendar (pandas
        DateOffset); see infer_trading_calendar. Currently used only in
        cumulative returns computation.
    groupby : pd.Series - MultiIndex or dict, optional
        Per-date asset-to-group codes, or a static asset->group dict (the
        mapping is then assumed constant over the whole period).
    binning_by_group : bool
        If True, compute quantile buckets separately for each group.
        Useful when factor value ranges differ considerably across groups,
        e.g. for a group neutral portfolio.
    quantiles : int or sequence[float]
        Number of equal-sized quantile buckets, or an explicit sequence of
        quantile edges (e.g. [0, .10, .5, .90, 1.]).
        Only one of 'quantiles' or 'bins' can be not-None.
    bins : int or sequence[float]
        Number of equal-width (valuewise) bins, or explicit bin edges
        (e.g. [-4, -2, -0.5, 0, 10]). Useful for discrete factor values.
        Only one of 'quantiles' or 'bins' can be not-None.
    groupby_labels : dict, optional
        Group code -> display name mapping.
    max_loss : float, optional
        Maximum fraction (0.00 to 1.00) of factor rows allowed to be
        dropped (non-finite factors, missing forward returns, failed
        binning). Exceeding it raises MaxLossExceededError. Set max_loss=0
        to also disable exception suppression during binning.
    zero_aware : bool, optional
        If True, bucket positive and negative factor values separately
        (for signals centered at zero separating long and short).

    Returns
    -------
    merged_data : pd.DataFrame - MultiIndex
        Indexed by date (level 0) and asset (level 1), containing the
        forward-return columns plus 'factor', 'factor_quantile' and
        (optionally) 'group'. The 'date' level freq matches the input
        forward returns.
    """
    initial_amount = float(len(factor.index))

    fct = factor.copy()
    fct.index = fct.index.rename(['date', 'asset'])
    # drop NaN/inf factor values up front
    fct = fct[np.isfinite(fct)]

    merged_data = forward_returns.copy()
    merged_data['factor'] = fct

    if groupby is not None:
        if isinstance(groupby, dict):
            # static mapping: every asset present in the factor must appear
            missing = set(fct.index.get_level_values('asset')) \
                - set(groupby.keys())
            if len(missing) > 0:
                raise KeyError(
                    "Assets {} not in group mapping".format(
                        list(missing)))

            mapper = pd.Series(groupby)
            groupby = pd.Series(
                index=fct.index,
                data=mapper[fct.index.get_level_values('asset')].values)

        if groupby_labels is not None:
            unknown = set(groupby.values) - set(groupby_labels.keys())
            if len(unknown) > 0:
                raise KeyError(
                    "groups {} not in passed group names".format(
                        list(unknown)))

            names = pd.Series(groupby_labels)
            groupby = pd.Series(index=groupby.index,
                                data=names[groupby.values].values)

        merged_data['group'] = groupby.astype('category')

    # rows lost to non-finite factors or missing forward returns
    merged_data = merged_data.dropna()
    fwdret_amount = float(len(merged_data.index))

    # with max_loss == 0 binning errors must surface instead of being
    # converted to NaNs
    no_raise = max_loss != 0
    merged_data['factor_quantile'] = quantize_factor(merged_data,
                                                     quantiles,
                                                     bins,
                                                     binning_by_group,
                                                     no_raise,
                                                     zero_aware)
    merged_data = merged_data.dropna()
    binning_amount = float(len(merged_data.index))

    tot_loss = (initial_amount - binning_amount) / initial_amount
    fwdret_loss = (initial_amount - fwdret_amount) / initial_amount
    bin_loss = tot_loss - fwdret_loss

    print("Dropped %.1f%% entries from factor data: %.1f%% in forward "
          "returns computation and %.1f%% in binning phase "
          "(set max_loss=0 to see potentially suppressed Exceptions)." %
          (tot_loss * 100, fwdret_loss * 100, bin_loss * 100))

    if tot_loss > max_loss:
        message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it."
                   % (max_loss * 100, tot_loss * 100))
        raise MaxLossExceededError(message)
    else:
        print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100))

    return merged_data
def get_clean_factor_and_forward_returns(factor,
                                         prices,
                                         groupby=None,
                                         binning_by_group=False,
                                         quantiles=5,
                                         bins=None,
                                         periods=(1, 5, 10),
                                         filter_zscore=20,
                                         groupby_labels=None,
                                         max_loss=0.35,
                                         zero_aware=False,
                                         cumulative_returns=True):
    """
    Format factor, pricing and (optional) group data into the aligned
    MultiIndex DataFrame expected by the Alphalens functions.

    This is a convenience wrapper: forward returns are first derived from
    ``prices`` via compute_forward_returns and the result is then merged,
    grouped and quantized via get_clean_factor. It is safe to skip a call
    to this function as long as the factor data conforms to the format it
    returns.

    Parameters
    ----------
    factor : pd.Series - MultiIndex
        Alpha factor values indexed by timestamp (level 0) and asset
        (level 1).
    prices : pd.DataFrame
        Wide-form pricing data: assets as columns, timestamps as index.
        Must contain a price for every timestamp/asset pair in ``factor``
        (the buy price — usually the first price available after the
        factor is computed, to avoid lookahead bias) plus a buffer of at
        least ``max(periods)`` further timestamps for the sell prices used
        in the forward-return calculation.
    groupby : pd.Series - MultiIndex or dict, optional
        Per-date asset-to-group codes, or a static asset->group dict (the
        mapping is then assumed constant over the whole period).
    binning_by_group : bool
        If True, compute quantile buckets separately for each group
        (useful for group neutral portfolios).
    quantiles : int or sequence[float]
        Number of equal-sized quantile buckets, or explicit quantile
        edges. Only one of 'quantiles' or 'bins' can be not-None.
    bins : int or sequence[float]
        Number of equal-width bins, or explicit bin edges (useful for
        discrete factors). Only one of 'quantiles' or 'bins' can be
        not-None.
    periods : sequence[int]
        Periods to compute forward returns on.
    filter_zscore : int or float, optional
        Forward returns further than this many standard deviations from
        the mean are set to NaN ('None' disables filtering; note this
        incorporates lookahead bias).
    groupby_labels : dict, optional
        Group code -> display name mapping.
    max_loss : float, optional
        Maximum fraction (0.00 to 1.00) of factor rows allowed to be
        dropped during cleaning; exceeding it raises MaxLossExceededError.
        Set max_loss=0 to disable exception suppression during binning.
    zero_aware : bool, optional
        If True, bucket positive and negative factor values separately.
    cumulative_returns : bool, optional
        If True, forward-return columns contain cumulative returns;
        False gives single-period returns.

    Returns
    -------
    merged_data : pd.DataFrame - MultiIndex
        Indexed by date (level 0) and asset (level 1), containing one
        column per forward-return horizon (named in pd.Timedelta style,
        e.g. '1D'), plus 'factor', 'factor_quantile' and optionally
        'group'. The 'date' level freq is set to the inferred trading
        calendar.

    See Also
    --------
    utils.get_clean_factor
        For use when forward returns are already available.
    """
    forward_returns = compute_forward_returns(
        factor,
        prices,
        periods=periods,
        filter_zscore=filter_zscore,
        cumulative_returns=cumulative_returns,
    )

    return get_clean_factor(factor,
                            forward_returns,
                            groupby=groupby,
                            groupby_labels=groupby_labels,
                            quantiles=quantiles,
                            bins=bins,
                            binning_by_group=binning_by_group,
                            max_loss=max_loss,
                            zero_aware=zero_aware)
def rate_of_returns(ret, period):
    """Compound the NaN-safe total of *ret* down to a single-period rate:
    ``(1 + nansum(ret)) ** (1 / period) - 1``."""
    total_growth = np.nansum(ret) + 1
    return total_growth ** (1.0 / period) - 1
def rate_of_return(period_ret, base_period):
    """
    Convert returns to 'one_period_len' rate of returns: the value the
    returns would have every 'one_period_len' had they grown at a steady
    rate.

    Parameters
    ----------
    period_ret : pd.DataFrame
        Returns values; the object's ``name`` attribute encodes its period
        length (e.g. '5D').
    base_period : string
        The base period length used in the conversion, in pandas.Timedelta
        constructor format (e.g. '1 days', '1D', '30m', '3h', '1D1h').

    Returns
    -------
    pd.DataFrame
        Same format as the input, rescaled to 'one_period_len' rate of
        returns values.
    """
    conversion_factor = (pd.Timedelta(base_period) /
                         pd.Timedelta(period_ret.name))
    return (period_ret + 1).pow(conversion_factor) - 1
def std_conversion(period_std, base_period):
    """
    Approximate the one-period standard deviation (or standard error) by
    dividing by the square root of the period-length ratio.

    Parameters
    ----------
    period_std : pd.DataFrame
        Standard deviation or standard error values; the object's ``name``
        attribute encodes its period length (e.g. '5D').
    base_period : string
        The base period length used in the conversion, in pandas.Timedelta
        constructor format (e.g. '1 days', '1D', '30m', '3h', '1D1h').

    Returns
    -------
    pd.DataFrame
        Same format as the input but with one-period standard
        deviation/error values.
    """
    ratio = (pd.Timedelta(period_std.name) /
             pd.Timedelta(base_period))
    return period_std / np.sqrt(ratio)
def get_forward_returns_columns(columns):
    """
    Return the subset of *columns* that hold forward-return data.

    Forward-return columns are named like ``return(<period>)``, matched
    case-insensitively (e.g. 'return(1D)').
    """
    pattern = re.compile(r"^(return\(.+\))$", re.IGNORECASE)
    is_forward_return = [bool(pattern.match(col)) for col in columns]
    return columns[is_forward_return]
def timedelta_to_string(timedelta):
    """
    Utility that converts a pandas.Timedelta to a string representation
    compatible with the pandas.Timedelta constructor format.

    Parameters
    ----------
    timedelta : pd.Timedelta

    Returns
    -------
    string
        string representation of 'timedelta' (e.g. '1D2h30m')
    """
    c = timedelta.components
    parts = []
    # days use != 0 so a negative day count is still emitted
    if c.days != 0:
        parts.append('%dD' % c.days)
    for amount, suffix in ((c.hours, 'h'),
                           (c.minutes, 'm'),
                           (c.seconds, 's'),
                           (c.milliseconds, 'ms'),
                           (c.microseconds, 'us'),
                           (c.nanoseconds, 'ns')):
        if amount > 0:
            parts.append('%d%s' % (amount, suffix))
    return ''.join(parts)
def timedelta_strings_to_integers(sequence):
    """
    Convert pandas timedelta string representations into integer day
    counts.

    Parameters
    ----------
    sequence : iterable
        List or array of timedelta string representations, e.g.
        ['1D', '5D'].

    Returns
    -------
    list
        Whole days for each input element, e.g. [1, 5].
    """
    return [pd.Timedelta(tds).days for tds in sequence]
def add_custom_calendar_timedelta(input, timedelta, freq):
    """
    Add 'timedelta' to 'input', advancing the whole-day component with the
    custom calendar 'freq' (so non-trading days are skipped) and adding the
    intraday remainder as a plain offset.

    Parameters
    ----------
    input : pd.DatetimeIndex or pd.Timestamp
    timedelta : pd.Timedelta
    freq : pd.DataOffset (CustomBusinessDay, Day or BusinessDay)

    Returns
    -------
    pd.DatetimeIndex or pd.Timestamp
        input + timedelta
    """
    if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
        raise ValueError("freq must be Day, BDay or CustomBusinessDay")
    whole_days = timedelta.components.days
    intraday = timedelta - pd.Timedelta(days=whole_days)
    return input + freq * whole_days + intraday
def diff_custom_calendar_timedeltas(start, end, freq):
    """
    Compute the difference between two timestamps taking into consideration
    custom frequency, which is used to deal with custom calendars, such as
    a trading calendar: calendar-excluded whole days are removed from the
    plain ``end - start`` difference.

    Parameters
    ----------
    start : pd.Timestamp
    end : pd.Timestamp
    freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay)
        See infer_trading_calendar.

    Returns
    -------
    pd.Timedelta
        end - start, counting only days the calendar trades.
    """
    if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
        raise ValueError("freq must be Day, BusinessDay or CustomBusinessDay")

    weekmask = getattr(freq, 'weekmask', None)
    holidays = getattr(freq, 'holidays', None)

    if weekmask is None and holidays is None:
        # Day/BusinessDay carry no calendar info: supply the equivalents
        if isinstance(freq, Day):
            weekmask = 'Mon Tue Wed Thu Fri Sat Sun'
            holidays = []
        elif isinstance(freq, BusinessDay):
            weekmask = 'Mon Tue Wed Thu Fri'
            holidays = []

    if weekmask is not None and holidays is not None:
        # we prefer this method as it is faster
        actual_days = np.busday_count(np.array(start).astype('datetime64[D]'),
                                      np.array(end).astype('datetime64[D]'),
                                      weekmask, holidays)
    else:
        # default, it is slow
        actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1
        # `onOffset` was renamed to `is_on_offset` in pandas 1.1 and removed
        # in pandas 2.0; support both spellings.
        is_on_offset = getattr(freq, 'is_on_offset', None) or freq.onOffset
        if not is_on_offset(start):
            actual_days -= 1

    timediff = end - start
    delta_days = timediff.components.days - actual_days
    return timediff - pd.Timedelta(days=delta_days)
def convert_to_forward_returns_columns(period):
    """Return the 'period_N' column name for an integer *period*.

    Non-integer periods (e.g. already-formatted timedelta labels such as
    '1D', or floats) make the '{:d}' format spec raise ValueError and are
    returned unchanged.
    """
    try:
        return 'period_{:d}'.format(period)
    except ValueError:
        return period
def ignore_warning(message='', category=Warning, module='', lineno=0, append=False):
    """Build a decorator that suppresses matching warnings while the
    wrapped callable executes.

    All parameters are forwarded verbatim to ``warnings.filterwarnings``.
    The filter is installed inside ``warnings.catch_warnings`` so the
    caller's warning state is restored after each call.
    """
    def _decorate(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message=message,
                                        category=category, module=module,
                                        lineno=lineno, append=append)
                return func(*args, **kwargs)
        return _wrapped
    return _decorate
def ensure_tuple(x):
    """Return *x* as a tuple.

    Strings are treated as scalars (wrapped in a one-element tuple) rather
    than being iterated character by character; any other non-iterable is
    wrapped likewise. Iterables are materialized with ``tuple()``.
    """
    # `str` replaces the old `six.string_types` check: on Python 3 they are
    # equivalent, so the py2 compatibility shim is unnecessary.
    if isinstance(x, str) or not isinstance(x, Iterable):
        return (x,)
    else:
        return tuple(x)
def Indicators(value):
    """Compute a set of performance statistics from a net-value curve.

    Parameters
    ----------
    value : sequence[float]
        Portfolio value series, one observation per trading day (the
        first observation is used as the normalization base).

    Returns
    -------
    pd.Series
        Indexed by the Chinese metric names: total return, annualized
        return (assumes 250 trading days/year, risk-free rate 4%),
        volatility, Sharpe ratio, max drawdown, Calmar ratio, daily win
        rate and profit/loss ratio. Values are percentages/ratios rounded
        as in the original report format.
    """
    columns = ['总收益', '年化收益', '波动率', '夏普比', '最大回撤', '卡玛比率', '日胜率', '盈亏比']

    def worst_drawdown(series):
        # largest loss from each point to any later point in the curve
        losses = [(1 - series[i:] / peak).max() for i, peak in enumerate(series)]
        return max(losses)

    # normalize so the curve starts at 1.0
    norm = [v / value[0] for v in value]
    total_rtn = round(norm[-1] * 100 - 100, 2)
    annual_rtn = round(pow(norm[-1], 250 / len(norm)) * 100 - 100, 2)

    norm = pd.Series(norm)
    daily_rtns = norm.pct_change(1).dropna()
    volatility = round(sqrt(daily_rtns.var() * 250) * 100, 2)
    sharpe = round((annual_rtn - 4) / volatility, 2)
    max_drawdown = round(worst_drawdown(norm) * 100, 2)
    calmar = round(annual_rtn / max_drawdown, 2)

    gains = [r for r in daily_rtns.values if r > 0]
    losses = [r for r in daily_rtns.values if r < 0]
    win_rate = round(len(gains) / (len(gains) + len(losses)) * 100, 3)
    pl_ratio = round(-mean(gains) / mean(losses), 3)

    data = [total_rtn, annual_rtn, volatility, sharpe,
            max_drawdown, calmar, win_rate, pl_ratio]
    return pd.Series(index=columns, data=data)
| 39.383985 | 85 | 0.575676 | from numpy import sqrt, mean
from collections import Iterable
from functools import wraps
import six
import pandas as pd
import numpy as np
import re
import warnings
from IPython.display import display
from pandas.tseries.offsets import CustomBusinessDay, Day, BusinessDay
from scipy.stats import mode
class NonMatchingTimezoneError(Exception):
    """Raised when the factor index timezone differs from the prices index
    timezone (see compute_forward_returns)."""
    pass
class MaxLossExceededError(Exception):
    """Raised by get_clean_factor when the fraction of dropped factor rows
    exceeds the caller's max_loss threshold."""
    pass
def rethrow(exception, additional_message):
    """
    Re-raise *exception* with *additional_message* appended to its first
    argument (or used as the sole argument when there are none), keeping
    the exception type intact.
    """
    exc = exception
    extra = additional_message
    if exc.args:
        exc.args = (exc.args[0] + extra,) + exc.args[1:]
    else:
        exc.args = (extra,)
    raise exc
def non_unique_bin_edges_error(func):
    """Decorator that catches pandas' 'Bin edges must be unique' ValueError
    (raised by qcut/cut when too many identical factor values fall on a
    quantile boundary) and re-raises it with workaround suggestions
    appended to the message."""
    message = """
    An error occurred while computing bins/quantiles on the input provided.
    This usually happens when the input contains too many identical
    values and they span more than one quantile. The quantiles are choosen
    to have the same number of records each, but the same value cannot span
    multiple quantiles. Possible workarounds are:
    1 - Decrease the number of quantiles
    2 - Specify a custom quantiles range, e.g. [0, .50, .75, 1.] to get unequal
        number of records per quantile
    3 - Use 'bins' option instead of 'quantiles', 'bins' chooses the
        buckets to be evenly spaced according to the values themselves, while
        'quantiles' forces the buckets to have the same number of records.
    4 - for factors with discrete values use the 'bins' option with custom
        ranges and create a range for each discrete value
    Please see utils.get_clean_factor_and_forward_returns documentation for
    full documentation of 'bins' and 'quantiles' options.
    """

    def dec(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ValueError as e:
            if 'Bin edges must be unique' in str(e):
                # append the workaround text and re-raise the same exception
                rethrow(e, message)
            # any other ValueError propagates unchanged
            raise
    return dec
@non_unique_bin_edges_error
def quantize_factor(factor_data,
                    quantiles=5,
                    bins=None,
                    by_group=False,
                    no_raise=False,
                    zero_aware=False):
    """Bucket each date's factor values into 1-based quantile/bin labels.

    Exactly one of ``quantiles`` / ``bins`` must be provided:
    'quantiles' gives equal-count buckets (``pd.qcut``), 'bins' gives
    equal-width buckets (``pd.cut``). With ``zero_aware=True`` positive and
    negative values are bucketed separately around zero (requires an integer
    quantiles/bins count). With ``by_group=True`` bucketing also happens
    within each 'group' level. With ``no_raise=True`` groups that fail to
    quantize yield NaN labels instead of raising.

    Returns a Series named 'factor_quantile', indexed like ``factor_data``,
    with NaN rows dropped.
    """
    if not ((quantiles is not None and bins is None) or
            (quantiles is None and bins is not None)):
        raise ValueError('Either quantiles or bins should be provided')
    if zero_aware and not (isinstance(quantiles, int)
                           or isinstance(bins, int)):
        msg = ("zero_aware should only be True when quantiles or bins is an"
               " integer")
        raise ValueError(msg)
    def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):
        # Labels start at 1; in the zero-aware variants negatives take the
        # lower half of the label range and non-negatives the upper half.
        try:
            if _quantiles is not None and _bins is None and not _zero_aware:
                return pd.qcut(x, _quantiles, labels=False) + 1
            elif _quantiles is not None and _bins is None and _zero_aware:
                pos_quantiles = pd.qcut(x[x >= 0], _quantiles // 2,
                                        labels=False) + _quantiles // 2 + 1
                neg_quantiles = pd.qcut(x[x < 0], _quantiles // 2,
                                        labels=False) + 1
                return pd.concat([pos_quantiles, neg_quantiles]).sort_index()
            elif _bins is not None and _quantiles is None and not _zero_aware:
                return pd.cut(x, _bins, labels=False) + 1
            elif _bins is not None and _quantiles is None and _zero_aware:
                pos_bins = pd.cut(x[x >= 0], _bins // 2,
                                  labels=False) + _bins // 2 + 1
                neg_bins = pd.cut(x[x < 0], _bins // 2,
                                  labels=False) + 1
                return pd.concat([pos_bins, neg_bins]).sort_index()
        except Exception as e:
            if _no_raise:
                # Swallow the error and emit all-NaN labels for this group.
                return pd.Series(index=x.index)
            raise e
    grouper = [factor_data.index.get_level_values('date')]
    if by_group:
        # Quantize within each (date, group) pair rather than per date only.
        grouper.append('group')
    factor_quantile = factor_data.groupby(grouper)['factor'] \
        .apply(quantile_calc, quantiles, bins, zero_aware, no_raise)
    factor_quantile.name = 'factor_quantile'
    return factor_quantile.dropna()
def infer_trading_calendar(factor_idx, prices_idx):
    """Infer a trading calendar from the union of two DatetimeIndexes.

    A weekday is considered traded if it ever appears in either index;
    dates on traded weekdays that never appear are recorded as holidays.
    Returns a ``CustomBusinessDay`` offset with that weekmask and holiday list.
    """
    full_idx = factor_idx.union(prices_idx)
    traded_weekdays = []
    holidays = []
    days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    for day, day_str in enumerate(days_of_the_week):
        weekday_mask = (full_idx.dayofweek == day)
        # Weekdays that never occur in the data are simply non-trading days.
        if not weekday_mask.any():
            continue
        traded_weekdays.append(day_str)
        # Any date on a traded weekday that is absent from the data counts
        # as a holiday.
        used_weekdays = full_idx[weekday_mask].normalize()
        all_weekdays = pd.date_range(full_idx.min(), full_idx.max(),
                                     freq=CustomBusinessDay(weekmask=day_str)
                                     ).normalize()
        _holidays = all_weekdays.difference(used_weekdays)
        _holidays = [timestamp.date() for timestamp in _holidays]
        holidays.extend(_holidays)
    traded_weekdays = ' '.join(traded_weekdays)
    return CustomBusinessDay(weekmask=traded_weekdays, holidays=holidays)
def compute_forward_returns(factor,
                            prices,
                            periods=(1, 5, 10),
                            filter_zscore=None,
                            cumulative_returns=True):
    """Compute forward returns of ``prices`` for each (date, asset) in ``factor``.

    ``factor`` is a (date, asset) MultiIndexed Series; ``prices`` is a wide
    DataFrame (dates x assets). Returns a DataFrame indexed like ``factor``
    with one column per period, labelled by the inferred calendar length of
    that period (e.g. '1D'). If ``filter_zscore`` is given, returns more than
    that many standard deviations from the mean are masked to NaN.

    Raises NonMatchingTimezoneError when the two indexes have different
    timezones, and ValueError when they share no dates.
    """
    factor_dateindex = factor.index.levels[0]
    if factor_dateindex.tz != prices.index.tz:
        raise NonMatchingTimezoneError("The timezone of 'factor' is not the "
                                       "same as the timezone of 'prices'. See "
                                       "the pandas methods tz_localize and "
                                       "tz_convert.")
    freq = infer_trading_calendar(factor_dateindex, prices.index)
    factor_dateindex = factor_dateindex.intersection(prices.index)
    if len(factor_dateindex) == 0:
        raise ValueError("Factor and prices indices don't match: make sure "
                         "they have the same convention in terms of datetimes "
                         "and symbol-names")
    # chop prices down to only the assets we care about (= unique assets in
    # `factor`).  we could modify `prices` in place, but that might confuse
    # the caller.
    prices = prices.filter(items=factor.index.levels[1])
    raw_values_dict = {}
    column_list = []
    for period in sorted(periods):
        if cumulative_returns:
            returns = prices.pct_change(period)
        else:
            returns = prices.pct_change()
        forward_returns = \
            returns.shift(-period).reindex(factor_dateindex)
        if filter_zscore is not None:
            mask = abs(
                forward_returns - forward_returns.mean()
            ) > (filter_zscore * forward_returns.std())
            forward_returns[mask] = np.nan
        # Find the period length, which will be the column name. We'll test
        # up to 30 rows and take the modal day-count, so occasional irregular
        # calendar gaps don't change the label.
        days_diffs = []
        for i in range(30):
            if i >= len(forward_returns.index):
                break
            p_idx = prices.index.get_loc(forward_returns.index[i])
            if p_idx is None or p_idx < 0 or (
                    p_idx + period) >= len(prices.index):
                continue
            start = prices.index[p_idx]
            end = prices.index[p_idx + period]
            period_len = diff_custom_calendar_timedeltas(start, end, freq)
            days_diffs.append(period_len.components.days)
        # Snap the last measured period length to the modal day-count.
        delta_days = period_len.components.days - mode(days_diffs).mode[0]
        period_len -= pd.Timedelta(days=delta_days)
        label = timedelta_to_string(period_len)
        column_list.append(label)
        raw_values_dict[label] = np.concatenate(forward_returns.values)
    df = pd.DataFrame.from_dict(raw_values_dict)
    df.set_index(
        pd.MultiIndex.from_product(
            [factor_dateindex, prices.columns],
            names=['date', 'asset']
        ),
        inplace=True
    )
    df = df.reindex(factor.index)
    # Keep columns in ascending-period order.
    df = df[column_list]
    df.index.levels[0].freq = freq
    df.index.set_names(['date', 'asset'], inplace=True)
    return df
def backshift_returns_series(series, N):
    """Shift a (date, sid) MultiIndexed Series' values back by N dates.

    The values originally aligned with dates[N:] are re-labelled onto
    dates[:-N]; rows for the first N dates are dropped.

    Fix: ``MultiIndex.labels`` and the ``labels=`` constructor keyword were
    deprecated in pandas 0.24 and removed in 1.0 -- use ``codes`` instead.
    """
    ix = series.index
    dates, sids = ix.levels
    date_labels, sid_labels = map(np.array, ix.codes)
    # Drop the last N dates from the date level...
    new_dates = dates[:-N]
    # ...and drop the rows belonging to the first N dates.
    cutoff = date_labels.searchsorted(N)
    new_date_labels = date_labels[cutoff:] - N
    new_sid_labels = sid_labels[cutoff:]
    new_values = series.values[cutoff:]
    assert new_date_labels[0] == 0
    new_index = pd.MultiIndex(
        levels=[new_dates, sids],
        codes=[new_date_labels, new_sid_labels],
        sortorder=1,
        names=ix.names,
    )
    return pd.Series(data=new_values, index=new_index)
def demean_forward_returns(factor_data, grouper=None):
    """Convert forward returns to excess returns.

    For each group (by default, each date) the 'weights'-weighted mean of
    every forward-return column is subtracted from that column. Returns a
    modified copy; the input is not mutated.
    """
    factor_data = factor_data.copy()
    if not grouper:
        grouper = factor_data.index.get_level_values('date')
    cols = get_forward_returns_columns(factor_data.columns)
    # 'weights' is carried along in the group selection so the lambda can
    # use it; NaN weights are treated as zero.
    factor_data[cols] = factor_data.groupby(
        grouper, as_index=False
    )[cols.append(pd.Index(['weights']))].apply(
        lambda x: x[cols].subtract(
            np.average(x[cols], axis=0,
                       weights=x['weights'].fillna(0.0).values),
            axis=1
        )
    )
    return factor_data
def print_table(table, name=None, fmt=None):
    """Pretty-print a Series/DataFrame via IPython ``display``.

    ``name`` is set as the columns' name; ``fmt`` (e.g. ``'{0:.2f}%'``) is
    temporarily installed as pandas' global float format.

    Fix: the previous implementation left the global
    'display.float_format' option permanently changed if ``display``
    raised; restoration now happens in a ``finally`` block.
    """
    if isinstance(table, pd.Series):
        table = pd.DataFrame(table)
    if isinstance(table, pd.DataFrame):
        table.columns.name = name
    prev_option = pd.get_option('display.float_format')
    if fmt is not None:
        pd.set_option('display.float_format', lambda x: fmt.format(x))
    try:
        display(table)
    finally:
        # Always restore the global float format, even if rendering raises.
        if fmt is not None:
            pd.set_option('display.float_format', prev_option)
def get_clean_factor(factor,
                     forward_returns,
                     groupby=None,
                     binning_by_group=False,
                     quantiles=5,
                     bins=None,
                     groupby_labels=None,
                     max_loss=0.35,
                     zero_aware=False):
    """Merge factor values with forward returns, attach groups and quantiles.

    ``factor`` is a (date, asset) MultiIndexed Series; ``forward_returns``
    a DataFrame indexed the same way. ``groupby`` may be an asset->group
    dict or a Series; ``groupby_labels`` maps group values to display names.
    Quantization parameters are passed through to ``quantize_factor``.

    Rows lost to NaNs (missing forward returns or failed binning) are
    dropped; if the dropped fraction exceeds ``max_loss``,
    MaxLossExceededError is raised. Returns the merged DataFrame with
    'factor', optional 'group', and 'factor_quantile' columns.
    """
    initial_amount = float(len(factor.index))
    factor_copy = factor.copy()
    factor_copy.index = factor_copy.index.rename(['date', 'asset'])
    # Drop NaN/inf factor values up front.
    factor_copy = factor_copy[np.isfinite(factor_copy)]
    merged_data = forward_returns.copy()
    merged_data['factor'] = factor_copy
    if groupby is not None:
        if isinstance(groupby, dict):
            # Every asset in the factor must have a group mapping.
            diff = set(factor_copy.index.get_level_values(
                'asset')) - set(groupby.keys())
            if len(diff) > 0:
                raise KeyError(
                    "Assets {} not in group mapping".format(
                        list(diff)))
            ss = pd.Series(groupby)
            groupby = pd.Series(index=factor_copy.index,
                                data=ss[factor_copy.index.get_level_values(
                                    'asset')].values)
        if groupby_labels is not None:
            # Every group value must have a display label.
            diff = set(groupby.values) - set(groupby_labels.keys())
            if len(diff) > 0:
                raise KeyError(
                    "groups {} not in passed group names".format(
                        list(diff)))
            sn = pd.Series(groupby_labels)
            groupby = pd.Series(index=groupby.index,
                                data=sn[groupby.values].values)
        merged_data['group'] = groupby.astype('category')
    merged_data = merged_data.dropna()
    # Rows surviving the forward-returns merge (before binning losses).
    fwdret_amount = float(len(merged_data.index))
    # With max_loss == 0 we want binning errors raised immediately.
    no_raise = False if max_loss == 0 else True
    quantile_data = quantize_factor(
        merged_data,
        quantiles,
        bins,
        binning_by_group,
        no_raise,
        zero_aware
    )
    merged_data['factor_quantile'] = quantile_data
    merged_data = merged_data.dropna()
    binning_amount = float(len(merged_data.index))
    tot_loss = (initial_amount - binning_amount) / initial_amount
    fwdret_loss = (initial_amount - fwdret_amount) / initial_amount
    bin_loss = tot_loss - fwdret_loss
    print("Dropped %.1f%% entries from factor data: %.1f%% in forward "
          "returns computation and %.1f%% in binning phase "
          "(set max_loss=0 to see potentially suppressed Exceptions)." %
          (tot_loss * 100, fwdret_loss * 100, bin_loss * 100))
    if tot_loss > max_loss:
        message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it."
                   % (max_loss * 100, tot_loss * 100))
        raise MaxLossExceededError(message)
    else:
        print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100))
    return merged_data
def get_clean_factor_and_forward_returns(factor,
                                         prices,
                                         groupby=None,
                                         binning_by_group=False,
                                         quantiles=5,
                                         bins=None,
                                         periods=(1, 5, 10),
                                         filter_zscore=20,
                                         groupby_labels=None,
                                         max_loss=0.35,
                                         zero_aware=False,
                                         cumulative_returns=True):
    """Convenience wrapper: compute forward returns from prices, then clean
    and quantize the factor against them.

    See ``compute_forward_returns`` and ``get_clean_factor`` for parameter
    details; this simply chains the two.
    """
    forward_returns = compute_forward_returns(
        factor=factor,
        prices=prices,
        periods=periods,
        filter_zscore=filter_zscore,
        cumulative_returns=cumulative_returns,
    )
    return get_clean_factor(
        factor,
        forward_returns,
        groupby=groupby,
        groupby_labels=groupby_labels,
        quantiles=quantiles,
        bins=bins,
        binning_by_group=binning_by_group,
        max_loss=max_loss,
        zero_aware=zero_aware,
    )
def rate_of_returns(ret, period):
    """Geometric per-period rate implied by the summed returns over *period* periods."""
    total_growth = np.nansum(ret) + 1
    return total_growth ** (1.0 / period) - 1
def rate_of_return(period_ret, base_period):
    """Convert returns spanning ``period_ret.name`` (a timedelta string)
    to equivalent compounded returns over ``base_period``.
    """
    scaling = pd.Timedelta(base_period) / pd.Timedelta(period_ret.name)
    return period_ret.add(1).pow(scaling).sub(1)
def std_conversion(period_std, base_period):
    """Rescale a standard deviation measured over ``period_std.name``
    to ``base_period`` using the square-root-of-time rule.
    """
    scaling = pd.Timedelta(period_std.name) / pd.Timedelta(base_period)
    return period_std / np.sqrt(scaling)
def get_forward_returns_columns(columns):
    """Filter a column index down to forward-return columns of the form
    'return(...)' (matched case-insensitively).
    """
    fwd_ret_re = re.compile(r"^(return\(.+\))$", re.IGNORECASE)
    mask = [fwd_ret_re.match(name) is not None for name in columns]
    return columns[mask]
def timedelta_to_string(timedelta):
    """Render a pd.Timedelta compactly, e.g. '1D2h5m'.

    Days are included whenever non-zero; each smaller component only when
    strictly positive.
    """
    c = timedelta.components
    parts = []
    if c.days != 0:
        parts.append('%dD' % c.days)
    for amount, suffix in ((c.hours, 'h'),
                           (c.minutes, 'm'),
                           (c.seconds, 's'),
                           (c.milliseconds, 'ms'),
                           (c.microseconds, 'us'),
                           (c.nanoseconds, 'ns')):
        if amount > 0:
            parts.append('%d%s' % (amount, suffix))
    return ''.join(parts)
def timedelta_strings_to_integers(sequence):
    """Convert timedelta strings (e.g. '1D', '2 days') to whole-day integers."""
    return [pd.Timedelta(item).days for item in sequence]
def add_custom_calendar_timedelta(input, timedelta, freq):
    """Add *timedelta* to *input*, stepping whole days along calendar *freq*.

    The whole-day part of the timedelta advances via the offset (so
    non-trading days are skipped); the sub-day remainder is added directly.
    """
    if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
        raise ValueError("freq must be Day, BDay or CustomBusinessDay")
    whole_days = timedelta.components.days
    intraday_part = timedelta - pd.Timedelta(days=whole_days)
    return input + freq * whole_days + intraday_part
def diff_custom_calendar_timedeltas(start, end, freq):
    """Compute (end - start) counting only trading days of calendar *freq*.

    Whole non-trading days are subtracted from the wall-clock difference;
    the sub-day remainder is kept as-is.

    Fix: ``DateOffset.onOffset`` was renamed ``is_on_offset`` in pandas 1.1
    and removed in 2.0; use whichever the installed pandas provides.
    """
    if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):
        raise ValueError("freq must be Day, BusinessDay or CustomBusinessDay")
    weekmask = getattr(freq, 'weekmask', None)
    holidays = getattr(freq, 'holidays', None)
    if weekmask is None and holidays is None:
        # Plain Day/BusinessDay offsets carry no calendar; synthesize one.
        if isinstance(freq, Day):
            weekmask = 'Mon Tue Wed Thu Fri Sat Sun'
            holidays = []
        elif isinstance(freq, BusinessDay):
            weekmask = 'Mon Tue Wed Thu Fri'
            holidays = []
    if weekmask is not None and holidays is not None:
        # Fast path: numpy counts business days in [start, end).
        actual_days = np.busday_count(np.array(start).astype('datetime64[D]'),
                                      np.array(end).astype('datetime64[D]'),
                                      weekmask, holidays)
    else:
        # Slow path: enumerate the calendar's dates between start and end.
        actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1
        # 'onOffset' was renamed 'is_on_offset' (pandas 1.1) and removed (2.0).
        is_on_offset = getattr(freq, 'is_on_offset', None) or freq.onOffset
        if not is_on_offset(start):
            actual_days -= 1
    timediff = end - start
    delta_days = timediff.components.days - actual_days
    return timediff - pd.Timedelta(days=delta_days)
def convert_to_forward_returns_columns(period):
    """Map an integer period to a 'period_N' column name.

    Non-integer labels (e.g. '1D') fail the ':d' format with ValueError
    and are passed through unchanged.
    """
    try:
        return f'period_{period:d}'
    except ValueError:
        return period
def ignore_warning(message='', category=Warning, module='', lineno=0, append=False):
    """Decorator factory: run the wrapped callable with a matching
    warnings filter set to 'ignore', restoring filters afterwards.

    Parameters mirror ``warnings.filterwarnings``.
    """
    def _apply(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    'ignore',
                    message=message,
                    category=category,
                    module=module,
                    lineno=lineno,
                    append=append,
                )
                return func(*args, **kwargs)
        return wrapped
    return _apply
def ensure_tuple(x):
    """Return *x* as a tuple: strings and non-iterables become 1-tuples,
    any other iterable is materialized with ``tuple()``.

    Fix: the original relied on ``collections.Iterable`` (removed in
    Python 3.10; it lives in ``collections.abc``) and third-party
    ``six.string_types`` (which is just ``(str,)`` on Python 3).
    Behaviour on Python 3 is unchanged.
    """
    from collections.abc import Iterable
    if isinstance(x, str) or not isinstance(x, Iterable):
        return (x,)
    else:
        return tuple(x)
def Indicators(value):
    """Compute performance statistics for an equity-curve sequence.

    ``value`` is a sequence of portfolio values; it is normalised to start
    at 1.0. Returns a Series whose (Chinese-labelled) entries are: total
    return %, annualised return % (250 trading days/yr), annualised
    volatility %, Sharpe ratio (risk-free rate hard-coded as 4%), max
    drawdown %, Calmar ratio, daily win rate %, and profit/loss ratio.
    """
    columns = ['总收益', '年化收益', '波动率', '夏普比', '最大回撤', '卡玛比率', '日胜率', '盈亏比']
    def MaxBack(value):
        # Maximum drawdown: worst fractional fall from any point onward.
        drawback = []
        for i, v in enumerate(value):
            drawback.append(max(1-value[i:]/v))
        # NOTE: local name deliberately shadows the function name here;
        # the function isn't called again after this point.
        MaxBack = max(drawback)
        return MaxBack
    # Normalise so the curve starts at 1.0.
    value = [i/value[0] for i in value]
    AllRtn = round(value[-1]*100-100, 2)
    AulRtn = round(pow(value[-1], 250/len(value))*100-100, 2)
    value = pd.Series(value)
    Rtns = value.pct_change(1).dropna()
    # Annualised volatility in percent (250 trading days).
    Volity = round(sqrt(Rtns.var()*250)*100, 2)
    SpRatio = round((AulRtn-4)/Volity, 2)
    # Rebinds MaxBack from the helper function to the computed percentage.
    MaxBack = round(MaxBack(value)*100, 2)
    CmRatio = round(AulRtn/MaxBack, 2)
    R1 = [i for i in Rtns.values if i > 0]
    R2 = [i for i in Rtns.values if i < 0]
    WinRate = round(len(R1)/(len(R1)+len(R2))*100, 3)
    # Average gain over average loss (sign-flipped so the ratio is positive).
    BidRatio = round(-mean(R1)/mean(R2), 3)
    data = [AllRtn, AulRtn, Volity, SpRatio,
            MaxBack, CmRatio, WinRate, BidRatio]
    result = pd.Series(index=columns, data=data)
    return result
| true | true |
f7f79e9cc8edc20a07d9051c0fa5faebcbee7370 | 1,091 | py | Python | tests/test_data_factory.py | ExesiosPB/libm | 09c2638d895a4ba69e0d7f4f0e353f27d4b7911f | [
"MIT"
] | null | null | null | tests/test_data_factory.py | ExesiosPB/libm | 09c2638d895a4ba69e0d7f4f0e353f27d4b7911f | [
"MIT"
] | null | null | null | tests/test_data_factory.py | ExesiosPB/libm | 09c2638d895a4ba69e0d7f4f0e353f27d4b7911f | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from scripts import FilePaths
from scripts import data_factory as factory
class TestDataFactory(unittest.TestCase):
    """Check that data_factory loads the same 100-patent fixture from every supported format.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12 -- use ``assertEqual``.
    """

    def setUp(self):
        # Reference data: the pickled USPTO random-100 sample, with the
        # index flattened so abstracts compare positionally.
        self.__df = pd.read_pickle(FilePaths.us_patents_random_100_pickle_name)
        self.__df = self.__df.reset_index()

    def test_reads_xls(self):
        df = factory.get('tests/data/USPTO-random-100.xls')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    def test_reads_xlsx(self):
        df = factory.get('tests/data/USPTO-random-100.xlsx')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    @unittest.skip('Unicode char fails under windows; see task #172 bug')
    def test_reads_csv(self):
        df = factory.get('tests/data/USPTO-random-100.csv')
        self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))

    def test_reads_pickles(self):
        df = factory.get('tests/data/USPTO-random-100.pkl.bz2')
        self.assertEqual(len(df['abstract']), 100)
if __name__ == '__main__':
unittest.main()
| 31.171429 | 79 | 0.688359 | import unittest
import pandas as pd
from scripts import FilePaths
from scripts import data_factory as factory
class TestDataFactory(unittest.TestCase):
def setUp(self):
self.__df = pd.read_pickle(FilePaths.us_patents_random_100_pickle_name)
self.__df = self.__df.reset_index()
def test_reads_xls(self):
df = factory.get('tests/data/USPTO-random-100.xls')
self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))
def test_reads_xlsx(self):
df = factory.get('tests/data/USPTO-random-100.xlsx')
self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))
@unittest.skip('Unicode char fails under windows; see task #172 bug')
def test_reads_csv(self):
df = factory.get('tests/data/USPTO-random-100.csv')
self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))
def test_reads_pickles(self):
df = factory.get('tests/data/USPTO-random-100.pkl.bz2')
self.assertEquals(len(df['abstract']), 100)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f79f95cbd38e510a8f764251792ea664f48379 | 12,719 | py | Python | cubedash/summary/_model.py | andrewdhicks/datacube-explorer | e372da5b697e57113deb239edc52411f5ddad551 | [
"Apache-2.0"
] | null | null | null | cubedash/summary/_model.py | andrewdhicks/datacube-explorer | e372da5b697e57113deb239edc52411f5ddad551 | [
"Apache-2.0"
] | null | null | null | cubedash/summary/_model.py | andrewdhicks/datacube-explorer | e372da5b697e57113deb239edc52411f5ddad551 | [
"Apache-2.0"
] | null | null | null | import warnings
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, date
from typing import Iterable, Optional, Set, Tuple, Union, List
import shapely
import shapely.ops
import structlog
from shapely.geometry import MultiPolygon
from shapely.geometry.base import BaseGeometry
from datacube.model import Dataset, Range
from datacube.utils.geometry import Geometry
_LOG = structlog.get_logger()
@dataclass
class TimePeriodOverview:
    """Summary statistics for one product over one time period.

    The period may be a specific day, month, year, or "all time"
    (``None`` fields widen the scope). Summaries are hierarchical: child
    periods are merged into their parent via :meth:`add_periods`.
    """
    # These four elements make up a pseudo-id of the time period we've summarised.
    #
    # -> None means "all"
    product_name: str
    year: Optional[int]
    month: Optional[int]
    day: Optional[int]
    # Total datasets in this period.
    dataset_count: int
    # Dataset counts bucketed by time; bucket size is `timeline_period`.
    timeline_dataset_counts: Counter
    # Dataset counts bucketed by region code.
    region_dataset_counts: Counter
    # Granularity of timeline_dataset_counts keys: 'day', 'month' or 'year'.
    timeline_period: str
    # Earliest/latest dataset time in the period.
    time_range: Range
    footprint_geometry: Union[shapely.geometry.MultiPolygon, shapely.geometry.Polygon]
    footprint_crs: str
    # Number of datasets that contributed to footprint_geometry.
    footprint_count: int
    # The most newly created dataset
    newest_dataset_creation_time: datetime
    # List of CRSes that these datasets are in
    crses: Set[str]
    size_bytes: int
    # What version of our product table this was based on (the last_refresh_time on ProductSummary)
    product_refresh_time: datetime
    # When this summary was generated. Set on the server.
    summary_gen_time: datetime = None
    def __str__(self):
        """Human-readable label with dataset count, e.g. 'ls8 2019 all all (3 datasets)'."""
        return (
            f"{self.label} "
            f"({self.dataset_count} dataset{'s' if self.dataset_count > 1 else ''})"
        )
    @property
    def label(self):
        """Space-joined period id with 'all' standing in for None elements."""
        return " ".join([(str(p) if p else "all") for p in self.period_tuple])
    @property
    def period_tuple(self):
        """
        This is the pseudo-id of the product time period we've summarised.
        Any of them can be None to represent 'all'
        """
        return self.product_name, self.year, self.month, self.day
    @period_tuple.setter
    def period_tuple(self, v: Tuple[str, Optional[int], Optional[int], Optional[int]]):
        self.product_name, self.year, self.month, self.day = v
    def as_flat_period(self):
        """
        How we "flatten" the time-slice for storage in DB columns. Must remain stable!
        A "period type" enum, and a single date.
        """
        return self.flat_period_representation(self.year, self.month, self.day)
    @classmethod
    def flat_period_representation(
        cls, year: Optional[int], month: Optional[int], day: Optional[int]
    ):
        """Return (period-type, start-date): missing components default to 1900/1/1."""
        period = "all"
        if year:
            period = "year"
        if month:
            period = "month"
        if day:
            period = "day"
        return period, date(year or 1900, month or 1, day or 1)
    @classmethod
    def from_flat_period_representation(self, period_type: str, start_day: date):
        """Inverse of flat_period_representation: (year, month, day) with Nones for 'all' levels.

        NOTE(review): first parameter is conventionally named ``cls``.
        """
        year = None
        month = None
        day = None
        if period_type != "all":
            year = start_day.year
            if period_type != "year":
                month = start_day.month
                if period_type != "month":
                    day = start_day.day
        return year, month, day
    @classmethod
    def add_periods(
        cls,
        periods: Iterable["TimePeriodOverview"],
        # This is in CRS units. Albers, so 1KM.
        # Lower value will have a more accurate footprint and much larger page load times.
        footprint_tolerance=1000.0,
    ):
        """Merge child summaries into a single parent summary.

        Counts are summed, timelines and region counters merged, footprints
        unioned (simplified to ``footprint_tolerance``), and the common
        prefix of the child period ids becomes the parent's period id.
        """
        periods = [p for p in periods if p is not None and p.dataset_count > 0]
        period = "day"
        crses = set(p.footprint_crs for p in periods)
        if not crses:
            footprint_crs = None
        elif len(crses) == 1:
            [footprint_crs] = crses
        else:
            # All generated summaries should be the same, so this can only occur if someone's changes
            # output crs setting on an existing cubedash instance.
            raise NotImplementedError("Time summaries use inconsistent CRSes.")
        timeline_counter = Counter()
        for p in periods:
            timeline_counter.update(p.timeline_dataset_counts)
            period = p.timeline_period
        timeline_counter, period = cls._group_counter_if_needed(
            timeline_counter, period
        )
        # The period elements that are the same across all of them.
        # (it will be the period of the result)
        common_time_period = list(periods[0].period_tuple) if periods else ([None] * 4)
        region_counter = Counter()
        for time_period in periods:
            region_counter.update(time_period.region_dataset_counts)
            # Attempt to fix broken geometries.
            # -> The 'high_tide_comp_20p' tests give an example of this: geometry is valid when
            #    created, but after serialisation+deserialisation become invalid due to float
            #    rounding.
            if (
                time_period.footprint_geometry
                and not time_period.footprint_geometry.is_valid
            ):
                _LOG.info("invalid_stored_geometry", summary=time_period.period_tuple)
                time_period.footprint_geometry = time_period.footprint_geometry.buffer(
                    0
                )
            # We're looking for the time period common to them all.
            # Strike out any elements that differ between our periods.
            this_period = time_period.period_tuple
            for i, elem in enumerate(common_time_period):
                if elem is not None and (elem != this_period[i]):
                    # All following should be blank too, since this is a hierarchy.
                    _erase_elements_from(common_time_period, i)
                    break
        with_valid_geometries = [
            p
            for p in periods
            if p.footprint_count
            and p.footprint_geometry
            and p.footprint_geometry.is_valid
            and not p.footprint_geometry.is_empty
        ]
        geometry_union = _create_unified_footprint(
            with_valid_geometries, footprint_tolerance
        )
        total_datasets = sum(p.dataset_count for p in periods)
        # Non-null properties here are the ones that are the same across all inputs.
        product_name, year, month, day = common_time_period
        return TimePeriodOverview(
            product_name=product_name,
            year=year,
            month=month,
            day=day,
            dataset_count=total_datasets,
            timeline_dataset_counts=timeline_counter,
            timeline_period=period,
            region_dataset_counts=region_counter,
            time_range=Range(
                min(r.time_range.begin for r in periods) if periods else None,
                max(r.time_range.end for r in periods) if periods else None,
            ),
            footprint_geometry=geometry_union,
            footprint_crs=footprint_crs,
            footprint_count=sum(p.footprint_count for p in with_valid_geometries),
            newest_dataset_creation_time=max(
                (
                    p.newest_dataset_creation_time
                    for p in periods
                    if p.newest_dataset_creation_time is not None
                ),
                default=None,
            ),
            crses=set.union(*(o.crses for o in periods)) if periods else set(),
            # Why choose the max version? Because we assume older ones didn't need to be replaced,
            # so the most recent refresh time is the version that we are current with.
            product_refresh_time=max(
                (
                    p.product_refresh_time
                    for p in periods
                    if p.product_refresh_time is not None
                ),
                default=None,
            ),
            summary_gen_time=min(
                (p.summary_gen_time for p in periods if p.summary_gen_time is not None),
                default=None,
            ),
            size_bytes=sum(p.size_bytes for p in periods if p.size_bytes is not None),
        )
    @property
    def footprint_wgs84(self) -> Optional[MultiPolygon]:
        """Footprint reprojected to EPSG:4326, or None if geometry/CRS is missing."""
        if not self.footprint_geometry:
            return None
        if not self.footprint_crs:
            warnings.warn(f"Geometry without a crs for {self}")
            return None
        return (
            Geometry(self.footprint_geometry, crs=self.footprint_crs)
            .to_crs("EPSG:4326", wrapdateline=True)
            .geom
        )
    @staticmethod
    def _group_counter_if_needed(counter, period):
        """Coarsen a timeline counter (day->month->year) once it exceeds 366 buckets."""
        if len(counter) > 366:
            if period == "day":
                counter = Counter(
                    datetime(date.year, date.month, 1).date()
                    for date in counter.elements()
                )
                period = "month"
            elif period == "month":
                counter = Counter(
                    datetime(date.year, 1, 1).date() for date in counter.elements()
                )
                period = "year"
        return counter, period
    @property
    def footprint_srid(self):
        """Numeric SRID parsed from an 'epsg:<code>' CRS string, else None (with a warning)."""
        if self.footprint_crs is None:
            return None
        epsg = self.footprint_crs.lower()
        if not epsg.startswith("epsg:"):
            _LOG.warn("unsupported.to_srid", crs=self.footprint_crs)
            return None
        return int(epsg.split(":")[1])
def _has_shape(datasets: Tuple[Dataset, Tuple[BaseGeometry, bool]]) -> bool:
    """True when the dataset's extracted footprint geometry exists
    (the accompanying validity flag is ignored).
    """
    _dataset, (shape, _was_valid) = datasets
    return shape is not None
def _erase_elements_from(items: List, start_i: int):
"""
Erase from the given 'i' onward
>>> _erase_elements_from([1, 2, 3], 0)
[None, None, None]
>>> _erase_elements_from([1, 2, 3], 1)
[1, None, None]
>>> _erase_elements_from([1, 2, 3], 2)
[1, 2, None]
>>> _erase_elements_from([1, 2, 3], 3)
[1, 2, 3]
"""
items[start_i:] = [None] * (len(items) - start_i)
# Return the list just for convenience in doctest. It's actually mutable.
return items
def _create_unified_footprint(
    with_valid_geometries: List["TimePeriodOverview"], footprint_tolerance: float
):
    """
    Union the given time period's footprints, trying to fix any invalid geometries.

    Three attempts are made, each a more aggressive workaround for shapely
    union failures; the result is simplified to ``footprint_tolerance``
    (CRS units) when a tolerance is given.
    """
    if not with_valid_geometries:
        return None
    try:
        geometry_union = shapely.ops.unary_union(
            [p.footprint_geometry for p in with_valid_geometries]
        )
    except ValueError:
        # Attempt 2 at union: Exaggerate the overlap *slightly* to
        # avoid non-noded intersection.
        # TODO: does shapely have a snap-to-grid?
        try:
            _LOG.warn("summary.footprint.invalid_union", exc_info=True)
            geometry_union = shapely.ops.unary_union(
                [p.footprint_geometry.buffer(0.001) for p in with_valid_geometries]
            )
        except ValueError:
            _LOG.warn("summary.footprint.invalid_buffered_union", exc_info=True)
            # Attempt 3 at union: Recursive filter bad polygons first
            polygonlist = _polygon_chain(with_valid_geometries)
            filtered_geom = _filter_geom(polygonlist)
            geometry_union = shapely.ops.unary_union(filtered_geom)
    if footprint_tolerance is not None:
        geometry_union = geometry_union.simplify(footprint_tolerance)
    return geometry_union
def _polygon_chain(valid_geometries: Iterable[TimePeriodOverview]) -> list:
    """Chain all the given [Multi]Polygon footprints into a single flat list
    of simple Polygons.

    Fix: direct iteration over multi-part geometries (``list(multipolygon)``)
    was removed in Shapely 2.0 -- ``.geoms`` works on both 1.8 and 2.x.
    ``isinstance`` is also used instead of an exact ``type is`` check.
    """
    polygonlist = []
    for poly in valid_geometries:
        geom = poly.footprint_geometry
        if isinstance(geom, MultiPolygon):
            polygonlist.extend(geom.geoms)
        else:
            polygonlist.append(geom)
    return polygonlist
def _filter_geom(geomlist: List[BaseGeometry], start=0) -> List[BaseGeometry]:
    """
    Recursive filtering of un-unionable polygons. Input list is modified in-place.

    Exhaustively searches for a run of polygons that cause a union error
    (eg. "non-noded intersection"), and cuts out the first one that it finds.

    ``start`` marks the prefix already verified unionable; each recursion
    resumes scanning from there, so every polygon is tried at most once.
    """
    # Pass through empty lists
    if len(geomlist) == 0:
        return geomlist
    # Process non-empty lists
    if start == len(geomlist):
        # Every prefix unions cleanly except the full list: the final
        # element must be the offender, so drop it.
        geomlist.pop()
        return geomlist
    else:
        for i in range(len(geomlist) - start):
            try:
                # Grow the prefix one polygon at a time until union fails.
                shapely.ops.unary_union(geomlist[0 : i + start])
            except ValueError:
                # The polygon just added broke the union: remove it and
                # rescan from this position.
                del geomlist[i + start]
                start = start + i
                break
            if i == len(geomlist) - 1 - start:
                # Whole list unions cleanly: done.
                return geomlist
        _filter_geom(geomlist, start)
    return geomlist
| 34.375676 | 101 | 0.610425 | import warnings
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, date
from typing import Iterable, Optional, Set, Tuple, Union, List
import shapely
import shapely.ops
import structlog
from shapely.geometry import MultiPolygon
from shapely.geometry.base import BaseGeometry
from datacube.model import Dataset, Range
from datacube.utils.geometry import Geometry
_LOG = structlog.get_logger()
@dataclass
class TimePeriodOverview:
#
# -> None means "all"
product_name: str
year: Optional[int]
month: Optional[int]
day: Optional[int]
dataset_count: int
timeline_dataset_counts: Counter
region_dataset_counts: Counter
timeline_period: str
time_range: Range
footprint_geometry: Union[shapely.geometry.MultiPolygon, shapely.geometry.Polygon]
footprint_crs: str
footprint_count: int
# The most newly created dataset
newest_dataset_creation_time: datetime
# List of CRSes that these datasets are in
crses: Set[str]
size_bytes: int
# What version of our product table this was based on (the last_refresh_time on ProductSummary)
product_refresh_time: datetime
# When this summary was generated. Set on the server.
summary_gen_time: datetime = None
def __str__(self):
return (
f"{self.label} "
f"({self.dataset_count} dataset{'s' if self.dataset_count > 1 else ''})"
)
@property
def label(self):
return " ".join([(str(p) if p else "all") for p in self.period_tuple])
@property
def period_tuple(self):
return self.product_name, self.year, self.month, self.day
@period_tuple.setter
def period_tuple(self, v: Tuple[str, Optional[int], Optional[int], Optional[int]]):
self.product_name, self.year, self.month, self.day = v
def as_flat_period(self):
return self.flat_period_representation(self.year, self.month, self.day)
@classmethod
def flat_period_representation(
cls, year: Optional[int], month: Optional[int], day: Optional[int]
):
period = "all"
if year:
period = "year"
if month:
period = "month"
if day:
period = "day"
return period, date(year or 1900, month or 1, day or 1)
@classmethod
def from_flat_period_representation(self, period_type: str, start_day: date):
year = None
month = None
day = None
if period_type != "all":
year = start_day.year
if period_type != "year":
month = start_day.month
if period_type != "month":
day = start_day.day
return year, month, day
@classmethod
def add_periods(
cls,
periods: Iterable["TimePeriodOverview"],
# This is in CRS units. Albers, so 1KM.
# Lower value will have a more accurate footprint and much larger page load times.
footprint_tolerance=1000.0,
):
periods = [p for p in periods if p is not None and p.dataset_count > 0]
period = "day"
crses = set(p.footprint_crs for p in periods)
if not crses:
footprint_crs = None
elif len(crses) == 1:
[footprint_crs] = crses
else:
# All generated summaries should be the same, so this can only occur if someone's changes
raise NotImplementedError("Time summaries use inconsistent CRSes.")
timeline_counter = Counter()
for p in periods:
timeline_counter.update(p.timeline_dataset_counts)
period = p.timeline_period
timeline_counter, period = cls._group_counter_if_needed(
timeline_counter, period
)
common_time_period = list(periods[0].period_tuple) if periods else ([None] * 4)
region_counter = Counter()
for time_period in periods:
region_counter.update(time_period.region_dataset_counts)
if (
time_period.footprint_geometry
and not time_period.footprint_geometry.is_valid
):
_LOG.info("invalid_stored_geometry", summary=time_period.period_tuple)
time_period.footprint_geometry = time_period.footprint_geometry.buffer(
0
)
# Strike out any elements that differ between our periods.
this_period = time_period.period_tuple
for i, elem in enumerate(common_time_period):
if elem is not None and (elem != this_period[i]):
# All following should be blank too, since this is a hierarchy.
_erase_elements_from(common_time_period, i)
break
with_valid_geometries = [
p
for p in periods
if p.footprint_count
and p.footprint_geometry
and p.footprint_geometry.is_valid
and not p.footprint_geometry.is_empty
]
geometry_union = _create_unified_footprint(
with_valid_geometries, footprint_tolerance
)
total_datasets = sum(p.dataset_count for p in periods)
# Non-null properties here are the ones that are the same across all inputs.
product_name, year, month, day = common_time_period
return TimePeriodOverview(
product_name=product_name,
year=year,
month=month,
day=day,
dataset_count=total_datasets,
timeline_dataset_counts=timeline_counter,
timeline_period=period,
region_dataset_counts=region_counter,
time_range=Range(
min(r.time_range.begin for r in periods) if periods else None,
max(r.time_range.end for r in periods) if periods else None,
),
footprint_geometry=geometry_union,
footprint_crs=footprint_crs,
footprint_count=sum(p.footprint_count for p in with_valid_geometries),
newest_dataset_creation_time=max(
(
p.newest_dataset_creation_time
for p in periods
if p.newest_dataset_creation_time is not None
),
default=None,
),
crses=set.union(*(o.crses for o in periods)) if periods else set(),
# Why choose the max version? Because we assume older ones didn't need to be replaced,
product_refresh_time=max(
(
p.product_refresh_time
for p in periods
if p.product_refresh_time is not None
),
default=None,
),
summary_gen_time=min(
(p.summary_gen_time for p in periods if p.summary_gen_time is not None),
default=None,
),
size_bytes=sum(p.size_bytes for p in periods if p.size_bytes is not None),
)
@property
def footprint_wgs84(self) -> Optional[MultiPolygon]:
if not self.footprint_geometry:
return None
if not self.footprint_crs:
warnings.warn(f"Geometry without a crs for {self}")
return None
return (
Geometry(self.footprint_geometry, crs=self.footprint_crs)
.to_crs("EPSG:4326", wrapdateline=True)
.geom
)
@staticmethod
def _group_counter_if_needed(counter, period):
if len(counter) > 366:
if period == "day":
counter = Counter(
datetime(date.year, date.month, 1).date()
for date in counter.elements()
)
period = "month"
elif period == "month":
counter = Counter(
datetime(date.year, 1, 1).date() for date in counter.elements()
)
period = "year"
return counter, period
@property
def footprint_srid(self):
if self.footprint_crs is None:
return None
epsg = self.footprint_crs.lower()
if not epsg.startswith("epsg:"):
_LOG.warn("unsupported.to_srid", crs=self.footprint_crs)
return None
return int(epsg.split(":")[1])
def _has_shape(datasets: Tuple[Dataset, Tuple[BaseGeometry, bool]]) -> bool:
    """True when the dataset's extracted footprint shape exists.

    The accompanying validity flag is deliberately ignored: an
    invalid-but-present shape still counts as having a shape.
    """
    _dataset, (shape, _was_valid) = datasets
    return shape is not None
def _erase_elements_from(items: List, start_i: int):
items[start_i:] = [None] * (len(items) - start_i)
return items
def _create_unified_footprint(
    with_valid_geometries: List["TimePeriodOverview"], footprint_tolerance: float
):
    """Union the footprints of the given summaries into one geometry.

    Tries progressively more forgiving strategies when shapely's union
    fails on degenerate inputs, then optionally simplifies the result.
    Returns None when there is nothing to union.
    """
    if not with_valid_geometries:
        return None
    try:
        # Attempt 1: plain union of all footprints.
        geometry_union = shapely.ops.unary_union(
            [p.footprint_geometry for p in with_valid_geometries]
        )
    except ValueError:
        # Attempt 2 at union: Exaggerate the overlap *slightly* to
        # avoid non-noded intersection.
        # TODO: does shapely have a snap-to-grid?
        try:
            _LOG.warn("summary.footprint.invalid_union", exc_info=True)
            geometry_union = shapely.ops.unary_union(
                [p.footprint_geometry.buffer(0.001) for p in with_valid_geometries]
            )
        except ValueError:
            _LOG.warn("summary.footprint.invalid_buffered_union", exc_info=True)
            # Attempt 3 at union: Recursive filter bad polygons first
            polygonlist = _polygon_chain(with_valid_geometries)
            filtered_geom = _filter_geom(polygonlist)
            geometry_union = shapely.ops.unary_union(filtered_geom)
    if footprint_tolerance is not None:
        # Simplify to keep the stored footprint small.
        geometry_union = geometry_union.simplify(footprint_tolerance)
    return geometry_union
def _polygon_chain(valid_geometries: Iterable[TimePeriodOverview]) -> list:
    """Flatten the given summaries' footprints into a flat list of polygons.

    MultiPolygon footprints are split into their component polygons so that
    callers can filter and union them individually.
    """
    polygonlist = []
    for poly in valid_geometries:
        geometry = poly.footprint_geometry
        if isinstance(geometry, MultiPolygon):
            # Use .geoms rather than iterating the MultiPolygon directly:
            # direct iteration of multi-part geometries was deprecated and
            # then removed in Shapely 2.0. isinstance() also covers
            # subclasses, unlike the original `type(...) is` check.
            polygonlist.extend(geometry.geoms)
        else:
            polygonlist.append(geometry)
    return polygonlist
def _filter_geom(geomlist: List[BaseGeometry], start=0) -> List[BaseGeometry]:
    """Recursively drop polygons that make shapely's unary_union raise.

    Unions growing prefixes of the list; when a union fails, the polygon at
    the probe position is removed and scanning resumes from there. The list
    is mutated in place and also returned.
    """
    # Pass through empty lists
    if len(geomlist) == 0:
        return geomlist
    # Process non-empty lists
    if start == len(geomlist):
        # NOTE(review): once the scan start has walked past the end, the
        # last element is unconditionally popped — presumably the final
        # offending polygon; confirm this edge case is intended.
        geomlist.pop()
        return geomlist
    else:
        for i in range(len(geomlist) - start):
            try:
                # Probe: does the union of the first i+start elements succeed?
                shapely.ops.unary_union(geomlist[0 : i + start])
            except ValueError:
                # Union failed: drop the element at the probe boundary and
                # restart the scan from this position.
                # NOTE(review): the failing slice excludes index i+start,
                # yet that element is the one removed — confirm the
                # off-by-one is intended.
                del geomlist[i + start]
                start = start + i
                break
            if i == len(geomlist) - 1 - start:
                # Reached the end without a failure: the list unions cleanly.
                return geomlist
        _filter_geom(geomlist, start)
    return geomlist
| true | true |
f7f7a0b5415aa14c33a4a2155512edd84fb8bdb7 | 2,802 | py | Python | chat_app/chat_server/server_client.py | obin1000/chatapp-computer-networks | 5d8380ac594683f873e10362a4f6351731147912 | [
"MIT"
] | null | null | null | chat_app/chat_server/server_client.py | obin1000/chatapp-computer-networks | 5d8380ac594683f873e10362a4f6351731147912 | [
"MIT"
] | null | null | null | chat_app/chat_server/server_client.py | obin1000/chatapp-computer-networks | 5d8380ac594683f873e10362a4f6351731147912 | [
"MIT"
] | null | null | null | import threading
from time import sleep
import protocol
class ServerClient:
    """Server-side handle for one connected chat client.

    Owns the client socket, a background thread that polls it for incoming
    protocol messages, and an inbox list the server drains via
    get_next_message().
    """
    # Seconds between polls of the client socket by the background thread.
    CLIENT_POLL_INTERVAL = 0.1
    # Maximum number of bytes read per recv() call.
    RECEIVE_SIZE = 1024
    def __init__(self, client_connection, address=None, username=None):
        """Store the connection details and immediately start polling."""
        print('Created user from {} name: {}'.format(address, username))
        self.client_connection = client_connection
        self.address = address
        self.username = username
        self.done_handshake = False
        self.poll_thread = None
        # Variable to stop the poll threads
        self.alive = True
        self.inbox = []
        self.start_polling()
    def get_handshake_done(self):
        """Return True once the protocol handshake has completed."""
        return self.done_handshake
    def handshake_done(self):
        """Mark the protocol handshake as completed."""
        self.done_handshake = True
    def is_alive(self):
        """Return True while the connection/poll loop is considered live."""
        return self.alive
    def kill(self):
        # NOTE(review): only clears the flag; the poll thread may stay
        # blocked in recv() until data arrives or the socket closes — confirm.
        self.alive = False
    def set_username(self, username):
        self.username = username
    def get_username(self):
        return self.username
    def set_address(self, address):
        self.address = address
    def get_address(self):
        return self.address
    def get_next_message(self):
        """Pop and return the oldest received message, or None when empty."""
        if self.inbox:
            return self.inbox.pop(0)
        else:
            return None
    def start_polling(self):
        """Spawn the background thread running _client_poll."""
        self.poll_thread = threading.Thread(target=self._client_poll)
        self.poll_thread.start()
    def _client_poll(self):
        """Background loop: accumulate socket data into whole messages.

        Runs until self.alive is cleared or the connection drops. Complete
        messages (containing protocol.MESSAGE_END) are appended to inbox.
        """
        message = ''
        while self.alive:
            sleep(self.CLIENT_POLL_INTERVAL)
            try:
                data = self.client_connection.recv(self.RECEIVE_SIZE)
                message += data.decode()
                # Detect closed socket
                if not data:
                    self.alive = False
                # For messages larger than the buffer, search for the message end.
                if protocol.MESSAGE_END not in message:
                    continue
                print('Received {} from {}'.format(message, self.address))
                self.inbox.append(message)
            except Exception as e:
                print('Failed receiving, did the connection close? {}'.format(e))
                self.alive = False
            # Reset message for next message.
            # NOTE(review): any bytes received after the terminator in the
            # same read are discarded here — confirm the protocol sends one
            # message per burst.
            message = ''
    def send(self, message):
        """
        Send a message to this client
        :param message: The message to send
        :return: None
        """
        print('Sending: {}'.format(message))
        try:
            self.client_connection.sendall(str.encode(message))
        except Exception as e:
            print('Failed sending to user: {} message: {}'.format(self.username, message))
    def __str__(self):
        return '{}: {}, {}'.format(self.address, self.username, self.alive)
    def __del__(self):
        # Best-effort cleanup: stop the poll loop and close the socket.
        self.alive = False
        self.client_connection.close()
| 26.942308 | 90 | 0.584226 | import threading
from time import sleep
import protocol
class ServerClient:
CLIENT_POLL_INTERVAL = 0.1
RECEIVE_SIZE = 1024
def __init__(self, client_connection, address=None, username=None):
print('Created user from {} name: {}'.format(address, username))
self.client_connection = client_connection
self.address = address
self.username = username
self.done_handshake = False
self.poll_thread = None
self.alive = True
self.inbox = []
self.start_polling()
def get_handshake_done(self):
return self.done_handshake
def handshake_done(self):
self.done_handshake = True
def is_alive(self):
return self.alive
def kill(self):
self.alive = False
def set_username(self, username):
self.username = username
def get_username(self):
return self.username
def set_address(self, address):
self.address = address
def get_address(self):
return self.address
def get_next_message(self):
if self.inbox:
return self.inbox.pop(0)
else:
return None
def start_polling(self):
self.poll_thread = threading.Thread(target=self._client_poll)
self.poll_thread.start()
def _client_poll(self):
message = ''
while self.alive:
sleep(self.CLIENT_POLL_INTERVAL)
try:
data = self.client_connection.recv(self.RECEIVE_SIZE)
message += data.decode()
if not data:
self.alive = False
if protocol.MESSAGE_END not in message:
continue
print('Received {} from {}'.format(message, self.address))
self.inbox.append(message)
except Exception as e:
print('Failed receiving, did the connection close? {}'.format(e))
self.alive = False
message = ''
def send(self, message):
print('Sending: {}'.format(message))
try:
self.client_connection.sendall(str.encode(message))
except Exception as e:
print('Failed sending to user: {} message: {}'.format(self.username, message))
def __str__(self):
return '{}: {}, {}'.format(self.address, self.username, self.alive)
def __del__(self):
self.alive = False
self.client_connection.close()
| true | true |
f7f7a1abfcbbaf7f0908158ea48ceb031c503902 | 947 | py | Python | access_spotify.py | x-meienberg/smartspot | af49d55f32c6135bab22c012c8889ae54a7da665 | [
"MIT"
] | 1 | 2020-12-28T20:28:58.000Z | 2020-12-28T20:28:58.000Z | access_spotify.py | x-meienberg/smartspot | af49d55f32c6135bab22c012c8889ae54a7da665 | [
"MIT"
] | null | null | null | access_spotify.py | x-meienberg/smartspot | af49d55f32c6135bab22c012c8889ae54a7da665 | [
"MIT"
] | null | null | null | import json
import requests
import config
# Spotify app credentials, loaded from the untracked config module.
CLIENT_ID = config.client_id
CLIENT_Secret = config.client_secret
AUTH_URL = 'https://accounts.spotify.com/api/token'
# Client-credentials OAuth flow: exchange id/secret for a bearer token.
auth_response = requests.post(AUTH_URL, {
    'grant_type': 'client_credentials',
    'client_id': CLIENT_ID,
    'client_secret': CLIENT_Secret,
})
# Convert the response to JSON.
auth_response_data = auth_response.json()
# Save the access token for the API calls below.
access_token = auth_response_data['access_token']
headers = {
    'Authorization': 'Bearer {token}'.format(token=access_token)
}
BASE_URL = 'https://api.spotify.com/v1/'
# Example track id whose audio features are fetched.
track_id = '6y0igZArWVi6Iz0rj35c1Y'
r = requests.get(BASE_URL + 'audio-features/' + track_id, headers = headers)
r = r.json()
# Print the audio features of track
print(r)
#t = requests.get(BASE_URL + 'audio-analysis/'+ track_id, headers = headers)
#t = t.json()
# Print the Audio Analysis of the track
#print(t)
# NOTE(review): the loop below is unrelated to the Spotify calls and has no
# observable effect — presumably leftover scratch code; confirm before removing.
test = 0
for i in range(7):
    test = test+i
| 18.568627 | 76 | 0.721225 | import json
import requests
import config
CLIENT_ID = config.client_id
CLIENT_Secret = config.client_secret
AUTH_URL = 'https://accounts.spotify.com/api/token'
auth_response = requests.post(AUTH_URL, {
'grant_type': 'client_credentials',
'client_id': CLIENT_ID,
'client_secret': CLIENT_Secret,
})
auth_response_data = auth_response.json()
access_token = auth_response_data['access_token']
headers = {
'Authorization': 'Bearer {token}'.format(token=access_token)
}
BASE_URL = 'https://api.spotify.com/v1/'
track_id = '6y0igZArWVi6Iz0rj35c1Y'
r = requests.get(BASE_URL + 'audio-features/' + track_id, headers = headers)
r = r.json()
print(r)
test = 0
for i in range(7):
test = test+i
| true | true |
f7f7a2733eceb3fa4f3b81a247461b24cd349fd5 | 7,988 | py | Python | modules/dashboard/course_settings.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | 1 | 2015-10-06T14:01:44.000Z | 2015-10-06T14:01:44.000Z | modules/dashboard/course_settings.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | null | null | null | modules/dashboard/course_settings.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting updates to basic course settings."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
def is_editable_fs(app_context):
    """True when the context's file system is datastore-backed (i.e. editable)."""
    fs_impl = app_context.fs.impl
    return fs_impl.__class__ == vfs.DatastoreBackedFileSystem
class CourseSettingsRights(object):
    """Manages view/edit rights for course settings."""
    @classmethod
    def can_view(cls, handler):
        """Only course admins may view the settings."""
        return roles.Roles.is_course_admin(handler.app_context)
    @classmethod
    def can_edit(cls, handler):
        """Only course admins may edit the settings."""
        return roles.Roles.is_course_admin(handler.app_context)
    @classmethod
    def can_delete(cls, handler):
        # Deletion is gated by the same rule as editing.
        return cls.can_edit(handler)
    @classmethod
    def can_add(cls, handler):
        # Adding is gated by the same rule as editing.
        return cls.can_edit(handler)
class CourseSettingsHandler(ApplicationHandler):
    """Course settings handler for the dashboard."""
    def post_edit_basic_course_settings(self):
        """Handles editing of course.yaml."""
        assert is_editable_fs(self.app_context)
        # Check if course.yaml exists; create it with a minimal template if not.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))
        # Hand off to the editor view for the (possibly just-created) file.
        self.redirect(self.get_action_url(
            'edit_basic_settings', key='/course.yaml'))
    def get_edit_basic_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        # URLs the object editor uses to save (REST) and to leave (exit).
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/course/settings')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            CourseSettingsRESTHandler.REGISTORY.get_json_schema(),
            CourseSettingsRESTHandler.REGISTORY.get_schema_dict(),
            key, rest_url, exit_url,
            required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)
class CourseSettingsRESTHandler(BaseRESTHandler):
    """Provides the REST API for reading/writing course settings (course.yaml)."""
    # Schema registry describing the editable course settings.
    REGISTORY = courses.create_course_registry()
    # Client-side editor modules required to render the settings form.
    REQUIRED_MODULES = [
        'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
        'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte']
    URI = '/rest/course/settings'
    @classmethod
    def validate_content(cls, content):
        """Raise if the content is not parseable YAML."""
        yaml.safe_load(content)
    def get_course_dict(self):
        """Return the current course environment as a dict."""
        return self.get_course().get_environ(self.app_context)
    def get_group_id(self, email):
        """Extract the group id from a ...@googlegroups.com address, else None."""
        if not email or not '@googlegroups.com' in email:
            return None
        return email.split('@')[0]
    def get_groups_web_url(self, email):
        """Web URL of the Google Group behind the given address, or None."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/group/' + group_id
    def get_groups_embed_url(self, email):
        """Embeddable forum URL of the Google Group, or None."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/forum/embed/?place=forum/' + group_id
    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)
        key = self.request.get('key')
        if not CourseSettingsRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except: # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        # Prepare data: project the course environ through the schema registry.
        entity = {}
        CourseSettingsRESTHandler.REGISTORY.convert_entity_to_json_entity(
            self.get_course_dict(), entity)
        # Render JSON response, including an XSRF token for the follow-up PUT.
        json_payload = transforms.dict_to_json(
            entity,
            CourseSettingsRESTHandler.REGISTORY.get_json_schema_dict())
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'basic-course-settings-put'))
    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, 'basic-course-settings-put', {'key': key}):
            return
        if not CourseSettingsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        payload = request.get('payload')
        request_data = {}
        CourseSettingsRESTHandler.REGISTORY.convert_json_to_entity(
            transforms.loads(payload), request_data)
        course_data = request_data['course']
        # Derive the forum URLs from the forum email when it is a Google Group.
        if 'forum_email' in course_data.keys():
            forum_email = course_data['forum_email']
            forum_web_url = self.get_groups_web_url(forum_email)
            if forum_web_url:
                course_data['forum_url'] = forum_web_url
            forum_web_url = self.get_groups_embed_url(forum_email)
            if forum_web_url:
                course_data['forum_embed_url'] = forum_web_url
        # Likewise derive the announcement-list URL from its email.
        if 'announcement_list_email' in course_data.keys():
            announcement_email = course_data['announcement_list_email']
            announcement_web_url = self.get_groups_web_url(announcement_email)
            if announcement_web_url:
                course_data['announcement_list_url'] = announcement_web_url
        # Merge the incoming changes over the existing settings, re-serialize,
        # and reject the write if the result is not valid YAML.
        entity = courses.deep_dict_merge(request_data, self.get_course_dict())
        content = yaml.safe_dump(entity)
        try:
            self.validate_content(content)
            content_stream = vfs.string_to_stream(unicode(content))
        except Exception as e: # pylint: disable=W0703
            transforms.send_json_response(self, 412, 'Validation error: %s' % e)
            return
        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)
        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')
    def delete(self):
        """Handles REST DELETE verb."""
        # NOTE(review): deletion is unconditionally denied — presumably
        # course.yaml must never be deleted through this API; confirm.
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
| 34.882096 | 80 | 0.660491 |
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
def is_editable_fs(app_context):
return app_context.fs.impl.__class__ == vfs.DatastoreBackedFileSystem
class CourseSettingsRights(object):
@classmethod
def can_view(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class CourseSettingsHandler(ApplicationHandler):
def post_edit_basic_course_settings(self):
assert is_editable_fs(self.app_context)
fs = self.app_context.fs.impl
course_yaml = fs.physical_to_logical('/course.yaml')
if not fs.isfile(course_yaml):
fs.put(course_yaml, vfs.string_to_stream(
courses.EMPTY_COURSE_YAML % users.get_current_user().email()))
self.redirect(self.get_action_url(
'edit_basic_settings', key='/course.yaml'))
def get_edit_basic_settings(self):
key = self.request.get('key')
exit_url = self.canonicalize_url('/dashboard?action=settings')
rest_url = self.canonicalize_url('/rest/course/settings')
form_html = oeditor.ObjectEditor.get_html_for(
self,
CourseSettingsRESTHandler.REGISTORY.get_json_schema(),
CourseSettingsRESTHandler.REGISTORY.get_schema_dict(),
key, rest_url, exit_url,
required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Settings')
template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
template_values['main_content'] = form_html
self.render_page(template_values)
class CourseSettingsRESTHandler(BaseRESTHandler):
REGISTORY = courses.create_course_registry()
REQUIRED_MODULES = [
'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte']
URI = '/rest/course/settings'
@classmethod
def validate_content(cls, content):
yaml.safe_load(content)
def get_course_dict(self):
return self.get_course().get_environ(self.app_context)
def get_group_id(self, email):
if not email or not '@googlegroups.com' in email:
return None
return email.split('@')[0]
def get_groups_web_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/group/' + group_id
def get_groups_embed_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/forum/embed/?place=forum/' + group_id
def get(self):
assert is_editable_fs(self.app_context)
key = self.request.get('key')
if not CourseSettingsRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
fs = self.app_context.fs.impl
filename = fs.physical_to_logical(key)
try:
stream = fs.get(filename)
except:
stream = None
if not stream:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
entity = {}
CourseSettingsRESTHandler.REGISTORY.convert_entity_to_json_entity(
self.get_course_dict(), entity)
json_payload = transforms.dict_to_json(
entity,
CourseSettingsRESTHandler.REGISTORY.get_json_schema_dict())
transforms.send_json_response(
self, 200, 'Success.',
payload_dict=json_payload,
xsrf_token=XsrfTokenManager.create_xsrf_token(
'basic-course-settings-put'))
def put(self):
assert is_editable_fs(self.app_context)
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, 'basic-course-settings-put', {'key': key}):
return
if not CourseSettingsRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
payload = request.get('payload')
request_data = {}
CourseSettingsRESTHandler.REGISTORY.convert_json_to_entity(
transforms.loads(payload), request_data)
course_data = request_data['course']
if 'forum_email' in course_data.keys():
forum_email = course_data['forum_email']
forum_web_url = self.get_groups_web_url(forum_email)
if forum_web_url:
course_data['forum_url'] = forum_web_url
forum_web_url = self.get_groups_embed_url(forum_email)
if forum_web_url:
course_data['forum_embed_url'] = forum_web_url
if 'announcement_list_email' in course_data.keys():
announcement_email = course_data['announcement_list_email']
announcement_web_url = self.get_groups_web_url(announcement_email)
if announcement_web_url:
course_data['announcement_list_url'] = announcement_web_url
entity = courses.deep_dict_merge(request_data, self.get_course_dict())
content = yaml.safe_dump(entity)
try:
self.validate_content(content)
content_stream = vfs.string_to_stream(unicode(content))
except Exception as e:
transforms.send_json_response(self, 412, 'Validation error: %s' % e)
return
fs = self.app_context.fs.impl
filename = fs.physical_to_logical(key)
fs.put(filename, content_stream)
transforms.send_json_response(self, 200, 'Saved.')
def delete(self):
request = transforms.loads(self.request.get('request'))
key = request.get('key')
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
| true | true |
f7f7a2eacb247a6c8327c5cd6f0688dd6e99dd59 | 871 | py | Python | leetcode/1587-parallel-courses-ii.py | Magic07/online-judge-solutions | 02a289dd7eb52d7eafabc97bd1a043213b65f70a | [
"MIT"
] | null | null | null | leetcode/1587-parallel-courses-ii.py | Magic07/online-judge-solutions | 02a289dd7eb52d7eafabc97bd1a043213b65f70a | [
"MIT"
] | null | null | null | leetcode/1587-parallel-courses-ii.py | Magic07/online-judge-solutions | 02a289dd7eb52d7eafabc97bd1a043213b65f70a | [
"MIT"
] | null | null | null | class Solution:
    def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:
        """Minimum semesters needed to take all n courses, at most k per semester.

        Bitmask DP over course subsets: finished[mask] is the fewest
        semesters after which exactly the courses in `mask` are completed.
        """
        # deps[j]: bitmask of prerequisites of course j (input pairs are 1-based).
        deps=[0]*(n)
        for x,y in dependencies:
            deps[y-1]+=1<<(x-1)
        finished=[sys.maxsize]*(1<<(n)) # finished[i]: semesters to complete course-set i.
        finished[0]=0
        for i in range(1<<n):
            # canTake: courses not yet in i whose prerequisites are all in i.
            canTake=0
            for j in range(n):
                if deps[j]&i==deps[j]:
                    canTake+=1<<j
            canTake&= ~i
            # Enumerate every non-empty subset t of canTake via (t-1)&canTake.
            t=canTake
            while t>0:
                # Taking subset t this semester is allowed only if |t| <= k.
                if finished[i|t]>finished[i]+1 and bin(t).count('1') <= k:
                    finished[i|t]=min(finished[i|t], finished[i]+1)
                t=(t-1)&canTake
        return finished[-1]
# Ref: https://leetcode.com/problems/parallel-courses-ii/discuss/708263/Can-anyone-explain-the-bit-mask-method | 39.590909 | 110 | 0.495982 | class Solution:
def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:
deps=[0]*(n)
for x,y in dependencies:
deps[y-1]+=1<<(x-1)
finished=[sys.maxsize]*(1<<(n))
finished[0]=0
for i in range(1<<n):
canTake=0
for j in range(n):
if deps[j]&i==deps[j]:
canTake+=1<<j
canTake&= ~i
t=canTake
while t>0:
if finished[i|t]>finished[i]+1 and bin(t).count('1') <= k:
finished[i|t]=min(finished[i|t], finished[i]+1)
t=(t-1)&canTake
return finished[-1]
| true | true |
f7f7a301377778c0d5e9a9630668a85ad4e79b4d | 534 | py | Python | src/app.py | joscelino/Preparacao_Ambiente_Python | 00421de0b998df0bc351b9107c6e2dda4a2a4256 | [
"MIT"
] | null | null | null | src/app.py | joscelino/Preparacao_Ambiente_Python | 00421de0b998df0bc351b9107c6e2dda4a2a4256 | [
"MIT"
] | null | null | null | src/app.py | joscelino/Preparacao_Ambiente_Python | 00421de0b998df0bc351b9107c6e2dda4a2a4256 | [
"MIT"
] | null | null | null | from functools import lru_cache
from typing import Union
@lru_cache
def soma(
    valor_1: Union[str, int, float], valor_2: Union[str, int, float]
) -> Union[str, int, float]:
    """Return the sum of the two values (concatenation for strings).

    Results are memoized by ``lru_cache``, so arguments must be hashable.

    :param valor_1: first value
    :param valor_2: second value
    :return: ``valor_1 + valor_2``
    """
    return valor_1 + valor_2
# Demonstration: numeric addition and string concatenation via the same helper.
SOMA_DOIS_NUMEROS = soma(1, 2.0524899927999035)
SOMA_DUAS_STRINGS = soma("Mar", "ia")
print(SOMA_DOIS_NUMEROS)
print(SOMA_DUAS_STRINGS)
| 23.217391 | 68 | 0.700375 | from functools import lru_cache
from typing import Union
@lru_cache
def soma(
valor_1: Union[str, int, float], valor_2: Union[str, int, float]
) -> Union[str, int, float]:
soma_dois_valores = valor_1 + valor_2
return soma_dois_valores
SOMA_DOIS_NUMEROS = soma(1, 2.0524899927999035)
SOMA_DUAS_STRINGS = soma("Mar", "ia")
print(SOMA_DOIS_NUMEROS)
print(SOMA_DUAS_STRINGS)
| true | true |
f7f7a36f4fe281b308c2322de6e9cae91ef74558 | 4,188 | py | Python | scrapers/city24_scraper.py | Zims/scraping-flask-sample | 083a4cf142d26bd40c807b718dcbabae2efd3cb2 | [
"MIT"
] | null | null | null | scrapers/city24_scraper.py | Zims/scraping-flask-sample | 083a4cf142d26bd40c807b718dcbabae2efd3cb2 | [
"MIT"
] | null | null | null | scrapers/city24_scraper.py | Zims/scraping-flask-sample | 083a4cf142d26bd40c807b718dcbabae2efd3cb2 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
from datetime import datetime, timezone, timedelta
import pytz
# Module-import-time snapshot of the current Riga time ('%T' == '%H:%M:%S').
tz=pytz.timezone("Europe/Riga")
time_now = datetime.now(tz)
format = "%Y-%m-%d-%T"  # NOTE(review): shadows the builtin format()
time_now = time_now.strftime(format)
time_now = time_now  # NOTE(review): self-assignment is a no-op — presumably leftover
def refresh_time_24():
    """Return the current Europe/Riga wall-clock time as 'YYYY-MM-DD-HH:MM:SS'.

    Used to build unique, timestamped output filenames for each scrape run.
    """
    riga_tz = pytz.timezone("Europe/Riga")
    # Spell out %H:%M:%S instead of the original '%T', which strftime does
    # not support on all platforms (e.g. Windows). Also avoids shadowing
    # the builtin format() as the original did.
    return datetime.now(riga_tz).strftime("%Y-%m-%d-%H:%M:%S")
def parse_city24_scraper():
    """Scrape city24.lv apartment listings and write them to an Excel file.

    NOTE(review): `parse_page_city24` reads `rows` from the enclosing scope,
    but the assignment to `rows` (and the call itself) are commented out —
    invoking it as-is would raise NameError. Confirm before re-enabling.
    """
    def parse_page_city24(page=0):
        # Extract one listing dict per result row; every field is wrapped in
        # try/except because any element may be missing from the markup.
        # NOTE(review): the bare excepts also hide real bugs — consider
        # narrowing to AttributeError/ValueError/TypeError.
        for row in rows:
            d = {}
            try:
                d["address"] = row.find("a", {"class": "addressLink"}).find("span").text.split(",")[0]
            except:
                d["address"] = None
            try:
                d["istabas"] = int(row.find("div", {"class": "column"}).find("ol").find_all("li")[1].find("strong").text)
            except:
                d["istabas"] = None
            try:
                d["platiba"] = float(row.find("div", {"class": "column"}).find("ol").find_all("li")[0].find("strong").text.split(" ")[0])
            except:
                d["platiba"] = None
            try:
                d["stavs"] = row.find("div", {"class": "column"}).find("ol").find_all("li")[2].find("strong").text
            except:
                d["stavs"] = None
            try:
                d["price_m2"] = float(row.find("div", {"class": "price_sqrm"}).text.replace(" ", "").replace("EUR/m²", "").replace(",", "."))
            except:
                d["price_m2"] = None
            try:
                d["price"] = int(row.find("div", {"class": "price"}).find("div").text.replace(" EUR", "").replace(" ", "").strip())
            except:
                d["price"] = None
            try:
                d["links"] = row.find("a", href=True)["href"]
            except:
                d["links"] = None
            try:
                d["vieta"] = row.find("a", {"class": "addressLink"}).find("span").text.split(",")[1]
            except:
                d["vieta"] = None
            # try:
            #     d["promo"] = row.find_all("div", {"class": "column"})[1].find("div", {"class": "promo"}).find("span").text
            # except:
            #     d["promo"] = None
            d_list.append(d)
    refresh_time_24()
    headers = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
    d_list = []
    # TODO set range to (0, 9)
    for page in range(0, 1):
        url = f"https://www.city24.lv/real-estate-search/apartments-for-sale/R%C4%ABga-%C4%80genskalns/id=25875-city/pg={page}"
        print(f"Processing page nr: {page} ...")
        print(url)
        response = requests.get(url, headers=headers)
        content = response.text
        soup = BeautifulSoup(content, "html.parser")
        print(content)
        # Write the raw page to disk for offline debugging.
        with open(f"city24_scraper_{page}.html", "w") as f:
            f.write(content)
        # table = soup.find("div", {"id": "list-container"})
        # rows = table.find_all("li", {"class": "new result regular"})
        time.sleep(0.5)
        # TODO uncoment next line
        # parse_page_city24(page)
    # Collect the scraped listings (currently empty — see note above).
    df = pd.DataFrame(d_list)
    # print(df)
    # real filtered file
    # import pandas as pd
    # Create a Pandas Excel writer using XlsxWriter as the engine.
    writer = pd.ExcelWriter(f"output/{refresh_time_24()}_city24.xlsx", engine='xlsxwriter')
    # Convert the dataframe to an XlsxWriter Excel object. We also turn off the
    # index column at the left of the output dataframe.
    df.to_excel(writer, sheet_name='Sludinajumi')
    # .dropna()
    # Get the xlsxwriter workbook and worksheet objects.
    workbook = writer.book
    worksheet = writer.sheets['Sludinajumi']
    # Get the dimensions of the dataframe.
    (max_row, max_col) = df.shape
    # Make the columns wider for clarity.
    worksheet.set_column(0, max_col - 1, 12)
    # Set the autofilter.
    worksheet.autofilter(0, 0, max_row, max_col)
    # Close the Pandas Excel writer and output the Excel file.
    # NOTE(review): ExcelWriter.save() is deprecated in newer pandas in
    # favour of close() — confirm the pinned pandas version.
    writer.save()
    # print("Done!")
| 31.253731 | 141 | 0.546323 | from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
from datetime import datetime, timezone, timedelta
import pytz
tz=pytz.timezone("Europe/Riga")
time_now = datetime.now(tz)
format = "%Y-%m-%d-%T"
time_now = time_now.strftime(format)
time_now = time_now
def refresh_time_24():
tz=pytz.timezone("Europe/Riga")
time_now = datetime.now(tz)
format = "%Y-%m-%d-%T"
time_now = time_now.strftime(format)
return time_now
def parse_city24_scraper():
def parse_page_city24(page=0):
for row in rows:
d = {}
try:
d["address"] = row.find("a", {"class": "addressLink"}).find("span").text.split(",")[0]
except:
d["address"] = None
try:
d["istabas"] = int(row.find("div", {"class": "column"}).find("ol").find_all("li")[1].find("strong").text)
except:
d["istabas"] = None
try:
d["platiba"] = float(row.find("div", {"class": "column"}).find("ol").find_all("li")[0].find("strong").text.split(" ")[0])
except:
d["platiba"] = None
try:
d["stavs"] = row.find("div", {"class": "column"}).find("ol").find_all("li")[2].find("strong").text
except:
d["stavs"] = None
try:
d["price_m2"] = float(row.find("div", {"class": "price_sqrm"}).text.replace(" ", "").replace("EUR/m²", "").replace(",", "."))
except:
d["price_m2"] = None
try:
d["price"] = int(row.find("div", {"class": "price"}).find("div").text.replace(" EUR", "").replace(" ", "").strip())
except:
d["price"] = None
try:
d["links"] = row.find("a", href=True)["href"]
except:
d["links"] = None
try:
d["vieta"] = row.find("a", {"class": "addressLink"}).find("span").text.split(",")[1]
except:
d["vieta"] = None
d_list.append(d)
refresh_time_24()
headers = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
d_list = []
for page in range(0, 1):
url = f"https://www.city24.lv/real-estate-search/apartments-for-sale/R%C4%ABga-%C4%80genskalns/id=25875-city/pg={page}"
print(f"Processing page nr: {page} ...")
print(url)
response = requests.get(url, headers=headers)
content = response.text
soup = BeautifulSoup(content, "html.parser")
print(content)
with open(f"city24_scraper_{page}.html", "w") as f:
f.write(content)
time.sleep(0.5)
df = pd.DataFrame(d_list)
writer = pd.ExcelWriter(f"output/{refresh_time_24()}_city24.xlsx", engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sludinajumi')
workbook = writer.book
worksheet = writer.sheets['Sludinajumi']
(max_row, max_col) = df.shape
worksheet.set_column(0, max_col - 1, 12)
worksheet.autofilter(0, 0, max_row, max_col)
writer.save()
| true | true |
f7f7a5047a23fc341997bb30139e2e4812285af7 | 2,050 | py | Python | tests/validator20/validate_spec_url_test.py | stevesimmons/swagger_spec_validator | 219dd57fb4480d789b3ad381f69c1e9e03f926b7 | [
"Apache-2.0"
] | 100 | 2015-01-22T17:37:32.000Z | 2021-11-08T10:29:56.000Z | tests/validator20/validate_spec_url_test.py | stevesimmons/swagger_spec_validator | 219dd57fb4480d789b3ad381f69c1e9e03f926b7 | [
"Apache-2.0"
] | 144 | 2015-01-21T21:03:14.000Z | 2022-01-05T11:51:28.000Z | tests/validator20/validate_spec_url_test.py | stevesimmons/swagger_spec_validator | 219dd57fb4480d789b3ad381f69c1e9e03f926b7 | [
"Apache-2.0"
] | 81 | 2015-01-15T21:47:35.000Z | 2021-09-30T08:55:08.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import mock
import pytest
from swagger_spec_validator.common import get_uri_from_file_path
from swagger_spec_validator.common import SwaggerValidationError
from swagger_spec_validator.common import SwaggerValidationWarning
from swagger_spec_validator.validator20 import validate_spec_url
from tests.conftest import is_urlopen_error
def test_success(petstore_contents):
    """The petstore spec validates cleanly and is fetched exactly once."""
    spec_url = 'http://localhost/api-docs'
    patched_read_url = mock.patch(
        'swagger_spec_validator.validator20.read_url',
        return_value=json.loads(petstore_contents),
    )
    with patched_read_url as mock_read_url:
        validate_spec_url(spec_url)
    mock_read_url.assert_called_once_with(spec_url)
def test_success_crossref_url_yaml():
    """A YAML spec referenced through a file:// URI validates cleanly."""
    spec_uri = get_uri_from_file_path(
        os.path.abspath("./tests/data/v2.0/minimal.yaml"))
    validate_spec_url(spec_uri)
def test_success_crossref_url_json():
    """A JSON spec containing relative $refs validates via a file:// URI."""
    spec_uri = get_uri_from_file_path(
        os.path.abspath('./tests/data/v2.0/relative_ref.json'))
    validate_spec_url(spec_uri)
def test_complicated_refs_json():
    """A spec with a deep chain of cross-file $refs validates cleanly."""
    spec_uri = get_uri_from_file_path(
        os.path.abspath('./tests/data/v2.0/test_complicated_refs/swagger.json'))
    validate_spec_url(spec_uri)
def test_specs_with_empty_reference():
    """A None-valued $ref raises a SwaggerValidationWarning with a precise message."""
    spec_path = os.path.abspath(
        './tests/data/v2.0/invalid_swagger_spec_because_empty_reference.yaml')
    with pytest.warns(SwaggerValidationWarning) as recorded:
        validate_spec_url(get_uri_from_file_path(spec_path))
    expected_message = (
        'Identified $ref with None value. This is usually an error, although technically it might be allowed. '
        '(path: #/definitions/model1/x-extends)'
    )
    assert str(recorded.list[0].message) == expected_message
def test_raise_SwaggerValidationError_on_urlopen_error():
    """An unreachable spec URL must surface as SwaggerValidationError wrapping the urlopen failure."""
    with pytest.raises(SwaggerValidationError) as excinfo:
        validate_spec_url('http://foo')
    assert is_urlopen_error(excinfo.value)
| 34.166667 | 116 | 0.76878 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import mock
import pytest
from swagger_spec_validator.common import get_uri_from_file_path
from swagger_spec_validator.common import SwaggerValidationError
from swagger_spec_validator.common import SwaggerValidationWarning
from swagger_spec_validator.validator20 import validate_spec_url
from tests.conftest import is_urlopen_error
def test_success(petstore_contents):
with mock.patch(
'swagger_spec_validator.validator20.read_url',
return_value=json.loads(petstore_contents),
) as mock_read_url:
validate_spec_url('http://localhost/api-docs')
mock_read_url.assert_called_once_with('http://localhost/api-docs')
def test_success_crossref_url_yaml():
urlpath = get_uri_from_file_path(os.path.abspath("./tests/data/v2.0/minimal.yaml"))
validate_spec_url(urlpath)
def test_success_crossref_url_json():
urlpath = get_uri_from_file_path(os.path.abspath('./tests/data/v2.0/relative_ref.json'))
validate_spec_url(urlpath)
def test_complicated_refs_json():
urlpath = get_uri_from_file_path(os.path.abspath('./tests/data/v2.0/test_complicated_refs/swagger.json'))
validate_spec_url(urlpath)
def test_specs_with_empty_reference():
with pytest.warns(SwaggerValidationWarning) as warninfo:
validate_spec_url(
get_uri_from_file_path(
os.path.abspath('./tests/data/v2.0/invalid_swagger_spec_because_empty_reference.yaml'),
),
)
assert 'Identified $ref with None value. This is usually an error, although technically it might be allowed. ' \
'(path: #/definitions/model1/x-extends)' == str(warninfo.list[0].message)
def test_raise_SwaggerValidationError_on_urlopen_error():
with pytest.raises(SwaggerValidationError) as excinfo:
validate_spec_url('http://foo')
assert is_urlopen_error(excinfo.value)
| true | true |
f7f7a7265f767b1ad1ebccf9838920c4239004cf | 1,067 | py | Python | backend/utils/test_data.py | HarishGuragol/Hack-eye-need-a-break | b116a80665692ab09f3fd54a9ae5bd5f2ba45b39 | [
"MIT"
] | null | null | null | backend/utils/test_data.py | HarishGuragol/Hack-eye-need-a-break | b116a80665692ab09f3fd54a9ae5bd5f2ba45b39 | [
"MIT"
] | null | null | null | backend/utils/test_data.py | HarishGuragol/Hack-eye-need-a-break | b116a80665692ab09f3fd54a9ae5bd5f2ba45b39 | [
"MIT"
] | null | null | null | import random
import time
from backend.session import create_session
from backend.utils.utils import create_new_user, add_eye_data, get_sensitivity_by_user_id, set_sensitivity, \
create_new_sensitivity
from db.models import User
def add_test_data():
    """Seed the database with a test user, random eye-tracking samples and a default sensitivity.

    Idempotent with respect to the user: an existing user with the test e-mail
    is reused instead of re-created, and a sensitivity record is only added
    when none exists yet. Eye samples are appended on every call.
    """
    user_fields = {
        "first_name": "test_first_name",
        "last_name": "test_last_name",
        "email": "test@test.com",
    }
    # 20k samples with random screen coordinates and timestamps spread over
    # the last ~1000 seconds.
    eye_samples = []
    for _ in range(20000):
        eye_samples.append({
            "x": random.randint(0, 100),
            "y": random.randint(0, 100),
            "timestamp": time.time() + random.randint(0, 1000) - 1000,
        })
    with create_session() as sess:
        user = sess.query(User).filter(User.c.email == user_fields["email"]).one_or_none()
        if not user:
            user = create_new_user(
                first_name=user_fields["first_name"],
                last_name=user_fields["last_name"],
                email=user_fields["email"],
            )
        add_eye_data(user.id, eye_samples)
        if not get_sensitivity_by_user_id(user.id):
            create_new_sensitivity(user.id, 0.6)
import time
from backend.session import create_session
from backend.utils.utils import create_new_user, add_eye_data, get_sensitivity_by_user_id, set_sensitivity, \
create_new_sensitivity
from db.models import User
def add_test_data():
test_user = {
"first_name": "test_first_name",
"last_name": "test_last_name",
"email": "test@test.com",
}
test_eye_data = [{
"x": random.randint(0, 100),
"y": random.randint(0, 100),
"timestamp": time.time() + random.randint(0, 1000) - 1000
} for _ in range(20000)]
with create_session() as sess:
user = sess.query(User).filter(User.c.email == test_user["email"]).one_or_none()
if not user:
user = create_new_user(
first_name=test_user["first_name"],
last_name=test_user["last_name"],
email=test_user["email"],
)
add_eye_data(user.id, test_eye_data)
if not get_sensitivity_by_user_id(user.id):
create_new_sensitivity(user.id, 0.6)
| true | true |
f7f7a7529d5591095ecdb29fc0db665d836f67b4 | 17,817 | py | Python | tensorflow_/tensorflowcv/models/mnasnet.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | [
"MIT"
] | null | null | null | tensorflow_/tensorflowcv/models/mnasnet.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | [
"MIT"
] | null | null | null | tensorflow_/tensorflowcv/models/mnasnet.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | [
"MIT"
] | null | null | null | """
MnasNet, implemented in TensorFlow.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet']
import os
import tensorflow as tf
from .common import conv2d, batchnorm, is_channels_first, flatten
def conv_block(x,
               in_channels,
               out_channels,
               kernel_size,
               strides,
               padding,
               groups,
               activate,
               training,
               data_format,
               name="conv_block"):
    """
    Standard convolution block: convolution, then batch normalization, then an
    optional ReLU activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    activate : bool
        Whether to apply ReLU after the batch normalization.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'conv_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # Bias is redundant here because batch normalization follows immediately.
    out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    out = batchnorm(
        x=out,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    if not activate:
        return out
    return tf.nn.relu(out, name=name + "/activ")
def conv1x1_block(x,
                  in_channels,
                  out_channels,
                  activate=True,
                  training=False,
                  data_format="channels_last",
                  name="conv1x1_block"):
    """
    Pointwise (1x1) specialization of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool, default True
        Whether to apply ReLU after the batch normalization.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # A 1x1 kernel needs no padding; stride is fixed at 1.
    return conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=1, padding=0, groups=1,
        activate=activate, training=training,
        data_format=data_format, name=name)
def dwconv_block(x,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 activate=True,
                 training=False,
                 data_format="channels_last",
                 name="dwconv_block"):
    """
    Depthwise specialization of the standard convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool, default True
        Whether to apply ReLU after the batch normalization.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # 'same'-style padding derived from the kernel size; groups == out_channels
    # makes the convolution depthwise.
    same_padding = kernel_size // 2
    return conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=kernel_size, strides=strides, padding=same_padding,
        groups=out_channels, activate=activate, training=training,
        data_format=data_format, name=name)
def dws_conv_block(x,
                   in_channels,
                   out_channels,
                   training,
                   data_format,
                   name="dws_conv_block"):
    """
    Depthwise separable convolution block: a 3x3 depthwise convolution followed
    by a 1x1 pointwise convolution, each with batch normalization and ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dws_conv_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    depthwise = dwconv_block(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        kernel_size=3,
        strides=1,
        training=training,
        data_format=data_format,
        name=name + "/dw_conv")
    pointwise = conv1x1_block(
        x=depthwise,
        in_channels=in_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/pw_conv")
    return pointwise
def mnas_unit(x,
              in_channels,
              out_channels,
              kernel_size,
              strides,
              expansion_factor,
              training,
              data_format,
              name="mnas_unit"):
    """
    MnasNet unit: the MobileNetV2-style inverted-residual ('Linear Bottleneck')
    block -- pointwise expansion, depthwise convolution, pointwise projection,
    with an identity skip connection when shapes permit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the depthwise stage.
    strides : int or tuple/list of 2 int
        Strides of the depthwise convolution.
    expansion_factor : int
        Factor for expansion of channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_unit'
        Unit name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    # The skip connection is only valid when spatial size and channel count
    # are both unchanged.
    use_skip = (strides == 1) and (in_channels == out_channels)
    mid_channels = expansion_factor * in_channels
    identity = x if use_skip else None
    out = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activate=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    out = dwconv_block(
        x=out,
        in_channels=mid_channels,
        out_channels=mid_channels,
        kernel_size=kernel_size,
        strides=strides,
        activate=True,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # Linear (non-activated) projection back down to out_channels.
    out = conv1x1_block(
        x=out,
        in_channels=mid_channels,
        out_channels=out_channels,
        activate=False,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    if use_skip:
        out = out + identity
    return out
def mnas_init_block(x,
                    in_channels,
                    out_channels_list,
                    training,
                    data_format,
                    name="mnas_init_block"):
    """
    MnasNet initial unit: a strided 3x3 stem convolution followed by a
    depthwise separable convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels_list : list of 2 int
        Output channels of the stem convolution and of the separable block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_init_block'
        Block name.

    Returns
    -------
    Tensor
        Resulted tensor.
    """
    stem = conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels_list[0],
        kernel_size=3,
        strides=2,
        padding=1,
        groups=1,
        activate=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    return dws_conv_block(
        x=stem,
        in_channels=out_channels_list[0],
        out_channels=out_channels_list[1],
        training=training,
        data_format=data_format,
        name=name + "/conv2")
class MnasNet(object):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit, grouped by stage.
    init_block_channels : list of 2 int
        Numbers of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Kernel size for each unit, grouped by stage.
    expansion_factors : list of list of int
        Expansion factor for each unit, grouped by stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 expansion_factors,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MnasNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Architecture description (per-stage lists).
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.kernel_sizes = kernel_sizes
        self.expansion_factors = expansion_factors
        # Input/output configuration.
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns
        -------
        Tensor
            Resulted tensor (classification logits).
        """
        x = mnas_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels_list=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels[-1]
        for stage_idx, stage_channels in enumerate(self.channels):
            stage_kernel_sizes = self.kernel_sizes[stage_idx]
            stage_expansions = self.expansion_factors[stage_idx]
            for unit_idx, out_channels in enumerate(stage_channels):
                # Only the first unit of every stage downsamples spatially.
                x = mnas_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=stage_kernel_sizes[unit_idx],
                    strides=(2 if unit_idx == 0 else 1),
                    expansion_factor=stage_expansions[unit_idx],
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_idx + 1, unit_idx + 1))
                in_channels = out_channels
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            activate=True,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        x = tf.layers.average_pooling2d(
            inputs=x,
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.layers.dense(
            inputs=x,
            units=self.classes,
            name="output")
        return x
def get_mnasnet(model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create MnasNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns
-------
functor
Functor for model graph creation with extra fields.
"""
init_block_channels = [32, 16]
final_block_channels = 1280
layers = [3, 3, 3, 2, 4, 1]
downsample = [1, 1, 1, 0, 1, 0]
channels_per_layers = [24, 40, 80, 96, 192, 320]
expansion_factors_per_layers = [3, 3, 6, 6, 6, 6]
kernel_sizes_per_layers = [3, 5, 5, 3, 5, 3]
default_kernel_size = 3
from functools import reduce
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [])
kernel_sizes = reduce(lambda x, y: x + [[y[0]] + [default_kernel_size] * (y[1] - 1)] if y[2] != 0 else x[:-1] + [
x[-1] + [y[0]] + [default_kernel_size] * (y[1] - 1)], zip(kernel_sizes_per_layers, layers, downsample), [])
expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(expansion_factors_per_layers, layers, downsample), [])
net = MnasNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernel_sizes=kernel_sizes,
expansion_factors=expansion_factors,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_state_dict
net.state_dict, net.file_path = download_state_dict(
model_name=model_name,
local_model_store_dir_path=root)
else:
net.state_dict = None
net.file_path = None
return net
def mnasnet(**kwargs):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.

    Returns
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    model_name = "mnasnet"
    return get_mnasnet(model_name=model_name, **kwargs)
def _test():
    """Smoke test: build each model, count parameters and run one forward pass (TF1 graph mode)."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        mnasnet,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Placeholder shape depends on the chosen data format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable parameters.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # assert (model != mnasnet or weight_count == 4308816)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # One forward pass on a zero image; only the output shape is checked.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
if __name__ == "__main__":
_test()
| 30.096284 | 118 | 0.583488 |
__all__ = ['MnasNet', 'mnasnet']
import os
import tensorflow as tf
from .common import conv2d, batchnorm, is_channels_first, flatten
def conv_block(x,
in_channels,
out_channels,
kernel_size,
strides,
padding,
groups,
activate,
training,
data_format,
name="conv_block"):
x = conv2d(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
use_bias=False,
data_format=data_format,
name=name + "/conv")
x = batchnorm(
x=x,
training=training,
data_format=data_format,
name=name + "/bn")
if activate:
x = tf.nn.relu(x, name=name + "/activ")
return x
def conv1x1_block(x,
in_channels,
out_channels,
activate=True,
training=False,
data_format="channels_last",
name="conv1x1_block"):
return conv_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=1,
padding=0,
groups=1,
activate=activate,
training=training,
data_format=data_format,
name=name)
def dwconv_block(x,
in_channels,
out_channels,
kernel_size,
strides,
activate=True,
training=False,
data_format="channels_last",
name="dwconv_block"):
return conv_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=(kernel_size // 2),
groups=out_channels,
activate=activate,
training=training,
data_format=data_format,
name=name)
def dws_conv_block(x,
in_channels,
out_channels,
training,
data_format,
name="dws_conv_block"):
x = dwconv_block(
x=x,
in_channels=in_channels,
out_channels=in_channels,
kernel_size=3,
strides=1,
training=training,
data_format=data_format,
name=name + "/dw_conv")
x = conv1x1_block(
x=x,
in_channels=in_channels,
out_channels=out_channels,
training=training,
data_format=data_format,
name=name + "/pw_conv")
return x
def mnas_unit(x,
in_channels,
out_channels,
kernel_size,
strides,
expansion_factor,
training,
data_format,
name="mnas_unit"):
residual = (in_channels == out_channels) and (strides == 1)
mid_channels = in_channels * expansion_factor
if residual:
identity = x
x = conv1x1_block(
x=x,
in_channels=in_channels,
out_channels=mid_channels,
activate=True,
training=training,
data_format=data_format,
name=name + "/conv1")
x = dwconv_block(
x=x,
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
activate=True,
training=training,
data_format=data_format,
name=name + "/conv2")
x = conv1x1_block(
x=x,
in_channels=mid_channels,
out_channels=out_channels,
activate=False,
training=training,
data_format=data_format,
name=name + "/conv3")
if residual:
x = x + identity
return x
def mnas_init_block(x,
in_channels,
out_channels_list,
training,
data_format,
name="mnas_init_block"):
x = conv_block(
x=x,
in_channels=in_channels,
out_channels=out_channels_list[0],
kernel_size=3,
strides=2,
padding=1,
groups=1,
activate=True,
training=training,
data_format=data_format,
name=name + "/conv1")
x = dws_conv_block(
x=x,
in_channels=out_channels_list[0],
out_channels=out_channels_list[1],
training=training,
data_format=data_format,
name=name + "/conv2")
return x
class MnasNet(object):
def __init__(self,
channels,
init_block_channels,
final_block_channels,
kernel_sizes,
expansion_factors,
in_channels=3,
in_size=(224, 224),
classes=1000,
data_format="channels_last",
**kwargs):
super(MnasNet, self).__init__(**kwargs)
assert (data_format in ["channels_last", "channels_first"])
self.channels = channels
self.init_block_channels = init_block_channels
self.final_block_channels = final_block_channels
self.kernel_sizes = kernel_sizes
self.expansion_factors = expansion_factors
self.in_channels = in_channels
self.in_size = in_size
self.classes = classes
self.data_format = data_format
def __call__(self,
x,
training=False):
in_channels = self.in_channels
x = mnas_init_block(
x=x,
in_channels=in_channels,
out_channels_list=self.init_block_channels,
training=training,
data_format=self.data_format,
name="features/init_block")
in_channels = self.init_block_channels[-1]
for i, channels_per_stage in enumerate(self.channels):
kernel_sizes_per_stage = self.kernel_sizes[i]
expansion_factors_per_stage = self.expansion_factors[i]
for j, out_channels in enumerate(channels_per_stage):
kernel_size = kernel_sizes_per_stage[j]
expansion_factor = expansion_factors_per_stage[j]
strides = 2 if (j == 0) else 1
x = mnas_unit(
x=x,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
expansion_factor=expansion_factor,
training=training,
data_format=self.data_format,
name="features/stage{}/unit{}".format(i + 1, j + 1))
in_channels = out_channels
x = conv1x1_block(
x=x,
in_channels=in_channels,
out_channels=self.final_block_channels,
activate=True,
training=training,
data_format=self.data_format,
name="features/final_block")
x = tf.layers.average_pooling2d(
inputs=x,
pool_size=7,
strides=1,
data_format=self.data_format,
name="features/final_pool")
x = flatten(
x=x,
data_format=self.data_format)
x = tf.layers.dense(
inputs=x,
units=self.classes,
name="output")
return x
def get_mnasnet(model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
init_block_channels = [32, 16]
final_block_channels = 1280
layers = [3, 3, 3, 2, 4, 1]
downsample = [1, 1, 1, 0, 1, 0]
channels_per_layers = [24, 40, 80, 96, 192, 320]
expansion_factors_per_layers = [3, 3, 6, 6, 6, 6]
kernel_sizes_per_layers = [3, 5, 5, 3, 5, 3]
default_kernel_size = 3
from functools import reduce
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [])
kernel_sizes = reduce(lambda x, y: x + [[y[0]] + [default_kernel_size] * (y[1] - 1)] if y[2] != 0 else x[:-1] + [
x[-1] + [y[0]] + [default_kernel_size] * (y[1] - 1)], zip(kernel_sizes_per_layers, layers, downsample), [])
expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(expansion_factors_per_layers, layers, downsample), [])
net = MnasNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernel_sizes=kernel_sizes,
expansion_factors=expansion_factors,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_state_dict
net.state_dict, net.file_path = download_state_dict(
model_name=model_name,
local_model_store_dir_path=root)
else:
net.state_dict = None
net.file_path = None
return net
def mnasnet(**kwargs):
return get_mnasnet(model_name="mnasnet", **kwargs)
def _test():
import numpy as np
data_format = "channels_last"
pretrained = False
models = [
mnasnet,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
x = tf.placeholder(
dtype=tf.float32,
shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
name="xx")
y_net = net(x)
weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print("m={}, {}".format(model.__name__, weight_count))
with tf.Session() as sess:
if pretrained:
from .model_store import init_variables_from_state_dict
init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
else:
sess.run(tf.global_variables_initializer())
x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
y = sess.run(y_net, feed_dict={x: x_value})
assert (y.shape == (1, 1000))
tf.reset_default_graph()
if __name__ == "__main__":
_test()
| true | true |
f7f7a76c33fcb73ee049e769903859264f14581a | 4,019 | py | Python | person_detector.py | JTZ18/auto-tinder | c1d0795a402fa6c10d625d3b283597eb6b25c06b | [
"MIT"
] | 1 | 2022-01-19T16:27:49.000Z | 2022-01-19T16:27:49.000Z | person_detector.py | JTZ18/auto-tinder | c1d0795a402fa6c10d625d3b283597eb6b25c06b | [
"MIT"
] | 8 | 2020-09-25T21:38:04.000Z | 2022-03-12T00:14:36.000Z | person_detector.py | JTZ18/auto-tinder | c1d0795a402fa6c10d625d3b283597eb6b25c06b | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from PIL import Image
PERSON_CLASS = 1
SCORE_THRESHOLD = 0.5
def run_inference_for_single_image(image, sess):
    """Run the object-detection graph on one image batch and return numpy outputs.

    Parameters
    ----------
    image : numpy array
        Batched image tensor fed to the graph's `image_tensor:0` input;
        assumes shape (1, height, width, 3) -- TODO confirm against callers
        (``get_person`` expands dims before calling).
    sess : tf.compat.v1.Session
        Session whose default graph is the loaded detection graph.

    Returns
    -------
    dict
        Keys: 'num_detections' (int), 'detection_classes' (int64 array),
        'detection_boxes', 'detection_scores' and, when the graph provides
        them, 'detection_masks' -- all with the batch dimension stripped.
    """
    # Collect only the output tensors that actually exist in this graph.
    ops = tf.compat.v1.get_default_graph().get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    tensor_dict = {}
    for key in [
        'num_detections', 'detection_boxes', 'detection_scores',
        'detection_classes', 'detection_masks'
    ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
                tensor_name)
    if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[1], image.shape[2])
        # Binarize the reframed masks at 0.5.
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
    image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: image})
    # all outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.int64)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def open_graph(graph_path='ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb'):
    """Load a frozen TensorFlow object-detection graph from disk.

    Parameters
    ----------
    graph_path : str, default SSD MobileNet v1 COCO frozen graph
        Path to the frozen ``.pb`` inference graph. Parameterized (previously
        hard-coded) so other detection models can be loaded; the default
        preserves the original behavior for existing callers.

    Returns
    -------
    tf.Graph
        Graph with the serialized GraphDef imported (no name prefix).
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.compat.v2.io.gfile.GFile(graph_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph
def load_image_into_numpy_array(image):
    """Convert a PIL RGB image into a (height, width, 3) uint8 numpy array."""
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
def get_person(image_path, sess):
    """Detect people in an image and return a crop of the first confident detection.

    Parameters
    ----------
    image_path : str
        Path to the image file on disk.
    sess : tf.compat.v1.Session
        Session whose default graph is the loaded detection graph
        (see ``open_graph``).

    Returns
    -------
    PIL.Image.Image or None
        Crop around the first detected person whose score exceeds
        ``SCORE_THRESHOLD``, or ``None`` when no person is found.
    """
    img = Image.open(image_path)
    image_np = load_image_into_numpy_array(img)
    image_np_expanded = np.expand_dims(image_np, axis=0)
    output_dict = run_inference_for_single_image(image_np_expanded, sess)
    # Keep the normalized [ymin, xmin, ymax, xmax] boxes of confident person hits.
    persons_coordinates = [
        box for box, score, classtype in zip(
            output_dict["detection_boxes"],
            output_dict["detection_scores"],
            output_dict["detection_classes"],
        )
        if score > SCORE_THRESHOLD and classtype == PERSON_CLASS
    ]
    if not persons_coordinates:
        return None
    # The original loop returned on its first iteration; make the
    # "first (highest-ranked) detection only" behavior explicit.
    w, h = img.size
    ymin, xmin, ymax, xmax = persons_coordinates[0]
    return img.crop((
        int(w * xmin),
        int(h * ymin),
        int(w * xmax),
        int(h * ymax),
    ))
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from PIL import Image
PERSON_CLASS = 1
SCORE_THRESHOLD = 0.5
def run_inference_for_single_image(image, sess):
    """Run the detection graph on one batched image and return plain numpy results.

    image: batched image array fed to the graph's ``image_tensor`` input
        (indexing below assumes a batch of one).
    sess: an open tf.compat.v1.Session whose default graph is the
        detection graph.
    Returns a dict with ``num_detections`` (int), ``detection_classes``
    (int64 array), ``detection_boxes``, ``detection_scores`` and, when the
    model provides them, ``detection_masks``.
    """
    # collect the output tensors that actually exist in this graph
    ops = tf.compat.v1.get_default_graph().get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    tensor_dict = {}
    for key in [
        'num_detections', 'detection_boxes', 'detection_scores',
        'detection_classes', 'detection_masks'
    ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(
                tensor_name)
    if 'detection_masks' in tensor_dict:
        # masks come per-box; reframe them onto full image coordinates
        # (image.shape[1] / image.shape[2] are used as image height/width
        # here — assumes a (1, H, W, C) batch; TODO confirm)
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[1], image.shape[2])
        # binarize the reframed masks at 0.5
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
    image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
    # single forward pass producing all requested outputs
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: image})
    # all outputs are float32 numpy arrays; strip the batch dim and fix types
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.int64)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def open_graph():
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.compat.v2.io.gfile.GFile('ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb', 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def get_person(image_path, sess):
img = Image.open(image_path)
image_np = load_image_into_numpy_array(img)
image_np_expanded = np.expand_dims(image_np, axis=0)
output_dict = run_inference_for_single_image(image_np_expanded, sess)
persons_coordinates = []
for i in range(len(output_dict["detection_boxes"])):
score = output_dict["detection_scores"][i]
classtype = output_dict["detection_classes"][i]
if score > SCORE_THRESHOLD and classtype == PERSON_CLASS:
persons_coordinates.append(output_dict["detection_boxes"][i])
w, h = img.size
for person_coordinate in persons_coordinates:
cropped_img = img.crop((
int(w * person_coordinate[1]),
int(h * person_coordinate[0]),
int(w * person_coordinate[3]),
int(h * person_coordinate[2]),
))
return cropped_img
return None | true | true |
f7f7a79db784675cf5ffbb21327bc3d4b5263a8d | 3,375 | py | Python | admin_kit/fields.py | mouryavenkat/django-admin-kit | d8a95967768c425021a3c864453a1f526f72f659 | [
"MIT"
] | null | null | null | admin_kit/fields.py | mouryavenkat/django-admin-kit | d8a95967768c425021a3c864453a1f526f72f659 | [
"MIT"
] | null | null | null | admin_kit/fields.py | mouryavenkat/django-admin-kit | d8a95967768c425021a3c864453a1f526f72f659 | [
"MIT"
] | null | null | null | """
Admin Kit Fields module
"""
import json
from django import forms
from .widgets import SelectWidget, SelectMultipleWidget
__all__ = ['BaseField', 'MultiSelectField', 'SelectField']
class BaseField(forms.Field):
    """
    Common base for Admin Kit form fields.

    Serializes the kit configuration (ajax source/target, subscription
    flag, default value) into a JSON ``data-kit-config`` attribute on
    the widget.
    """

    def __init__(self, kit_config=None, ajax_source=None, ajax_target=None,
                 ajax_subscribe=None, default_value=None, *args, **kwargs):
        """
        kit_config :: dict
            Extra configuration parameters merged into the widget config.
        ajax_source :: str
            The source value from which the values are retrieved.
        ajax_target :: str
            The target value to which the values will be filled.
        ajax_subscribe :: bool
            If True, every change in ``ajax_target`` refreshes the
            corresponding ``ajax_source``.
        default_value
            Value pre-selected by default.
        """
        self.ajax_source = ajax_source
        self.ajax_target = ajax_target
        self.ajax_subscribe = ajax_subscribe
        self.default_value = default_value
        self.kit_config = kit_config if kit_config else dict()
        super(BaseField, self).__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        """
        Adds the ``data-kit-config`` attribute (JSON) to the widget.
        """
        attrs = super(BaseField, self).widget_attrs(widget)
        config = dict(self.kit_config)
        # only truthy settings are emitted, mirroring the constructor args
        for key, value in (('ajax-source', self.ajax_source),
                           ('ajax-target', self.ajax_target),
                           ('ajax-subscribe', self.ajax_subscribe),
                           ('default_value', self.default_value)):
            if value:
                config[key] = value
        attrs['data-kit-config'] = json.dumps(config)
        return attrs
class MultiSelectField(BaseField):
    """
    Multi-choice Admin Kit form field.

    Stored values may arrive either as a list or as one string joined
    with ``seperator``; both forms are normalized to a list.
    """

    widget = SelectMultipleWidget

    def __init__(self, seperator=',', choices=(), *args, **kwargs):
        self.seperator = seperator
        if 'coerce' in kwargs:
            self._coerce = kwargs.pop('coerce')
        super(MultiSelectField, self).__init__(*args, **kwargs)
        self.choices = choices if choices else [['', '']]
        self.widget.choices = self.choices

    def prepare_value(self, value):
        """Split a joined string back into a list for rendering."""
        value = super(MultiSelectField, self).prepare_value(value)
        if hasattr(self, '_coerce'):
            value = self._coerce(value)
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            return value.split(self.seperator)
        return value

    def to_python(self, value):
        """Turn a submitted string into a list of whitespace-stripped parts."""
        value = super(MultiSelectField, self).to_python(value)
        if isinstance(value, str):
            return [part.strip() for part in value.split(self.seperator)]
        return value
class SelectField(BaseField):
    """
    Single-choice Admin Kit form field.

    Accepts (and discards) a ``coerce`` keyword so its signature stays
    compatible with :class:`MultiSelectField`.
    """

    widget = SelectWidget

    def __init__(self, choices=(), *args, **kwargs):
        """Initialize the field, defaulting to a single empty choice."""
        kwargs.pop('coerce', None)
        super(SelectField, self).__init__(*args, **kwargs)
        self.choices = choices if choices else [['', '']]
        self.widget.choices = self.choices
| 30.405405 | 75 | 0.614519 |
import json
from django import forms
from .widgets import SelectWidget, SelectMultipleWidget
__all__ = ['BaseField', 'MultiSelectField', 'SelectField']
class BaseField(forms.Field):
def __init__(self, kit_config=None, ajax_source=None, ajax_target=None,
ajax_subscribe=None, default_value=None, *args, **kwargs):
self.ajax_source = ajax_source
self.ajax_target = ajax_target
self.ajax_subscribe = ajax_subscribe
self.default_value = default_value
self.kit_config = dict()
if kit_config:
self.kit_config = kit_config
super(BaseField, self).__init__(*args, **kwargs)
def widget_attrs(self, widget):
attrs = super(BaseField, self).widget_attrs(widget)
kit_config = self.kit_config.copy()
if self.ajax_source:
kit_config['ajax-source'] = self.ajax_source
if self.ajax_target:
kit_config['ajax-target'] = self.ajax_target
if self.ajax_subscribe:
kit_config['ajax-subscribe'] = self.ajax_subscribe
if self.default_value :
kit_config['default_value'] = self.default_value
attrs['data-kit-config'] = json.dumps(kit_config)
return attrs
class MultiSelectField(BaseField):
widget = SelectMultipleWidget
def __init__(self, seperator=',', choices=(), *args, **kwargs):
self.seperator = seperator
if 'coerce' in kwargs:
self._coerce = kwargs.pop('coerce')
super(MultiSelectField, self).__init__(*args, **kwargs)
self.choices = choices or [['', '']]
self.widget.choices = self.choices
def prepare_value(self, value):
value = super(MultiSelectField, self).prepare_value(value)
if hasattr(self, '_coerce'):
value = self._coerce(value)
if isinstance(value, list):
return value
if isinstance(value, str):
return value.split(self.seperator)
return value
def to_python(self, value):
value = super(MultiSelectField, self).to_python(value)
if isinstance(value, str):
return list(map(str.strip, value.split(self.seperator)))
return value
class SelectField(BaseField):
widget = SelectWidget
def __init__(self, choices=(), *args, **kwargs):
if 'coerce' in kwargs:
kwargs.pop('coerce')
super(SelectField, self).__init__(*args, **kwargs)
self.choices = choices or [['', '']]
self.widget.choices = self.choices
| true | true |
f7f7a7f87818cb6fde7688392f6d9e35e32414b9 | 1,246 | py | Python | mayan/apps/tags/migrations/0002_tag_selection.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | null | null | null | mayan/apps/tags/migrations/0002_tag_selection.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 10 | 2021-03-19T23:48:12.000Z | 2022-03-12T00:41:49.000Z | mayan/apps/tags/migrations/0002_tag_selection.py | sophiawa/Mayan-EDMS | 42f20576d0c690b645a60bf53c5169cda4264231 | [
"Apache-2.0"
] | 1 | 2020-12-17T02:35:09.000Z | 2020-12-17T02:35:09.000Z | from django.db import migrations
import colorful.fields
# legacy three-letter color identifiers previously stored in Tag.color
COLOR_BLUE = 'blu'
COLOR_CORAL = 'crl'
COLOR_CYAN = 'cya'
COLOR_GREENYELLOW = 'gry'
COLOR_KHAKI = 'kki'
COLOR_LIGHTGREY = 'lig'
COLOR_MAGENTA = 'mag'
COLOR_ORANGE = 'org'
COLOR_RED = 'red'
COLOR_YELLOW = 'yel'

# mapping from each legacy code to its hex RGB value for the new field
RGB_VALUES = {
    COLOR_BLUE: '#0000ff',
    COLOR_CORAL: '#ff7f50',
    COLOR_CYAN: '#00ffff',
    COLOR_GREENYELLOW: '#adff2f',
    COLOR_KHAKI: '#f0e68c',
    COLOR_LIGHTGREY: '#d3d3d3',
    COLOR_MAGENTA: '#ff00ff',
    COLOR_ORANGE: '#ffa500',
    COLOR_RED: '#ff0000',
    COLOR_YELLOW: '#ffff00',
}
def operation_convert_color_names_to_rgb(apps, schema_editor):
    """Backfill each tag's new RGB ``selection`` from its legacy color code."""
    Tag = apps.get_model(app_label='tags', model_name='Tag')
    queryset = Tag.objects.using(schema_editor.connection.alias).all()
    for tag in queryset:
        tag.selection = RGB_VALUES[tag.color]
        tag.save()
class Migration(migrations.Migration):
    """Add the RGB ``selection`` field to Tag and backfill it from the
    legacy three-letter color codes."""

    dependencies = [
        ('tags', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='selection',
            # temporary default lets the column be added to existing rows
            field=colorful.fields.RGBColorField(default='#FFFFFF'),
            preserve_default=False,
        ),
        migrations.RunPython(code=operation_convert_color_names_to_rgb),
    ]
| 23.509434 | 72 | 0.655698 | from django.db import migrations
import colorful.fields
COLOR_BLUE = 'blu'
COLOR_CORAL = 'crl'
COLOR_CYAN = 'cya'
COLOR_GREENYELLOW = 'gry'
COLOR_KHAKI = 'kki'
COLOR_LIGHTGREY = 'lig'
COLOR_MAGENTA = 'mag'
COLOR_ORANGE = 'org'
COLOR_RED = 'red'
COLOR_YELLOW = 'yel'
RGB_VALUES = {
COLOR_BLUE: '#0000ff',
COLOR_CORAL: '#ff7f50',
COLOR_CYAN: '#00ffff',
COLOR_GREENYELLOW: '#adff2f',
COLOR_KHAKI: '#f0e68c',
COLOR_LIGHTGREY: '#d3d3d3',
COLOR_MAGENTA: '#ff00ff',
COLOR_ORANGE: '#ffa500',
COLOR_RED: '#ff0000',
COLOR_YELLOW: '#ffff00',
}
def operation_convert_color_names_to_rgb(apps, schema_editor):
Tag = apps.get_model(app_label='tags', model_name='Tag')
for tag in Tag.objects.using(schema_editor.connection.alias).all():
tag.selection = RGB_VALUES[tag.color]
tag.save()
class Migration(migrations.Migration):
dependencies = [
('tags', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tag',
name='selection',
field=colorful.fields.RGBColorField(default='#FFFFFF'),
preserve_default=False,
),
migrations.RunPython(code=operation_convert_color_names_to_rgb),
]
| true | true |
f7f7a9a700223d6ac211496fd0831a78e185f259 | 21,098 | py | Python | main.py | Milenium-Cr/txt-casino | 2fd2072bb91a10f0589686de56d09dce1ba3d27a | [
"MIT"
] | 1 | 2020-09-08T15:45:39.000Z | 2020-09-08T15:45:39.000Z | main.py | Milenium-Cr/txt-casino | 2fd2072bb91a10f0589686de56d09dce1ba3d27a | [
"MIT"
] | null | null | null | main.py | Milenium-Cr/txt-casino | 2fd2072bb91a10f0589686de56d09dce1ba3d27a | [
"MIT"
] | 3 | 2020-08-28T15:06:40.000Z | 2021-04-24T14:33:29.000Z | import pickle
import os.path
from random import randint, choice
"""
* Хранилище в файле
* В настройки "автосохранение"
* Статус банка и хранилища в файле
"""
lvlupcost = [25, 30, 35, 40]
statesfile = "playerinfo.data"
checkfile = os.path.exists(f'{statesfile}')
if checkfile:
with open(statesfile, 'rb') as f:
loadeddata = pickle.load(f)
autoload = loadeddata["autoload"]
class Player:
    """Holds all player state and implements the casino mini-games,
    the bank/storage buildings and save/load of progress."""

    def __init__(self):
        # starting amount of money
        self.startmoney = 35
        # "general" variables
        self.money = self.startmoney
        self.winstreak = 0
        self.bet = 0
        self.rate = 0
        # bank building (coin exchange)
        self.bankcoins = 0
        self.bankstatus = "НЕ ПОСТРОЕН"
        # settings
        self.autoloadstat = "ВЫКЛ"
        # progress save/load
        # NOTE(review): savestate() writes to `self.states` (plural);
        # this attribute looks unused — confirm before removing.
        self.state = {}
        # super-user ("god mode")
        self.gm = 0
        self.status = ""
        # coin-flip game variables
        self.coin = 0
        self.buyHelp = None
        self.CFgames = []
        self.CFlist_status = ""
        # rock-paper-scissors game variables
        self.kanobubot = 0
        self.KNBgames = []
        self.KNBlist_status = ""
        # storage building
        self.storagedmoney = 0
        self.putmoney = 0
        self.storagelimit = 25
        self.storagestatus = "НЕ ПОСТРОЕН"
        self.storagelvl = 1
        self.storagelvlup = 40
def coinflip(self):
print("Коинфлип (или же монеточка) - все зависит от вашей удачи.")
print("Поставьте ставку, и если вы победите, вы получите в 2 раза больше")
print("Если проиграете, вы потеряете ту сумму, которую вложили.")
print("Введите 0 для возвращения в меню.")
while True:
print("\nВаша ставка? {%s}" % self.money)
try:
self.rate = int(input(">>> "))
except ValueError:
self.rate = 0
if self.rate == 0:
break
if self.rate > self.money:
print("У вас мало денег для такой ставки!")
else:
if self.rate == self.money:
print("Видимо, вы очень смелый человек, раз решили потратить сразу все деньги...")
self.bet = int(input("\nОрел или решка? (1/2)\n>>> "))
self.coin = randint(1, 2)
if self.coin == self.bet:
self.money += self.rate
# +1 к винстрику
if self.winstreak < 0:
self.winstreak = 0
elif self.winstreak >= 0:
self.winstreak += 1
print(f"Вы победили! Ваш приз - {self.rate * 2}.\nВаш баланс - {self.money}.\n")
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(+%(win)s) {Баланс %(money)s}" % {"win": self.rate, "money": self.money})
else:
print("К сожалению, вы проиграли.")
if self.winstreak >= 10:
print(f"Но так как у вас есть {self.winstreak}, вы можете потратить 10 ед., что-бы вернуть свои деньги.")
i = input("Согласны? (y/n)")
if i == 'y':
self.buyHelp = True
self.winstreak -= 10
print("Вы не потратили свои деньги.")
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(Купил 2 шанс) {Баланс %s}" % self.money)
else:
self.buyHelp = False
else:
self.buyHelp = False
if not self.buyHelp:
self.money -= self.rate
print(f"Ваш баланс - {self.money} (-{self.rate})")
# обнуление/-1 к винстрику
if self.winstreak <= 0:
self.winstreak -= 1
elif self.winstreak > 0:
self.winstreak = 0
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(-%(lose)s) {Баланс %(money)s}" % {"lose": self.rate, "money": self.money})
if self.money > 0:
print("Сыграем еще раз? (y/n)")
i = input(">>> ")
if i == 'n':
break
else:
print("Продолжаем!\n")
else:
print("У вас 0 денег.")
break
# coin = 0
self.bet = 0
self.rate = 0
def op(self):
self.gm = 1
self.money = 1000000
self.winstreak = 5000
self.status = "[АКТИВИРОВАНО]"
print("Super-User активирован!")
# история coinflip игр
def CFreplays(self):
if self.CFgames == []:
print("\nНеактивировано. Сыграйте 1 игру в CF.\n")
else:
self.CFlist_status = "[Активно!]"
count = 0
for j in self.CFgames:
count += 1
print(f"{count}. {j}")
def KNBreplays(self):
if self.KNBgames == []:
print("\nНеактивировано. Сыграйте 1 игру в KNB.\n")
else:
self.KNBlist_status = "[Активно!]"
count = 0
for j in self.KNBgames:
count += 1
print(f"{count}. {j}")
    def savestate(self):
        """Serialize the persistent player state to ``statesfile`` via pickle."""
        # NOTE(review): stored on `self.states`, while __init__ defines
        # `self.state` — the __init__ attribute appears unused.
        self.states = {
            "money": self.money,
            "winstreak": self.winstreak,
            "coins": self.bankcoins,
            "status": self.status,
            "autoload": self.autoloadstat,
            "bankstatus": self.bankstatus,
            "storagestatus": self.storagestatus,
            "storagelevel": self.storagelvl,
            "storagelimit": self.storagelimit,
            "storagedmoney": self.storagedmoney
        }
        with open(statesfile, "wb") as file:
            pickle.dump(self.states, file)
        print("\nПрогресс сохранен!\n")
    def loadstate(self):
        """Restore player state previously written by ``savestate``.

        Raises FileNotFoundError when ``statesfile`` does not exist yet.
        """
        with open(statesfile, 'rb') as file:
            loadeddata = pickle.load(file)
        self.money = loadeddata["money"]
        self.winstreak = loadeddata["winstreak"]
        self.bankcoins = loadeddata["coins"]
        self.status = loadeddata["status"]
        self.autoloadstat = loadeddata["autoload"]
        self.bankstatus = loadeddata["bankstatus"]
        self.storagestatus = loadeddata["storagestatus"]
        self.storagelvl = loadeddata["storagelevel"]
        self.storagelimit = loadeddata["storagelimit"]
        self.storagedmoney = loadeddata["storagedmoney"]
        print("\nПрогресс загружен!\n")
def bank(self):
while True:
print("Выберите функцию:")
print("1. Обменять деньги на монеты {20 денег -> 1 монета}")
print("2. Обменять винстрик на монеты {3 винстрика -> 1 монета}")
print("3. Обменять монеты на деньги {1 монета -> 15 денег}")
print("4. Обменять монеты на винстрик {1 монета -> 3 винстрика}")
print("5. Что за банк?")
print("6. Выйти")
print(f"Запас ваших монеток в банке - {self.bankcoins}")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1:
if self.money >= 20:
self.money -= 20
self.bankcoins += 1
print(f"Вы купили 1 монету. {self.bankcoins}")
print(f"Ваш баланс - {self.money}")
else:
print(f"У вас мало денег для покупки монет.\nНакопите еще {25 - self.money}")
elif act == 2:
if self.winstreak >= 3:
self.winstreak -= 3
self.bankcoins += 1
print(f"Вы купили 1 монету. {{{self.backcoins}}}")
print(f"Ваш винстрик - {self.winstreak}")
else:
print(f"У вас мало винстрика для покупки монет! Накопите еще {5 - self.winstreak}")
elif act == 3:
if self.bankcoins >= 1:
self.bankcoins -= 1
self.money += 15
print(f"Вы обменяли 1 монету на 20 денег. ({self.bankcoins} монет осталось)")
print(f"Ваш баланс - {self.money}")
else:
print("Недостаточно монет.")
elif act == 4:
if self.bankcoins >= 1:
self.bankcoins -= 1
self.winstreak += 3
print(f"Вы обменяли 1 монету на 5 винстриков. ({self.bankcoins} монет осталось)")
print(f"Ваш винстрик - {self.winstreak}")
else:
print("\nНедостаточно монет\n")
elif act == 5:
print("\nЭто банк, в котором вы можете обменивать свои ресурсы на монеты. Монеты - накопительная валюта, и она будет лежать в банке бесконечно.\n")
else:
break
def settings(self):
while True:
print(f"1. Автозагрузка {self.autoloadstat}")
try:
setsettings = int(input(">>> "))
except ValueError:
setsettings = False
if setsettings == 1 and self.autoloadstat == "ВЫКЛ":
self.autoloadstat = "ВКЛ"
elif setsettings == 1 and self.autoloadstat == "ВКЛ":
self.autoloadstat = "ВЫКЛ"
else:
break
def resetprogress(self):
print("\nВы точно хотите сбросить прогресс? y/n || д/н")
act = input(">>> ")
if act == "y" or act == "д":
self.money = self.startmoney
self.winstreak = 0
self.bankcoins = 1
self.status = ""
self.CFgames = []
self.CFlist_status = ""
self.KNBgames = []
self.KNBlist_status = ""
self.storagelimit = 25
self.storagelvl = 1
self.storagedmoney = 0
self.bankstatus = "НЕ ПОСТРОЕН"
self.storagestatus = "НЕ ПОСТРОЕН"
print("Прогресс успешно сброшен.")
else:
pass
def games(self):
while True:
print("1. Коинфлип")
print("2. Камень-ножницы-бумага")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1:
user.coinflip()
elif act == 2:
user.kanobu()
else:
break
def kanobu(self):
print("КаНоБу (камень-ножницы-бумага).")
print("Приз увеличен до х3.")
print("При проигрыше, нельзя вернуть свои деньги.")
print("Введите 0 для выхода в меню.")
while True:
print("Какая будет ставка? {%s}" % self.money)
self.rate = int(input(">>> "))
if self.rate > self.money:
print("У вас нет таких денег!")
elif self.rate == 0:
break
else:
while True:
print("\n1. Камень")
print("2. Ножницы")
print("3. Бумага\n")
self.bet = int(input(">>> "))
if self.bet > 0 and self.bet < 4:
break
else:
print("\n\nНеверный ввод\n\n")
self.kanobubot = randint(1, 3)
if self.bet == self.kanobubot:
print("Ничья!")
print("Вы не потратили деньги и винстрик.")
self.KNBgames.append(f"НИЧЬЯ (Баланс{self.money})")
elif self.bet == 1 and \
self.kanobubot == 2 or \
self.bet == 2 and \
self.kanobubot == 3 or \
self.bet == 3 and \
self.kanobubot == 1:
self.money += self.rate * 3
# +1 к винстрику
if self.winstreak < 0:
self.winstreak = 0
elif self.winstreak >= 0:
self.winstreak += 1
print("Победа!")
print(f"Ваш баланс - {self.money} (+{self.rate * 3})")
print(f"Ваш винстрик - {self.winstreak}")
self.KNBgames.append(f"ПОБЕДА (Баланс: {self.money} (+{self.rate})")
elif self.bet == 1 and \
self.kanobubot == 3 or \
self.bet == 2 and \
self.kanobubot == 1 or \
self.bet == 3 and \
self.kanobubot == 2:
self.money -= self.rate
# обнуление/-1 к винстрику
if self.winstreak <= 0:
self.winstreak -= 1
elif self.winstreak > 0:
self.winstreak = 0
print("Вы проиграли!")
print(f"Ваш баланс - {self.money} (-{self.rate})")
print(f"Ваш винстрик - {self.winstreak}")
self.KNBgames.append(f"ПОРАЖЕНИЕ (Баланс: {self.money} (-{self.rate})")
def replays(self):
while True:
print(f"\n\n1. КоинФлип игры {self.CFlist_status}")
print(f"2. КаНоБу игры {self.KNBlist_status}")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1:
self.CFreplays()
elif act == 2:
self.KNBreplays()
else:
break
def storage(self):
while True:
print("1. Положить деньги")
print("2. Забрать деньги")
print("3. Прокачать хранилище")
print("4. Что за хранилище?")
print(f"Ваш баланс - {self.storagedmoney}/{self.storagelimit}")
act = int(input(">>> "))
if act == 1:
print(f"Сколько вы хотите положить? ({self.money})")
print("Введите 0 для возвращения в меню.")
try:
self.putmoney = int(input(">>> "))
except ValueError:
self.putmoney = False
if self.putmoney <= 0:
break
elif self.putmoney > self.money:
print("Мало денег для такой суммы.\n")
elif self.putmoney == 0:
break
else:
if self.storagedmoney + self.putmoney > self.storagelimit:
print("Операция отклонена.")
else:
self.money -= self.putmoney
self.storagedmoney += self.putmoney
print(f"Вы пополнили свое хранилище на {self.putmoney}.")
elif act == 2:
while True:
print("Сколько денег вы хотите забрать?")
print(f"Ваш баланс - {self.money}")
print(f"Баланс в хранилище - {self.storagedmoney}")
print("Введите 0 для возвразещения в меню.")
try:
self.putmoney = int(input(">>> "))
except ValueError:
self.putmoney = False
if self.storagelimit - self.putmoney < 0:
print("Операция отменена.")
elif self.putmoney > self.storagedmoney:
print("У вас мало денег в хранилище для забирания такой суммы.")
elif self.putmoney == 0:
break
else:
self.money += self.putmoney
self.storagedmoney -= self.putmoney
print(f"Вы пополнили свой баланс на {self.putmoney}")
print(f"Ваш баланс - {self.money}")
print(f"Баланс в хранилище - {self.storagedmoney}")
break
elif act == 3:
print(f"Уровень хранилища - {self.storagelvl}")
print(f"Повышение уровня стоит {self.storagelvlup} денег.")
print(f"Ваш баланс - {self.money}")
print("Будете повышать уровень? д/н")
act = input(">>> ")
if act == 'д':
if self.money >= self.storagelvlup:
self.money -= self.storagelvlup
self.storagelvl += 1
self.storagelimit += 15
self.storagelvlup += choice(lvlupcost)
print(f"Вы повысили уровень хранилища до {self.storagelvl}")
print(f"Лимит хранлища - {self.storagelimit} (+15)")
print(f"Ваш баланс - {self.money}")
else:
print("Мало денег.")
elif act == 'н':
pass
elif act == 4:
print("В хранилище вы можете положить свои деньги, и они никуда не пропадут.")
print("Так же, вы можете увеличивать лимит хранилища, покупая улучшения.")
print(f"Покупая улучшение, цена поднимается на случайное число от {lvlupcost[0]} до {lvlupcost[3]}.")
print(f"Уровень вашего хранилища - {self.storagelvl}.")
else:
break
    def buildings(self):
        """Buildings menu: construct the bank / storage, then forward to
        the corresponding sub-menu once a building exists.

        An empty status string means the building has been constructed.
        """
        while True:
            print(f"\n1. Банк {self.bankstatus}")
            print(f"2. Хранилище {self.storagestatus}")
            try:
                act = int(input(">>> "))
            except ValueError:
                act = False
            if act == 1 and self.bankstatus == "НЕ ПОСТРОЕН":
                print("Постройка банка стоит 75 денег.")
                print("Построить? д/н")
                act = input(">>> ")
                # NOTE(review): the 75 is checked but never subtracted
                # from the balance — construction is effectively free.
                if act == 'д' and self.money >= 75:
                    print("Банк построен. Теперь вы можете им пользоватся!")
                    self.bankstatus = ''
                elif act == 'д' and self.money < 75:
                    print("Нехватает денег.")
                else:
                    pass
            elif act == 2 and self.storagestatus == "НЕ ПОСТРОЕН":
                print("Постройка хранилища стоит 250 денег.")
                print("Построить? д/н")
                act = input(">>> ")
                # NOTE(review): the 250 is never subtracted either.
                if act == 'д' and self.money >= 250:
                    print("Хранилище построено. Теперь вы можете им пользоватся!")
                    self.storagestatus = ""
                elif act == 'д' and self.money < 250:
                    print("Нехватает денег.")
                else:
                    pass
            elif act == 1 and self.bankstatus == "":
                self.bank()
            elif act == 2 and self.storagestatus == "":
                self.storage()
            else:
                break
def info(self):
print(f"\nБаланс - {user.money}")
print(f"Винстрик - {user.winstreak}")
if user.bankstatus == "":
print(f"Монеты в банке - {user.bankcoins}\n")
if user.storagestatus == "":
print(f"Баланс в хранилище - {user.storagedmoney}")
print()
# module entry point: create the player, honour the autoload setting,
# then run the main menu loop until the player picks an unknown option
user = Player()
if checkfile and autoload == "ВКЛ":
    user.loadstate()
while True:
    print("Выберите функцию")
    print("1. Игры")
    print("2. Права супер-юзера %s" % user.status)
    print("3. История игр")
    print("4. Сохранить прогресс")
    print("5. Загрузить прогресс")
    print("6. Постройки")
    print("7. Настройки")
    print("8. Баланс игрока")
    print("9. Сбросить прогресс")
    try:
        act = int(input(">>> "))
    except ValueError:
        act = False
    if act == 1:
        user.games()
    elif act == 2:
        user.op()
    elif act == 3:
        user.replays()
    elif act == 4:
        user.savestate()
    elif act == 5:
        user.loadstate()
    elif act == 6:
        user.buildings()
    elif act == 7:
        user.settings()
    elif act == 8:
        user.info()
    elif act == 9:
        user.resetprogress()
    else:
        break
| 36.250859 | 163 | 0.45227 | import pickle
import os.path
from random import randint, choice
"""
* Хранилище в файле
* В настройки "автосохранение"
* Статус банка и хранилища в файле
"""
lvlupcost = [25, 30, 35, 40]
statesfile = "playerinfo.data"
checkfile = os.path.exists(f'{statesfile}')
if checkfile:
with open(statesfile, 'rb') as f:
loadeddata = pickle.load(f)
autoload = loadeddata["autoload"]
class Player:
def __init__(self):
self.startmoney = 35
self.money = self.startmoney
self.winstreak = 0
self.bet = 0
self.rate = 0
self.bankcoins = 0
self.bankstatus = "НЕ ПОСТРОЕН"
self.autoloadstat = "ВЫКЛ"
self.state = {}
self.gm = 0
self.status = ""
self.coin = 0
self.buyHelp = None
self.CFgames = []
self.CFlist_status = ""
self.kanobubot = 0
self.KNBgames = []
self.KNBlist_status = ""
self.storagedmoney = 0
self.putmoney = 0
self.storagelimit = 25
self.storagestatus = "НЕ ПОСТРОЕН"
self.storagelvl = 1
self.storagelvlup = 40
def coinflip(self):
print("Коинфлип (или же монеточка) - все зависит от вашей удачи.")
print("Поставьте ставку, и если вы победите, вы получите в 2 раза больше")
print("Если проиграете, вы потеряете ту сумму, которую вложили.")
print("Введите 0 для возвращения в меню.")
while True:
print("\nВаша ставка? {%s}" % self.money)
try:
self.rate = int(input(">>> "))
except ValueError:
self.rate = 0
if self.rate == 0:
break
if self.rate > self.money:
print("У вас мало денег для такой ставки!")
else:
if self.rate == self.money:
print("Видимо, вы очень смелый человек, раз решили потратить сразу все деньги...")
self.bet = int(input("\nОрел или решка? (1/2)\n>>> "))
self.coin = randint(1, 2)
if self.coin == self.bet:
self.money += self.rate
if self.winstreak < 0:
self.winstreak = 0
elif self.winstreak >= 0:
self.winstreak += 1
print(f"Вы победили! Ваш приз - {self.rate * 2}.\nВаш баланс - {self.money}.\n")
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(+%(win)s) {Баланс %(money)s}" % {"win": self.rate, "money": self.money})
else:
print("К сожалению, вы проиграли.")
if self.winstreak >= 10:
print(f"Но так как у вас есть {self.winstreak}, вы можете потратить 10 ед., что-бы вернуть свои деньги.")
i = input("Согласны? (y/n)")
if i == 'y':
self.buyHelp = True
self.winstreak -= 10
print("Вы не потратили свои деньги.")
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(Купил 2 шанс) {Баланс %s}" % self.money)
else:
self.buyHelp = False
else:
self.buyHelp = False
if not self.buyHelp:
self.money -= self.rate
print(f"Ваш баланс - {self.money} (-{self.rate})")
if self.winstreak <= 0:
self.winstreak -= 1
elif self.winstreak > 0:
self.winstreak = 0
print(f"Ваш винстрик - {self.winstreak}")
self.CFgames.append("(-%(lose)s) {Баланс %(money)s}" % {"lose": self.rate, "money": self.money})
if self.money > 0:
print("Сыграем еще раз? (y/n)")
i = input(">>> ")
if i == 'n':
break
else:
print("Продолжаем!\n")
else:
print("У вас 0 денег.")
break
self.bet = 0
self.rate = 0
def op(self):
self.gm = 1
self.money = 1000000
self.winstreak = 5000
self.status = "[АКТИВИРОВАНО]"
print("Super-User активирован!")
def CFreplays(self):
if self.CFgames == []:
print("\nНеактивировано. Сыграйте 1 игру в CF.\n")
else:
self.CFlist_status = "[Активно!]"
count = 0
for j in self.CFgames:
count += 1
print(f"{count}. {j}")
def KNBreplays(self):
if self.KNBgames == []:
print("\nНеактивировано. Сыграйте 1 игру в KNB.\n")
else:
self.KNBlist_status = "[Активно!]"
count = 0
for j in self.KNBgames:
count += 1
print(f"{count}. {j}")
def savestate(self):
self.states = {
"money": self.money,
"winstreak": self.winstreak,
"coins": self.bankcoins,
"status": self.status,
"autoload": self.autoloadstat,
"bankstatus": self.bankstatus,
"storagestatus": self.storagestatus,
"storagelevel": self.storagelvl,
"storagelimit": self.storagelimit,
"storagedmoney": self.storagedmoney
}
with open(statesfile, "wb") as file:
pickle.dump(self.states, file)
print("\nПрогресс сохранен!\n")
def loadstate(self):
with open(statesfile, 'rb') as file:
loadeddata = pickle.load(file)
self.money = loadeddata["money"]
self.winstreak = loadeddata["winstreak"]
self.bankcoins = loadeddata["coins"]
self.status = loadeddata["status"]
self.autoloadstat = loadeddata["autoload"]
self.bankstatus = loadeddata["bankstatus"]
self.storagestatus = loadeddata["storagestatus"]
self.storagelvl = loadeddata["storagelevel"]
self.storagelimit = loadeddata["storagelimit"]
self.storagedmoney = loadeddata["storagedmoney"]
print("\nПрогресс загружен!\n")
def bank(self):
while True:
print("Выберите функцию:")
print("1. Обменять деньги на монеты {20 денег -> 1 монета}")
print("2. Обменять винстрик на монеты {3 винстрика -> 1 монета}")
print("3. Обменять монеты на деньги {1 монета -> 15 денег}")
print("4. Обменять монеты на винстрик {1 монета -> 3 винстрика}")
print("5. Что за банк?")
print("6. Выйти")
print(f"Запас ваших монеток в банке - {self.bankcoins}")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1:
if self.money >= 20:
self.money -= 20
self.bankcoins += 1
print(f"Вы купили 1 монету. {self.bankcoins}")
print(f"Ваш баланс - {self.money}")
else:
print(f"У вас мало денег для покупки монет.\nНакопите еще {25 - self.money}")
elif act == 2:
if self.winstreak >= 3:
self.winstreak -= 3
self.bankcoins += 1
print(f"Вы купили 1 монету. {{{self.backcoins}}}")
print(f"Ваш винстрик - {self.winstreak}")
else:
print(f"У вас мало винстрика для покупки монет! Накопите еще {5 - self.winstreak}")
elif act == 3:
if self.bankcoins >= 1:
self.bankcoins -= 1
self.money += 15
print(f"Вы обменяли 1 монету на 20 денег. ({self.bankcoins} монет осталось)")
print(f"Ваш баланс - {self.money}")
else:
print("Недостаточно монет.")
elif act == 4:
if self.bankcoins >= 1:
self.bankcoins -= 1
self.winstreak += 3
print(f"Вы обменяли 1 монету на 5 винстриков. ({self.bankcoins} монет осталось)")
print(f"Ваш винстрик - {self.winstreak}")
else:
print("\nНедостаточно монет\n")
elif act == 5:
print("\nЭто банк, в котором вы можете обменивать свои ресурсы на монеты. Монеты - накопительная валюта, и она будет лежать в банке бесконечно.\n")
else:
break
def settings(self):
while True:
print(f"1. Автозагрузка {self.autoloadstat}")
try:
setsettings = int(input(">>> "))
except ValueError:
setsettings = False
if setsettings == 1 and self.autoloadstat == "ВЫКЛ":
self.autoloadstat = "ВКЛ"
elif setsettings == 1 and self.autoloadstat == "ВКЛ":
self.autoloadstat = "ВЫКЛ"
else:
break
def resetprogress(self):
print("\nВы точно хотите сбросить прогресс? y/n || д/н")
act = input(">>> ")
if act == "y" or act == "д":
self.money = self.startmoney
self.winstreak = 0
self.bankcoins = 1
self.status = ""
self.CFgames = []
self.CFlist_status = ""
self.KNBgames = []
self.KNBlist_status = ""
self.storagelimit = 25
self.storagelvl = 1
self.storagedmoney = 0
self.bankstatus = "НЕ ПОСТРОЕН"
self.storagestatus = "НЕ ПОСТРОЕН"
print("Прогресс успешно сброшен.")
else:
pass
def games(self):
while True:
print("1. Коинфлип")
print("2. Камень-ножницы-бумага")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1:
user.coinflip()
elif act == 2:
user.kanobu()
else:
break
def kanobu(self):
print("КаНоБу (камень-ножницы-бумага).")
print("Приз увеличен до х3.")
print("При проигрыше, нельзя вернуть свои деньги.")
print("Введите 0 для выхода в меню.")
while True:
print("Какая будет ставка? {%s}" % self.money)
self.rate = int(input(">>> "))
if self.rate > self.money:
print("У вас нет таких денег!")
elif self.rate == 0:
break
else:
while True:
print("\n1. Камень")
print("2. Ножницы")
print("3. Бумага\n")
self.bet = int(input(">>> "))
if self.bet > 0 and self.bet < 4:
break
else:
print("\n\nНеверный ввод\n\n")
self.kanobubot = randint(1, 3)
if self.bet == self.kanobubot:
print("Ничья!")
print("Вы не потратили деньги и винстрик.")
self.KNBgames.append(f"НИЧЬЯ (Баланс{self.money})")
elif self.bet == 1 and \
self.kanobubot == 2 or \
self.bet == 2 and \
self.kanobubot == 3 or \
self.bet == 3 and \
self.kanobubot == 1:
self.money += self.rate * 3
if self.winstreak < 0:
self.winstreak = 0
elif self.winstreak >= 0:
self.winstreak += 1
print("Победа!")
print(f"Ваш баланс - {self.money} (+{self.rate * 3})")
print(f"Ваш винстрик - {self.winstreak}")
self.KNBgames.append(f"ПОБЕДА (Баланс: {self.money} (+{self.rate})")
elif self.bet == 1 and \
self.kanobubot == 3 or \
self.bet == 2 and \
self.kanobubot == 1 or \
self.bet == 3 and \
self.kanobubot == 2:
self.money -= self.rate
if self.winstreak <= 0:
self.winstreak -= 1
elif self.winstreak > 0:
self.winstreak = 0
print("Вы проиграли!")
print(f"Ваш баланс - {self.money} (-{self.rate})")
print(f"Ваш винстрик - {self.winstreak}")
self.KNBgames.append(f"ПОРАЖЕНИЕ (Баланс: {self.money} (-{self.rate})")
    def replays(self):
        """Game-history submenu: show coinflip or rock-paper-scissors logs."""
        while True:
            print(f"\n\n1. КоинФлип игры {self.CFlist_status}")
            print(f"2. КаНоБу игры {self.KNBlist_status}")
            # Non-numeric input maps to False and exits via the else branch.
            try:
                act = int(input(">>> "))
            except ValueError:
                act = False
            if act == 1:
                self.CFreplays()
            elif act == 2:
                self.KNBreplays()
            else:
                break
def storage(self):
while True:
print("1. Положить деньги")
print("2. Забрать деньги")
print("3. Прокачать хранилище")
print("4. Что за хранилище?")
print(f"Ваш баланс - {self.storagedmoney}/{self.storagelimit}")
act = int(input(">>> "))
if act == 1:
print(f"Сколько вы хотите положить? ({self.money})")
print("Введите 0 для возвращения в меню.")
try:
self.putmoney = int(input(">>> "))
except ValueError:
self.putmoney = False
if self.putmoney <= 0:
break
elif self.putmoney > self.money:
print("Мало денег для такой суммы.\n")
elif self.putmoney == 0:
break
else:
if self.storagedmoney + self.putmoney > self.storagelimit:
print("Операция отклонена.")
else:
self.money -= self.putmoney
self.storagedmoney += self.putmoney
print(f"Вы пополнили свое хранилище на {self.putmoney}.")
elif act == 2:
while True:
print("Сколько денег вы хотите забрать?")
print(f"Ваш баланс - {self.money}")
print(f"Баланс в хранилище - {self.storagedmoney}")
print("Введите 0 для возвразещения в меню.")
try:
self.putmoney = int(input(">>> "))
except ValueError:
self.putmoney = False
if self.storagelimit - self.putmoney < 0:
print("Операция отменена.")
elif self.putmoney > self.storagedmoney:
print("У вас мало денег в хранилище для забирания такой суммы.")
elif self.putmoney == 0:
break
else:
self.money += self.putmoney
self.storagedmoney -= self.putmoney
print(f"Вы пополнили свой баланс на {self.putmoney}")
print(f"Ваш баланс - {self.money}")
print(f"Баланс в хранилище - {self.storagedmoney}")
break
elif act == 3:
print(f"Уровень хранилища - {self.storagelvl}")
print(f"Повышение уровня стоит {self.storagelvlup} денег.")
print(f"Ваш баланс - {self.money}")
print("Будете повышать уровень? д/н")
act = input(">>> ")
if act == 'д':
if self.money >= self.storagelvlup:
self.money -= self.storagelvlup
self.storagelvl += 1
self.storagelimit += 15
self.storagelvlup += choice(lvlupcost)
print(f"Вы повысили уровень хранилища до {self.storagelvl}")
print(f"Лимит хранлища - {self.storagelimit} (+15)")
print(f"Ваш баланс - {self.money}")
else:
print("Мало денег.")
elif act == 'н':
pass
elif act == 4:
print("В хранилище вы можете положить свои деньги, и они никуда не пропадут.")
print("Так же, вы можете увеличивать лимит хранилища, покупая улучшения.")
print(f"Покупая улучшение, цена поднимается на случайное число от {lvlupcost[0]} до {lvlupcost[3]}.")
print(f"Уровень вашего хранилища - {self.storagelvl}.")
else:
break
def buildings(self):
while True:
print(f"\n1. Банк {self.bankstatus}")
print(f"2. Хранилище {self.storagestatus}")
try:
act = int(input(">>> "))
except ValueError:
act = False
if act == 1 and self.bankstatus == "НЕ ПОСТРОЕН":
print("Постройка банка стоит 75 денег.")
print("Построить? д/н")
act = input(">>> ")
if act == 'д' and self.money >= 75:
print("Банк построен. Теперь вы можете им пользоватся!")
self.bankstatus = ''
elif act == 'д' and self.money < 75:
print("Нехватает денег.")
else:
pass
elif act == 2 and self.storagestatus == "НЕ ПОСТРОЕН":
print("Постройка хранилища стоит 250 денег.")
print("Построить? д/н")
act = input(">>> ")
if act == 'д' and self.money >= 250:
print("Хранилище построено. Теперь вы можете им пользоватся!")
self.storagestatus = ""
elif act == 'д' and self.money < 250:
print("Нехватает денег.")
else:
pass
elif act == 1 and self.bankstatus == "":
self.bank()
elif act == 2 and self.storagestatus == "":
self.storage()
else:
break
def info(self):
print(f"\nБаланс - {user.money}")
print(f"Винстрик - {user.winstreak}")
if user.bankstatus == "":
print(f"Монеты в банке - {user.bankcoins}\n")
if user.storagestatus == "":
print(f"Баланс в хранилище - {user.storagedmoney}")
print()
user = Player()
# ``checkfile`` and ``autoload`` are defined earlier in the file (outside this
# excerpt); presumably a save-file existence check and the autoload setting —
# confirm against the top of the file.
if checkfile and autoload == "ВКЛ":
    user.loadstate()
# Main menu loop; any input other than 1-9 (including non-numeric) exits.
while True:
    print("Выберите функцию")
    print("1. Игры")
    print("2. Права супер-юзера %s" % user.status)
    print("3. История игр")
    print("4. Сохранить прогресс")
    print("5. Загрузить прогресс")
    print("6. Постройки")
    print("7. Настройки")
    print("8. Баланс игрока")
    print("9. Сбросить прогресс")
    try:
        act = int(input(">>> "))
    except ValueError:
        act = False
    if act == 1:
        user.games()
    elif act == 2:
        user.op()
    elif act == 3:
        user.replays()
    elif act == 4:
        user.savestate()
    elif act == 5:
        user.loadstate()
    elif act == 6:
        user.buildings()
    elif act == 7:
        user.settings()
    elif act == 8:
        user.info()
    elif act == 9:
        user.resetprogress()
    else:
        break
| false | true |
f7f7aa0f1bc1ebfeadf551d5ef1d5c3352afdd11 | 26,515 | py | Python | kartothek/core/common_metadata.py | jorisvandenbossche/kartothek | 18b11e7b060bb778668ffc4e2f468910120e6385 | [
"MIT"
] | 2 | 2019-05-29T09:45:20.000Z | 2019-06-24T19:06:46.000Z | kartothek/core/common_metadata.py | jorisvandenbossche/kartothek | 18b11e7b060bb778668ffc4e2f468910120e6385 | [
"MIT"
] | 18 | 2019-11-15T15:33:53.000Z | 2022-03-04T02:08:18.000Z | kartothek/core/common_metadata.py | jorisvandenbossche/kartothek | 18b11e7b060bb778668ffc4e2f468910120e6385 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import difflib
import logging
import pprint
from copy import copy, deepcopy
from functools import reduce
from typing import Set, Union
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import simplejson
from simplekv import KeyValueStore
from kartothek.core import naming
from kartothek.core._compat import load_json
from kartothek.core.utils import ensure_string_type
_logger = logging.getLogger()
class SchemaWrapper:
    """
    Wrapper object for pyarrow.Schema to handle forwards and backwards compatibility.

    Equality, hashing and pickling go through the serialized parquet metadata
    representation; unknown attributes are forwarded to the wrapped
    ``pyarrow.Schema`` via ``__getattr__``.
    """

    def __init__(self, schema, origin: Union[str, Set[str]]):
        # ``origin`` tracks where the schema came from (used for error
        # reporting); normalize it to a set of strings and copy defensively.
        if isinstance(origin, str):
            origin = {origin}
        elif isinstance(origin, set):
            origin = copy(origin)
        if not all(isinstance(s, str) for s in origin):
            raise TypeError("Schema origin elements must be strings.")
        self.__schema = schema
        self.__origin = origin
        self._schema_compat()

    def with_origin(self, origin: Union[str, Set[str]]) -> "SchemaWrapper":
        """
        Create new SchemaWrapper with given origin.

        Parameters
        ----------
        origin:
            New origin.

        Returns
        -------
        schema:
            New schema.
        """
        return SchemaWrapper(self.__schema, origin)

    def _schema_compat(self):
        # https://issues.apache.org/jira/browse/ARROW-5104
        # Normalize how index columns are tracked so schemas written by
        # different Arrow versions compare equal.
        schema = self.__schema
        if self.__schema is not None and self.__schema.pandas_metadata is not None:
            pandas_metadata = schema.pandas_metadata
            index_cols = pandas_metadata["index_columns"]
            if len(index_cols) > 1:
                raise NotImplementedError("Treatement of MultiIndex not implemented.")
            for ix, col in enumerate(index_cols):
                # Range index is now serialized using start/end information. This special treatment
                # removes it from the columns which is fine
                if isinstance(col, dict):
                    pass
                # other indices are still tracked as a column
                else:
                    index_level_ix = schema.get_field_index(col)
                    # this may happen for the schema of an empty df
                    if index_level_ix >= 0:
                        schema = schema.remove(index_level_ix)
            schema = schema.remove_metadata()
            md = {b"pandas": _dict_to_binary(pandas_metadata)}
            schema = schema.with_metadata(md)
        self.__schema = schema

    def internal(self):
        """Return the wrapped ``pyarrow.Schema``."""
        return self.__schema

    @property
    def origin(self) -> Set[str]:
        # Returned as a copy so callers cannot mutate the wrapper's state.
        return copy(self.__origin)

    def __repr__(self):
        return self.__schema.__repr__()

    def __eq__(self, other):
        return self.equals(other)

    def __ne__(self, other):
        return not self.equals(other)

    def __getstate__(self):
        # Pickle via the serialized parquet representation; pyarrow objects
        # are not reliably picklable across versions.
        return (_schema2bytes(self.__schema), self.__origin)

    def __setstate__(self, state):
        self.__schema = _bytes2schema(state[0])
        self.__origin = state[1]

    def __getattr__(self, attr):
        # Forward anything unknown to the wrapped schema.
        return getattr(self.__schema, attr)

    def __hash__(self):
        # see https://issues.apache.org/jira/browse/ARROW-2719
        return hash(_schema2bytes(self.__schema))

    def __getitem__(self, i):
        return self.__schema[i]

    def __len__(self):
        return len(self.__schema)

    def equals(self, other, check_metadata=False):
        if isinstance(other, SchemaWrapper):
            return self.__schema.equals(other.__schema, check_metadata)
        else:
            return self.__schema.equals(other, check_metadata)

    equals.__doc__ = pa.Schema.equals.__doc__

    def remove(self, i):
        return SchemaWrapper(self.__schema.remove(i), self.__origin)

    # Bug fix: this previously borrowed ``pa.Schema.set.__doc__`` (copy-paste).
    remove.__doc__ = pa.Schema.remove.__doc__

    def remove_metadata(self):
        return SchemaWrapper(
            self.__schema.remove_metadata(),
            {s + "__no_metadata" for s in self.__origin},
        )

    remove_metadata.__doc__ = pa.Schema.remove_metadata.__doc__

    def set(self, i, field):
        return SchemaWrapper(self.__schema.set(i, field), self.__origin)

    set.__doc__ = pa.Schema.set.__doc__
def normalize_column_order(schema, partition_keys=None):
    """
    Normalize column order in schema.

    Columns are sorted in the following way:

    1. Partition keys (as provided by ``partition_keys``)
    2. DataFrame columns in alphabetic order
    3. Remaining fields as generated by pyarrow, mostly index columns

    Parameters
    ----------
    schema: SchemaWrapper
        Schema information for DataFrame.
    partition_keys: Union[None, List[str]]
        Partition keys used to split the dataset.

    Returns
    -------
    schema: SchemaWrapper
        Schema information for DataFrame.
    """
    if not isinstance(schema, SchemaWrapper):
        schema = SchemaWrapper(schema, "__unknown__")

    if partition_keys is None:
        partition_keys = []
    else:
        partition_keys = list(partition_keys)

    pandas_metadata = schema.pandas_metadata
    origin = schema.origin

    # Bucket every pandas column descriptor (and its matching arrow field, if
    # any) into: partition columns (keyed by name), payload columns, and
    # "misc" entries without a name (typically index bookkeeping).
    cols_partition = {}
    cols_payload = []
    cols_misc = []
    for cmd in pandas_metadata["columns"]:
        name = cmd.get("name")
        field_name = cmd["field_name"]
        field_idx = schema.get_field_index(field_name)
        # A negative index means the column has no physical arrow field
        # (e.g. a serialized RangeIndex); keep the descriptor, drop the field.
        if field_idx >= 0:
            field = schema[field_idx]
        else:
            field = None

        if name is None:
            cols_misc.append((cmd, field))
        elif name in partition_keys:
            cols_partition[name] = (cmd, field)
        else:
            cols_payload.append((name, cmd, field))

    # Assemble the final order: partition keys (in the given key order),
    # then payload columns alphabetically, then the misc entries.
    ordered = []
    for k in partition_keys:
        if k in cols_partition:
            ordered.append(cols_partition[k])

    ordered += [(cmd, f) for _name, cmd, f in sorted(cols_payload, key=lambda x: x[0])]
    ordered += cols_misc

    pandas_metadata["columns"] = [cmd for cmd, _ in ordered]
    fields = [f for _, f in ordered if f is not None]

    metadata = schema.metadata
    metadata[b"pandas"] = _dict_to_binary(pandas_metadata)

    schema = pa.schema(fields, metadata)
    return SchemaWrapper(schema, origin)
def make_meta(obj, origin, partition_keys=None):
    """
    Create metadata object for DataFrame.

    .. note::
        This function can, for convenience reasons, also be applied to schema objects in which case they are just
        returned.

    .. warning::
        Information for categoricals will be stripped!

    :meth:`normalize_type` will be applied to normalize type information and :meth:`normalize_column_order` will be
    applied to to reorder column information.

    Parameters
    ----------
    obj: Union[DataFrame, Schema]
        Object to extract metadata from.
    origin: str
        Origin of the schema data, used for debugging and error reporting.
    partition_keys: Union[None, List[str]]
        Partition keys used to split the dataset.

    Returns
    -------
    schema: SchemaWrapper
        Schema information for DataFrame.
    """
    # Schema-like inputs are passed through (wrapped / reordered only).
    if isinstance(obj, SchemaWrapper):
        return obj
    if isinstance(obj, pa.Schema):
        return normalize_column_order(
            SchemaWrapper(obj, origin), partition_keys=partition_keys
        )

    if not isinstance(obj, pd.DataFrame):
        raise ValueError("Input must be a pyarrow schema, or a pandas dataframe")

    schema = pa.Schema.from_pandas(obj)
    pandas_metadata = schema.pandas_metadata

    # normalize types
    # Rewrite both the arrow field types and the per-column pandas metadata
    # (pandas_type / numpy_type / metadata) in lock-step.
    fields = dict([(field.name, field.type) for field in schema])
    for cmd in pandas_metadata["columns"]:
        name = cmd.get("name")
        # Entries without a name describe index data, not DataFrame columns.
        if name is None:
            continue
        field_name = cmd["field_name"]
        field_idx = schema.get_field_index(field_name)
        field = schema[field_idx]
        (
            fields[field_name],
            cmd["pandas_type"],
            cmd["numpy_type"],
            cmd["metadata"],
        ) = normalize_type(
            field.type, cmd["pandas_type"], cmd["numpy_type"], cmd["metadata"]
        )
    metadata = schema.metadata
    metadata[b"pandas"] = _dict_to_binary(pandas_metadata)
    schema = pa.schema([pa.field(n, t) for n, t in fields.items()], metadata)
    return normalize_column_order(SchemaWrapper(schema, origin), partition_keys)
def normalize_type(t_pa, t_pd, t_np, metadata):
    """
    This will normalize types as followed:

    - all signed integers (``int8``, ``int16``, ``int32``, ``int64``) will be converted to ``int64``
    - all unsigned integers (``uint8``, ``uint16``, ``uint32``, ``uint64``) will be converted to ``uint64``
    - all floats (``float32``, ``float64``) will be converted to ``float64``
    - all list value types will be normalized (e.g. ``list[int16]`` to ``list[int64]``, ``list[list[uint8]]`` to
      ``list[list[uint64]]``)
    - all dict value types will be normalized (e.g. ``dictionary<values=float32, indices=int16, ordered=0>`` to
      ``float64``)

    Parameters
    ----------
    t_pa: pyarrow.Type
        pyarrow type object, e.g. ``pa.list_(pa.int8())``.
    t_pd: string
        pandas type identifier, e.g. ``"list[int8]"``.
    t_np: string
        numpy type identifier, e.g. ``"object"``.
    metadata: Union[None, Dict[String, Any]]
        metadata associated with the type, e.g. information about categorials.

    Returns
    -------
    type_tuple: Tuple[pyarrow.Type, string, string, Union[None, Dict[String, Any]]]
        tuple of ``t_pa``, ``t_pd``, ``t_np``, ``metadata`` for normalized type
    """
    if pa.types.is_signed_integer(t_pa):
        return pa.int64(), "int64", "int64", None
    if pa.types.is_unsigned_integer(t_pa):
        return pa.uint64(), "uint64", "uint64", None
    if pa.types.is_floating(t_pa):
        return pa.float64(), "float64", "float64", None
    if pa.types.is_list(t_pa):
        # Recurse into the element type; the pandas identifier carries the
        # inner type between "list[" and the trailing "]".
        inner_pd = t_pd[len("list[") : -1]
        inner_pa, inner_pd, _inner_np, _inner_md = normalize_type(
            t_pa.value_type, inner_pd, None, None
        )
        return pa.list_(inner_pa), "list[{}]".format(inner_pd), "object", None
    if pa.types.is_dictionary(t_pa):
        # downcast to dictionary content, `t_pd` is useless in that case
        return normalize_type(t_pa.value_type, t_np, t_np, None)
    # Anything else is passed through untouched.
    return t_pa, t_pd, t_np, metadata
def _get_common_metadata_key(dataset_uuid, table):
    """Return the store key under which the common schema of *table* lives."""
    return "{dataset_uuid}/{table}/{suffix}".format(
        dataset_uuid=dataset_uuid, table=table, suffix=naming.TABLE_METADATA_FILE
    )
def read_schema_metadata(
    dataset_uuid: str, store: KeyValueStore, table: str
) -> SchemaWrapper:
    """
    Read schema and metadata from store.

    Parameters
    ----------
    dataset_uuid: str
        Unique ID of the dataset in question.
    store: obj
        Object that implements `.get(key)` to read data.
    table: str
        Table to read metadata for.

    Returns
    -------
    schema: Schema
        Schema information for DataFrame/table.
    """
    key = _get_common_metadata_key(dataset_uuid=dataset_uuid, table=table)
    raw_bytes = store.get(key)
    # The store key doubles as the schema's origin label.
    return SchemaWrapper(_bytes2schema(raw_bytes), key)
def store_schema_metadata(
    schema: SchemaWrapper, dataset_uuid: str, store: KeyValueStore, table: str
) -> str:
    """
    Store schema and metadata to store.

    Parameters
    ----------
    schema: Schema
        Schema information for DataFrame/table.
    dataset_uuid: str
        Unique ID of the dataset in question.
    store: obj
        Object that implements `.put(key, data)` to write data.
    table: str
        Table to write metadata for.

    Returns
    -------
    key: str
        Key to which the metadata was written to.
    """
    key = _get_common_metadata_key(dataset_uuid=dataset_uuid, table=table)
    payload = _schema2bytes(schema.internal())
    return store.put(key, payload)
def _schema2bytes(schema: SchemaWrapper) -> bytes:
    """Serialize *schema* as parquet metadata bytes (timestamps coerced to us)."""
    sink = pa.BufferOutputStream()
    pq.write_metadata(schema, sink, version="2.0", coerce_timestamps="us")
    return sink.getvalue().to_pybytes()
def _bytes2schema(data: bytes) -> SchemaWrapper:
    """Deserialize parquet metadata bytes back into a schema.

    schema data recovered from parquet always contains timestamp data in
    us-granularity, but pandas will use ns-granularity, so we re-align the
    two different worlds here.

    NOTE(review): despite the annotation this returns a plain
    ``pyarrow.Schema`` (callers wrap it in ``SchemaWrapper`` themselves).
    """
    stored = pq.read_schema(pa.BufferReader(data))
    realigned = []
    for pos in range(len(stored)):
        field = stored[pos]
        if field.type == pa.timestamp("us"):
            field = pa.field(field.name, pa.timestamp("ns"))
        realigned.append(field)
    return pa.schema(realigned, stored.metadata)
def _pandas_in_schemas(schemas):
"""
Check if any schema contains pandas metadata
"""
has_pandas = False
for schema in schemas:
if schema.metadata and b"pandas" in schema.metadata:
has_pandas = True
return has_pandas
def _determine_schemas_to_compare(schemas, ignore_pandas):
    """
    Iterate over a list of `pyarrow.Schema` objects and prepares them for comparison by picking a reference
    and determining all null columns.

    .. note::

        If pandas metadata exists, the version stored in the metadata is overwritten with the currently
        installed version since we expect to stay backwards compatible

    Parameters
    ----------
    schemas: List[Schema]
        Schemas (or ``SchemaWrapper``) to prepare; may be empty.
    ignore_pandas: bool
        If True, the pandas metadata is not required to be present.

    Returns
    -------
    reference: Schema
        A reference schema which is picked from the input list. The reference schema is guaranteed
        to be a schema having the least number of null columns of all input columns. The set of null
        columns is guaranteed to be a true subset of all null columns of all input schemas. If no such
        schema can be found, an Exception is raised
    list_of_schemas: List[Tuple[Schema, List]]
        A list holding pairs of (Schema, null_columns) where the null_columns are all columns which are null and
        must be removed before comparing the schemas
    """
    has_pandas = _pandas_in_schemas(schemas) and not ignore_pandas
    schemas_to_evaluate = []
    reference = None
    null_cols_in_reference = set()

    for schema in schemas:
        if not isinstance(schema, SchemaWrapper):
            schema = SchemaWrapper(schema, "__unknown__")

        if has_pandas:
            metadata = schema.metadata
            if metadata is None or b"pandas" not in metadata:
                raise ValueError(
                    "Pandas and non-Pandas schemas are not comparable. "
                    "Use ignore_pandas=True if you only want to compare "
                    "on Arrow level."
                )
            pandas_metadata = load_json(metadata[b"pandas"].decode("utf8"))

            # we don't care about the pandas version, since we assume it's safe
            # to read datasets that were written by older or newer versions.
            pandas_metadata["pandas_version"] = "{}".format(pd.__version__)

            metadata_clean = deepcopy(metadata)
            metadata_clean[b"pandas"] = _dict_to_binary(pandas_metadata)
            current = SchemaWrapper(pa.schema(schema, metadata_clean), schema.origin)
        else:
            current = schema

        # If a field is null we cannot compare it and must therefore reject it
        null_columns = {field.name for field in current if field.type == pa.null()}

        # Determine a valid reference schema. A valid reference schema is considered to be the schema
        # of all input schemas with the least empty columns.
        # The reference schema ought to be a schema whose empty columns are a true subset for all sets
        # of empty columns. This ensures that the actual reference schema is the schema with the most
        # information possible. A schema which doesn't fulfil this requirement would weaken the
        # comparison and would allow for false positives

        # Trivial case
        if reference is None:
            reference = current
            null_cols_in_reference = null_columns
        # The reference has enough information to validate against current schema.
        # Append it to the list of schemas to be verified
        elif null_cols_in_reference.issubset(null_columns):
            schemas_to_evaluate.append((current, null_columns))
        # current schema includes all information of reference and more.
        # Add reference to schemas_to_evaluate and update reference
        elif null_columns.issubset(null_cols_in_reference):
            schemas_to_evaluate.append((reference, null_cols_in_reference))
            reference = current
            null_cols_in_reference = null_columns
        # If there is no clear subset available elect the schema with the least null columns as `reference`.
        # Iterate over the null columns of `reference` and replace it with a non-null field of the `current`
        # schema which recovers the loop invariant (null columns of `reference` is subset of `current`)
        else:
            if len(null_columns) < len(null_cols_in_reference):
                # Swap roles so `reference` is the schema with fewer nulls.
                reference, current = current, reference
                null_cols_in_reference, null_columns = (
                    null_columns,
                    null_cols_in_reference,
                )

            for col in null_cols_in_reference - null_columns:
                # Enrich the information in the reference by grabbing the missing fields
                # from the current iteration. This assumes that we only check for global validity and
                # isn't relevant where the reference comes from.
                reference = _swap_fields_by_name(reference, current, col)
                null_cols_in_reference.remove(col)
            schemas_to_evaluate.append((current, null_columns))

    assert (reference is not None) or (not schemas_to_evaluate)

    return reference, schemas_to_evaluate
def _swap_fields_by_name(reference, current, field_name):
    """Return *reference* with the field *field_name* replaced by *current*'s version."""
    replacement = current.field(field_name)
    position = reference.get_field_index(field_name)
    return reference.set(position, replacement)
def _strip_columns_from_schema(schema, field_names):
    """
    Remove the given fields from *schema*.

    If any requested field is missing, the ORIGINAL (unstripped) schema is
    returned with a warning, since a missing field usually indicates
    incompatible schemas and stripping would obscure the validation result.
    """
    stripped_schema = schema

    for name in field_names:
        ix = stripped_schema.get_field_index(name)
        if ix >= 0:
            stripped_schema = stripped_schema.remove(ix)
        else:
            # If the returned index is negative, the field doesn't exist in the schema.
            # This is most likely an indicator for incompatible schemas and we refuse to strip the schema
            # to not obfurscate the validation result
            _logger.warning(
                "Unexpected field `%s` encountered while trying to strip `null` columns.\n"
                "Schema was:\n\n`%s`" % (name, schema)
            )
            return schema

    return stripped_schema
def _remove_diff_header(diff):
diff = list(diff)
for ix, el in enumerate(diff):
# This marks the first actual entry of the diff
# e.g. @@ -1,5 + 2,5 @@
if el.startswith("@"):
return diff[ix:]
return diff
def _diff_schemas(first, second):
    """
    Render a human-readable unified diff between two schemas.

    Both the pyarrow-level field listing (metadata stripped) and the
    pretty-printed pandas metadata are diffed; the result is used in
    schema-violation error messages.
    """
    # see https://issues.apache.org/jira/browse/ARROW-4176
    first_pyarrow_info = str(first.remove_metadata())
    second_pyarrow_info = str(second.remove_metadata())
    pyarrow_diff = _remove_diff_header(
        difflib.unified_diff(
            str(first_pyarrow_info).splitlines(), str(second_pyarrow_info).splitlines()
        )
    )

    first_pandas_info = first.pandas_metadata
    second_pandas_info = second.pandas_metadata
    pandas_meta_diff = _remove_diff_header(
        difflib.unified_diff(
            pprint.pformat(first_pandas_info).splitlines(),
            pprint.pformat(second_pandas_info).splitlines(),
        )
    )

    diff_string = (
        "Arrow schema:\n"
        + "\n".join(pyarrow_diff)
        + "\n\nPandas_metadata:\n"
        + "\n".join(pandas_meta_diff)
    )

    return diff_string
def validate_compatible(schemas, ignore_pandas=False):
    """
    Validate that all schemas in a given list are compatible.

    Apart from the pandas version preserved in the schema metadata, schemas must be completely identical. That includes
    a perfect match of the whole metadata (except the pandas version) and pyarrow types.

    Use :meth:`make_meta` and :meth:`normalize_column_order` for type and column order normalization.

    In the case that all schemas don't contain any pandas metadata, we will check the Arrow
    schemas directly for compatibility.

    Parameters
    ----------
    schemas: List[Schema]
        Schema information from multiple sources, e.g. multiple partitions. List may be empty.
    ignore_pandas: bool
        Ignore the schema information given by Pandas an always use the Arrow schema.

    Returns
    -------
    schema: SchemaWrapper
        The reference schema which was tested against

    Raises
    ------
    ValueError
        At least two schemas are incompatible.
    """

    # Improvement: this helper used to be (re)defined inside the loop body on
    # every iteration; it only formats origin sets for error messages.
    def _fmt_origin(origin):
        origin = sorted(origin)
        # dask cuts of exception messages at 1k chars:
        # https://github.com/dask/distributed/blob/6e0c0a6b90b1d3c/distributed/core.py#L964
        # therefore, we cut the the maximum length
        max_len = 200
        inner_msg = ", ".join(origin)
        ellipsis = "..."
        if len(inner_msg) > max_len + len(ellipsis):
            inner_msg = inner_msg[:max_len] + ellipsis
        return "{{{}}}".format(inner_msg)

    reference, schemas_to_evaluate = _determine_schemas_to_compare(
        schemas, ignore_pandas
    )

    for current, null_columns in schemas_to_evaluate:
        # Compare each schema to the reference but ignore the null_cols and the Pandas schema information.
        reference_to_compare = _strip_columns_from_schema(
            reference, null_columns
        ).remove_metadata()
        current_to_compare = _strip_columns_from_schema(
            current, null_columns
        ).remove_metadata()

        if reference_to_compare != current_to_compare:
            schema_diff = _diff_schemas(reference, current)
            exception_message = """Schema violation
Origin schema: {origin_schema}
Origin reference: {origin_reference}
Diff:
{schema_diff}
Reference schema:
{reference}""".format(
                schema_diff=schema_diff,
                reference=str(reference),
                origin_schema=_fmt_origin(current.origin),
                origin_reference=_fmt_origin(reference.origin),
            )
            raise ValueError(exception_message)

    # add all origins to result AFTER error checking, otherwise the error message would be pretty misleading due to the
    # reference containing all origins.
    if reference is None:
        return None
    else:
        return reference.with_origin(
            reduce(
                set.union,
                (schema.origin for schema, _null_columns in schemas_to_evaluate),
                reference.origin,
            )
        )
def validate_shared_columns(schemas, ignore_pandas=False):
    """
    Validate that columns that are shared amongst schemas are compatible.

    Only DataFrame columns are taken into account, other fields (like index data) are ignored. The following data must
    be an exact match:

    - metadata (as stored in the ``"columns"`` list of the ``b'pandas'`` schema metadata)
    - pyarrow type (that means that e.g. ``int8`` and ``int64`` are NOT compatible)

    Columns that are only present in a subset of the provided schemas must only be compatible for that subset, i.e.
    non-existing columns are ignored. The order of the columns in the provided schemas is irrelevant.

    Type normalization should be handled by :meth:`make_meta`.

    In the case that all schemas don't contain any pandas metadata, we will check the Arrow
    schemas directly for compatibility. Then the metadata information will not be checked
    (as it is non-existent).

    Parameters
    ----------
    schemas: List[Schema]
        Schema information from multiple sources, e.g. multiple tables. List may be empty.
    ignore_pandas: bool
        Ignore the schema information given by Pandas an always use the Arrow schema.

    Raises
    ------
    ValueError
        Incompatible columns were found.
    """
    # Maps column name -> (field, name) of the first schema that declared it.
    seen = {}
    has_pandas = _pandas_in_schemas(schemas) and not ignore_pandas

    for schema in schemas:
        if has_pandas:
            metadata = schema.metadata
            if metadata is None or b"pandas" not in metadata:
                raise ValueError(
                    "Pandas and non-Pandas schemas are not comparable. "
                    "Use ignore_pandas=True if you only want to compare "
                    "on Arrow level."
                )
            pandas_metadata = load_json(metadata[b"pandas"].decode("utf8"))

            columns = []
            for cmd in pandas_metadata["columns"]:
                name = cmd.get("name")
                # Nameless entries describe index data, not DataFrame columns.
                if name is None:
                    continue
                columns.append(cmd["field_name"])
        else:
            columns = schema.names

        for col in columns:
            field_idx = schema.get_field_index(col)
            field = schema[field_idx]
            obj = (field, col)
            if col in seen:
                ref = seen[col]
                # Null-typed columns carry no type information; skip them.
                if pa.types.is_null(ref[0].type) or pa.types.is_null(field.type):
                    continue
                if ref != obj:
                    raise ValueError(
                        'Found incompatible entries for column "{}"\n{}\n{}'.format(
                            col, ref, obj
                        )
                    )
            else:
                seen[col] = obj
def _dict_to_binary(dct):
    """Serialize *dct* deterministically (sorted keys) to UTF-8 JSON bytes."""
    serialized = simplejson.dumps(dct, sort_keys=True)
    return serialized.encode("utf8")
def empty_dataframe_from_schema(schema, columns=None, date_as_object=False):
    """
    Create an empty DataFrame from provided schema.

    Parameters
    ----------
    schema: Schema
        Schema information of the new empty DataFrame.
    columns: Union[None, List[str]]
        Optional list of columns that should be part of the resulting DataFrame. All columns in that list must also be
        part of the provided schema.
    date_as_object: bool
        Passed through to ``pyarrow``'s ``to_pandas`` conversion.

    Returns
    -------
    DataFrame
        Empty DataFrame with requested columns and types.
    """
    frame = schema.internal().empty_table().to_pandas(date_as_object=date_as_object)
    # Column labels may come back as bytes; normalize them to strings.
    frame.columns = frame.columns.map(ensure_string_type)
    if columns is not None:
        frame = frame[columns]
    return frame
| 34.660131 | 119 | 0.640053 |
import difflib
import logging
import pprint
from copy import copy, deepcopy
from functools import reduce
from typing import Set, Union
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import simplejson
from simplekv import KeyValueStore
from kartothek.core import naming
from kartothek.core._compat import load_json
from kartothek.core.utils import ensure_string_type
_logger = logging.getLogger()
class SchemaWrapper:
def __init__(self, schema, origin: Union[str, Set[str]]):
if isinstance(origin, str):
origin = {origin}
elif isinstance(origin, set):
origin = copy(origin)
if not all(isinstance(s, str) for s in origin):
raise TypeError("Schema origin elements must be strings.")
self.__schema = schema
self.__origin = origin
self._schema_compat()
def with_origin(self, origin: Union[str, Set[str]]) -> "SchemaWrapper":
return SchemaWrapper(self.__schema, origin)
def _schema_compat(self):
schema = self.__schema
if self.__schema is not None and self.__schema.pandas_metadata is not None:
pandas_metadata = schema.pandas_metadata
index_cols = pandas_metadata["index_columns"]
if len(index_cols) > 1:
raise NotImplementedError("Treatement of MultiIndex not implemented.")
for ix, col in enumerate(index_cols):
if isinstance(col, dict):
pass
else:
index_level_ix = schema.get_field_index(col)
if index_level_ix >= 0:
schema = schema.remove(index_level_ix)
schema = schema.remove_metadata()
md = {b"pandas": _dict_to_binary(pandas_metadata)}
schema = schema.with_metadata(md)
self.__schema = schema
def internal(self):
return self.__schema
@property
def origin(self) -> Set[str]:
return copy(self.__origin)
def __repr__(self):
return self.__schema.__repr__()
def __eq__(self, other):
return self.equals(other)
def __ne__(self, other):
return not self.equals(other)
def __getstate__(self):
return (_schema2bytes(self.__schema), self.__origin)
def __setstate__(self, state):
self.__schema = _bytes2schema(state[0])
self.__origin = state[1]
def __getattr__(self, attr):
return getattr(self.__schema, attr)
def __hash__(self):
return hash(_schema2bytes(self.__schema))
def __getitem__(self, i):
return self.__schema[i]
def __len__(self):
return len(self.__schema)
def equals(self, other, check_metadata=False):
if isinstance(other, SchemaWrapper):
return self.__schema.equals(other.__schema, check_metadata)
else:
return self.__schema.equals(other, check_metadata)
equals.__doc__ = pa.Schema.equals.__doc__
def remove(self, i):
return SchemaWrapper(self.__schema.remove(i), self.__origin)
remove.__doc__ = pa.Schema.set.__doc__
def remove_metadata(self):
return SchemaWrapper(
self.__schema.remove_metadata(),
{s + "__no_metadata" for s in self.__origin},
)
remove_metadata.__doc__ = pa.Schema.remove_metadata.__doc__
def set(self, i, field):
return SchemaWrapper(self.__schema.set(i, field), self.__origin)
set.__doc__ = pa.Schema.set.__doc__
def normalize_column_order(schema, partition_keys=None):
    """Normalize the column order of ``schema``.

    Order: partition keys first (in the given key order), then payload
    columns sorted by name, then metadata-only entries (pandas metadata
    entries without a ``name``, e.g. index information). Both the physical
    fields and the ``columns`` list inside the pandas metadata are reordered
    consistently.

    Parameters
    ----------
    schema: pyarrow.Schema or SchemaWrapper
    partition_keys: list-like or None

    Returns
    -------
    SchemaWrapper
    """
    if not isinstance(schema, SchemaWrapper):
        schema = SchemaWrapper(schema, "__unknown__")
    if partition_keys is None:
        partition_keys = []
    else:
        partition_keys = list(partition_keys)
    pandas_metadata = schema.pandas_metadata
    origin = schema.origin
    # Bucket every pandas-metadata column entry by its role.
    cols_partition = {}
    cols_payload = []
    cols_misc = []
    for cmd in pandas_metadata["columns"]:
        name = cmd.get("name")
        field_name = cmd["field_name"]
        field_idx = schema.get_field_index(field_name)
        # field_idx < 0: the column exists only in the metadata (no
        # physical field in the schema).
        if field_idx >= 0:
            field = schema[field_idx]
        else:
            field = None
        if name is None:
            cols_misc.append((cmd, field))
        elif name in partition_keys:
            cols_partition[name] = (cmd, field)
        else:
            cols_payload.append((name, cmd, field))
    # Assemble final ordering: partition keys, sorted payload, misc entries.
    ordered = []
    for k in partition_keys:
        if k in cols_partition:
            ordered.append(cols_partition[k])
    ordered += [(cmd, f) for _name, cmd, f in sorted(cols_payload, key=lambda x: x[0])]
    ordered += cols_misc
    pandas_metadata["columns"] = [cmd for cmd, _ in ordered]
    fields = [f for _, f in ordered if f is not None]
    metadata = schema.metadata
    metadata[b"pandas"] = _dict_to_binary(pandas_metadata)
    schema = pa.schema(fields, metadata)
    return SchemaWrapper(schema, origin)
def make_meta(obj, origin, partition_keys=None):
    """Build a normalized :class:`SchemaWrapper` from a schema or a DataFrame.

    For DataFrames the inferred pyarrow types are normalized via
    ``normalize_type`` (ints/floats widened to 64 bit, dictionary encoding
    resolved, list types recursed into) and the pandas metadata is updated
    to match. Column order is normalized in all cases.

    Raises
    ------
    ValueError
        If ``obj`` is neither a pyarrow schema nor a pandas DataFrame.
    """
    if isinstance(obj, SchemaWrapper):
        return obj
    if isinstance(obj, pa.Schema):
        return normalize_column_order(
            SchemaWrapper(obj, origin), partition_keys=partition_keys
        )
    if not isinstance(obj, pd.DataFrame):
        raise ValueError("Input must be a pyarrow schema, or a pandas dataframe")
    schema = pa.Schema.from_pandas(obj)
    pandas_metadata = schema.pandas_metadata
    # Map field name -> (possibly normalized) pyarrow type.
    fields = dict([(field.name, field.type) for field in schema])
    for cmd in pandas_metadata["columns"]:
        name = cmd.get("name")
        # Entries without a name (e.g. index metadata) have no payload type.
        if name is None:
            continue
        field_name = cmd["field_name"]
        field_idx = schema.get_field_index(field_name)
        field = schema[field_idx]
        # Normalize type and keep the pandas metadata entry in sync.
        (
            fields[field_name],
            cmd["pandas_type"],
            cmd["numpy_type"],
            cmd["metadata"],
        ) = normalize_type(
            field.type, cmd["pandas_type"], cmd["numpy_type"], cmd["metadata"]
        )
    metadata = schema.metadata
    metadata[b"pandas"] = _dict_to_binary(pandas_metadata)
    schema = pa.schema([pa.field(n, t) for n, t in fields.items()], metadata)
    return normalize_column_order(SchemaWrapper(schema, origin), partition_keys)
def normalize_type(t_pa, t_pd, t_np, metadata):
    """Normalize a (pyarrow, pandas, numpy, metadata) type quadruple.

    Integers and floats are widened to their 64-bit variants, dictionary
    encoding is resolved to the underlying value type, and list types are
    normalized recursively on their element type.
    """
    if pa.types.is_signed_integer(t_pa):
        return pa.int64(), "int64", "int64", None
    if pa.types.is_unsigned_integer(t_pa):
        return pa.uint64(), "uint64", "uint64", None
    if pa.types.is_floating(t_pa):
        return pa.float64(), "float64", "float64", None
    if pa.types.is_list(t_pa):
        # Normalize the element type, then rebuild the list type around it.
        inner_pd = t_pd[len("list[") : -1]
        inner_pa, inner_pd_norm, _inner_np, _inner_md = normalize_type(
            t_pa.value_type, inner_pd, None, None
        )
        return pa.list_(inner_pa), "list[{}]".format(inner_pd_norm), "object", None
    if pa.types.is_dictionary(t_pa):
        # Dictionary-encoded data round-trips as its value type.
        return normalize_type(t_pa.value_type, t_np, t_np, None)
    return t_pa, t_pd, t_np, metadata
def _get_common_metadata_key(dataset_uuid, table):
    """Store key under which the shared schema of a dataset/table lives."""
    return "/".join((dataset_uuid, table, naming.TABLE_METADATA_FILE))
def read_schema_metadata(
    dataset_uuid: str, store: KeyValueStore, table: str
) -> SchemaWrapper:
    """Load the common schema of a dataset table from the store.

    The store key itself is used as the wrapper's origin label.
    """
    key = _get_common_metadata_key(dataset_uuid=dataset_uuid, table=table)
    return SchemaWrapper(_bytes2schema(store.get(key)), key)
def store_schema_metadata(
    schema: SchemaWrapper, dataset_uuid: str, store: KeyValueStore, table: str
) -> str:
    """Persist the common schema of a dataset table.

    Returns the store key the serialized schema was written to.
    """
    key = _get_common_metadata_key(dataset_uuid=dataset_uuid, table=table)
    return store.put(key, _schema2bytes(schema.internal()))
def _schema2bytes(schema: SchemaWrapper) -> bytes:
    """Serialize a schema to bytes as parquet metadata.

    Timestamps are coerced to microsecond resolution for the on-disk
    representation (``_bytes2schema`` widens them back to nanoseconds).
    """
    buf = pa.BufferOutputStream()
    pq.write_metadata(schema, buf, version="2.0", coerce_timestamps="us")
    return buf.getvalue().to_pybytes()
def _bytes2schema(data: bytes) -> pa.Schema:
    """Deserialize a parquet-metadata blob back into a pyarrow schema.

    Timestamp fields are widened from microsecond resolution (the on-disk
    format written by ``_schema2bytes``) back to nanosecond resolution.

    Note: a plain ``pyarrow.Schema`` is returned (the previous ``SchemaWrapper``
    annotation was wrong); callers wrap the result themselves.
    """
    schema = pq.read_schema(pa.BufferReader(data))
    fields = [
        pa.field(field.name, pa.timestamp("ns"))
        if field.type == pa.timestamp("us")
        else field
        for field in schema
    ]
    return pa.schema(fields, schema.metadata)
def _pandas_in_schemas(schemas):
has_pandas = False
for schema in schemas:
if schema.metadata and b"pandas" in schema.metadata:
has_pandas = True
return has_pandas
def _determine_schemas_to_compare(schemas, ignore_pandas):
    """Split ``schemas`` into a reference schema and schemas to validate.

    Returns ``(reference, schemas_to_evaluate)`` where ``schemas_to_evaluate``
    is a list of ``(schema, null_columns)`` pairs. The reference is chosen and
    enriched so that its set of all-null columns is a subset of the null
    columns of every schema that remains to be evaluated.
    """
    has_pandas = _pandas_in_schemas(schemas) and not ignore_pandas
    schemas_to_evaluate = []
    reference = None
    null_cols_in_reference = set()
    for schema in schemas:
        if not isinstance(schema, SchemaWrapper):
            schema = SchemaWrapper(schema, "__unknown__")
        if has_pandas:
            metadata = schema.metadata
            if metadata is None or b"pandas" not in metadata:
                raise ValueError(
                    "Pandas and non-Pandas schemas are not comparable. "
                    "Use ignore_pandas=True if you only want to compare "
                    "on Arrow level."
                )
            # Normalize the pandas version inside the metadata so that
            # schemas written by different pandas versions still compare equal.
            pandas_metadata = load_json(metadata[b"pandas"].decode("utf8"))
            pandas_metadata["pandas_version"] = "{}".format(pd.__version__)
            metadata_clean = deepcopy(metadata)
            metadata_clean[b"pandas"] = _dict_to_binary(pandas_metadata)
            current = SchemaWrapper(pa.schema(schema, metadata_clean), schema.origin)
        else:
            current = schema
        null_columns = {field.name for field in current if field.type == pa.null()}
        # An all-null (untyped) column carries no type information; it must
        # not leak into the reference used for comparison, since that would
        # allow for false positives.
        # Trivial case
        if reference is None:
            reference = current
            null_cols_in_reference = null_columns
        # The reference has enough information to validate against current schema.
        # Append it to the list of schemas to be verified
        elif null_cols_in_reference.issubset(null_columns):
            schemas_to_evaluate.append((current, null_columns))
        # current schema includes all information of reference and more.
        # Add reference to schemas_to_evaluate and update reference
        elif null_columns.issubset(null_cols_in_reference):
            schemas_to_evaluate.append((reference, null_cols_in_reference))
            reference = current
            null_cols_in_reference = null_columns
        # If there is no clear subset available elect the schema with the least null columns as `reference`.
        # Iterate over the null columns of `reference` and replace it with a non-null field of the `current`
        # schema which recovers the loop invariant (null columns of `reference` is subset of `current`)
        else:
            if len(null_columns) < len(null_cols_in_reference):
                reference, current = current, reference
                null_cols_in_reference, null_columns = (
                    null_columns,
                    null_cols_in_reference,
                )
            for col in null_cols_in_reference - null_columns:
                # Enrich the information in the reference by grabbing the missing fields
                # from the current iteration. This assumes that we only check for global validity and
                # isn't relevant where the reference comes from.
                reference = _swap_fields_by_name(reference, current, col)
                null_cols_in_reference.remove(col)
            schemas_to_evaluate.append((current, null_columns))
    assert (reference is not None) or (not schemas_to_evaluate)
    return reference, schemas_to_evaluate
def _swap_fields_by_name(reference, current, field_name):
    """Return ``reference`` with the field ``field_name`` replaced by the
    equally named field taken from ``current``."""
    current_field = current.field(field_name)
    reference_index = reference.get_field_index(field_name)
    return reference.set(reference_index, current_field)
def _strip_columns_from_schema(schema, field_names):
    """Return ``schema`` with the given fields removed.

    If any name in ``field_names`` is missing from the schema, a warning is
    logged and the ORIGINAL schema is returned unchanged.
    """
    stripped_schema = schema
    for name in field_names:
        ix = stripped_schema.get_field_index(name)
        if ix >= 0:
            stripped_schema = stripped_schema.remove(ix)
        else:
            # This is most likely an indicator for incompatible schemas and we refuse to strip the schema
            # to not obfurscate the validation result
            _logger.warning(
                "Unexpected field `%s` encountered while trying to strip `null` columns.\n"
                "Schema was:\n\n`%s`" % (name, schema)
            )
            return schema
    return stripped_schema
def _remove_diff_header(diff):
diff = list(diff)
for ix, el in enumerate(diff):
# This marks the first actual entry of the diff
# e.g. @@ -1,5 + 2,5 @@
if el.startswith("@"):
return diff[ix:]
return diff
def _diff_schemas(first, second):
    """Render a human-readable diff between two schemas.

    Produces a unified diff of the arrow-level fields (metadata stripped)
    followed by a unified diff of the pandas metadata.
    """
    # see https://issues.apache.org/jira/browse/ARROW-4176
    first_pyarrow_info = str(first.remove_metadata())
    second_pyarrow_info = str(second.remove_metadata())
    # (dropped the redundant second str() wrapping of the two info strings)
    pyarrow_diff = _remove_diff_header(
        difflib.unified_diff(
            first_pyarrow_info.splitlines(), second_pyarrow_info.splitlines()
        )
    )
    first_pandas_info = first.pandas_metadata
    second_pandas_info = second.pandas_metadata
    pandas_meta_diff = _remove_diff_header(
        difflib.unified_diff(
            pprint.pformat(first_pandas_info).splitlines(),
            pprint.pformat(second_pandas_info).splitlines(),
        )
    )
    diff_string = (
        "Arrow schema:\n"
        + "\n".join(pyarrow_diff)
        + "\n\nPandas_metadata:\n"
        + "\n".join(pandas_meta_diff)
    )
    return diff_string
def validate_compatible(schemas, ignore_pandas=False):
    """Validate that all schemas in the list are compatible.

    Schemas must be identical once untyped (all-null) columns are ignored;
    with pandas metadata present, the pandas version is normalized before
    comparison (see ``_determine_schemas_to_compare``).

    Returns the reference schema carrying the union of all origins, or
    ``None`` for empty input. Raises ``ValueError`` with a schema diff on
    violation.

    Fix: in the previous layout the comparison/raise block sat after the
    ``return`` inside the nested ``_fmt_origin`` helper and was unreachable,
    so no violation could ever be reported; the helper now precedes the loop
    and the check runs per evaluated schema.
    """
    def _fmt_origin(origin):
        origin = sorted(origin)
        # dask cuts of exception messages at 1k chars:
        # https://github.com/dask/distributed/blob/6e0c0a6b90b1d3c/distributed/core.py#L964
        # therefore, we cut the the maximum length
        max_len = 200
        inner_msg = ", ".join(origin)
        ellipsis = "..."
        if len(inner_msg) > max_len + len(ellipsis):
            inner_msg = inner_msg[:max_len] + ellipsis
        return "{{{}}}".format(inner_msg)
    reference, schemas_to_evaluate = _determine_schemas_to_compare(
        schemas, ignore_pandas
    )
    for current, null_columns in schemas_to_evaluate:
        # Compare each schema to the reference but ignore the null_cols and the Pandas schema information.
        reference_to_compare = _strip_columns_from_schema(
            reference, null_columns
        ).remove_metadata()
        current_to_compare = _strip_columns_from_schema(
            current, null_columns
        ).remove_metadata()
        if reference_to_compare != current_to_compare:
            schema_diff = _diff_schemas(reference, current)
            exception_message = """Schema violation
Origin schema: {origin_schema}
Origin reference: {origin_reference}
Diff:
{schema_diff}
Reference schema:
{reference}""".format(
                schema_diff=schema_diff,
                reference=str(reference),
                origin_schema=_fmt_origin(current.origin),
                origin_reference=_fmt_origin(reference.origin),
            )
            raise ValueError(exception_message)
    # add all origins to result AFTER error checking, otherwise the error message would be pretty misleading due to the
    # reference containing all origins.
    if reference is None:
        return None
    else:
        return reference.with_origin(
            reduce(
                set.union,
                (schema.origin for schema, _null_columns in schemas_to_evaluate),
                reference.origin,
            )
        )
def validate_shared_columns(schemas, ignore_pandas=False):
    """Validate that columns appearing in several schemas are compatible.

    Every column name must map to an identical ``(field, name)`` pair wherever
    it occurs; occurrences with an all-null type are skipped since they carry
    no type information.

    Raises
    ------
    ValueError
        When pandas and non-pandas schemas are mixed (and pandas metadata is
        not ignored), or when a column has incompatible entries.
    """
    seen = {}
    has_pandas = _pandas_in_schemas(schemas) and not ignore_pandas
    for schema in schemas:
        if has_pandas:
            metadata = schema.metadata
            if metadata is None or b"pandas" not in metadata:
                raise ValueError(
                    "Pandas and non-Pandas schemas are not comparable. "
                    "Use ignore_pandas=True if you only want to compare "
                    "on Arrow level."
                )
            # With pandas metadata, only payload columns (entries that carry
            # a name) take part in the check.
            pandas_metadata = load_json(metadata[b"pandas"].decode("utf8"))
            columns = []
            for cmd in pandas_metadata["columns"]:
                name = cmd.get("name")
                if name is None:
                    continue
                columns.append(cmd["field_name"])
        else:
            columns = schema.names
        for col in columns:
            field_idx = schema.get_field_index(col)
            field = schema[field_idx]
            obj = (field, col)
            if col in seen:
                ref = seen[col]
                # Null-typed occurrences carry no information -> skip.
                if pa.types.is_null(ref[0].type) or pa.types.is_null(field.type):
                    continue
                if ref != obj:
                    raise ValueError(
                        'Found incompatible entries for column "{}"\n{}\n{}'.format(
                            col, ref, obj
                        )
                    )
            else:
                seen[col] = obj
def _dict_to_binary(dct):
    """Serialize ``dct`` as canonical (key-sorted) JSON, UTF-8 encoded."""
    return simplejson.dumps(dct, sort_keys=True).encode("utf8")
def empty_dataframe_from_schema(schema, columns=None, date_as_object=False):
    """Create an empty DataFrame with the columns and dtypes of ``schema``.

    Parameters
    ----------
    schema: SchemaWrapper
    columns: optional subset/ordering of columns to select
    date_as_object: passed through to ``pyarrow.Table.to_pandas``
    """
    df = schema.internal().empty_table().to_pandas(date_as_object=date_as_object)
    # Normalize column labels to plain strings via the project helper.
    df.columns = df.columns.map(ensure_string_type)
    if columns is not None:
        df = df[columns]
    return df
| true | true |
f7f7aa16dacecd157f0228746943b4649aac647b | 578 | py | Python | graphgallery/nn/layers/pytorch/conv/__init__.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 1 | 2020-07-29T08:00:32.000Z | 2020-07-29T08:00:32.000Z | graphgallery/nn/layers/pytorch/conv/__init__.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | graphgallery/nn/layers/pytorch/conv/__init__.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | from .gcn import GCNConv
from .gat import GATConv, SparseGATConv
from .sgc import SGConv
from .trainable_sgc import TrainableSGConv
from .median import MedianConv, TrimmedConv
from .dagnn import DAGNNConv
from .tagcn import TAGConv
from .appnp import APPNProp, PPNProp
from .graphsage import SAGEAggregator
from .ssgc import SSGConv
from .agnn import AGNNConv
from .sat import EigenConv, SpectralEigenConv, GraphEigenConv
from .agnn import AGNNConv
from .robustgcn import GaussionConvD, GaussionConvF
from .chebynet import ChebConv
from .gwnn import WaveletConv
| 34 | 62 | 0.813149 | from .gcn import GCNConv
from .gat import GATConv, SparseGATConv
from .sgc import SGConv
from .trainable_sgc import TrainableSGConv
from .median import MedianConv, TrimmedConv
from .dagnn import DAGNNConv
from .tagcn import TAGConv
from .appnp import APPNProp, PPNProp
from .graphsage import SAGEAggregator
from .ssgc import SSGConv
from .agnn import AGNNConv
from .sat import EigenConv, SpectralEigenConv, GraphEigenConv
from .agnn import AGNNConv
from .robustgcn import GaussionConvD, GaussionConvF
from .chebynet import ChebConv
from .gwnn import WaveletConv
| true | true |
f7f7aad936027802f617d577d9b3de2bc65ea57d | 2,434 | py | Python | benchmark/net/facenet_predictor.py | kknet/imalookalike | fe5ec3cb786b0e13e5260705f1c4251bfa028a04 | [
"Apache-2.0"
] | null | null | null | benchmark/net/facenet_predictor.py | kknet/imalookalike | fe5ec3cb786b0e13e5260705f1c4251bfa028a04 | [
"Apache-2.0"
] | 6 | 2020-01-28T22:43:09.000Z | 2022-02-10T00:15:35.000Z | benchmark/net/facenet_predictor.py | slawiko/imalookalike | ca0917bbc24b74d560ebe5b83dd882a6683abddf | [
"Apache-2.0"
] | 1 | 2019-10-29T02:38:49.000Z | 2019-10-29T02:38:49.000Z | import tensorflow as tf
import benchmark.net.facenet.facenet as facenet
from benchmark.net.facenet import detect_face
import cv2
import numpy as np
# some constants kept as default from facenet
# MTCNN face-detection parameters (facenet defaults)
minsize = 20  # minimum detectable face size, in pixels
threshold = [0.6, 0.7, 0.7]  # per-stage detection thresholds (P-, R-, O-net)
factor = 0.709  # image-pyramid scale factor
margin = 44  # pixels of context added around a detected face box
input_image_size = 160  # FaceNet input resolution (square)
# Globals populated by init_model(); None until the model is loaded.
sess = None
images_placeholder = None
embeddings = None
phase_train_placeholder = None
embedding_size = None
pnet = None
rnet = None
onet = None
def init_model(model_path):
    """Load the FaceNet model and the MTCNN detection nets into module globals.

    Must be called once before get_cropped_image()/predict_embeddings().
    Side effects: creates a TF session and binds the graph tensors.
    """
    global sess
    global images_placeholder
    global embeddings
    global phase_train_placeholder
    global embedding_size
    global pnet
    global rnet
    global onet
    facenet.load_model(model_path)
    sess = tf.Session()
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    embedding_size = embeddings.get_shape()[1]
    # Windows-style path to the MTCNN weight files — TODO confirm portability.
    pnet, rnet, onet = detect_face.create_mtcnn(sess, 'facenet\\align')
def get_cropped_image(img):
    """Return the crop of the first confidently detected face in ``img``.

    Returns the face region expanded by margin/2 on each side (clamped to the
    image bounds), or implicitly None when no box with score > 0.5 is found.
    """
    img_size = np.asarray(img.shape)[0:2]  # (height, width)
    bounding_boxes, points = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    if not len(bounding_boxes) == 0:
        for face in bounding_boxes:
            # face[0:4] is the box, face[4] presumably the detection
            # confidence (compared against 0.5) — TODO confirm with detect_face.
            if face[4] > 0.50:
                det = np.squeeze(face[0:4])
                bb = np.zeros(4, dtype=np.int32)
                # Expand by margin/2 per side, clamped to the image bounds.
                bb[0] = np.maximum(det[0] - margin / 2, 0)
                bb[1] = np.maximum(det[1] - margin / 2, 0)
                bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                return cropped
def predict_embeddings(imgs):
    """Compute FaceNet embeddings for a batch of 3-channel images.

    Each image is cropped to the detected face (falling back to the full
    image when detection fails), resized to the network input size,
    prewhitened, and run through the loaded model in a single batch.
    Requires init_model() to have been called first.
    """
    prewhiteneds = []
    for img in imgs:
        cropped = get_cropped_image(img)
        if cropped is None:
            print("not detected")
            # Fall back to embedding the uncropped image.
            cropped = img
        resized = cv2.resize(cropped, (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)
        prewhitened = facenet.prewhiten(resized)
        prewhiteneds.append(prewhitened)
    reshaped = np.array(prewhiteneds).reshape((-1, input_image_size, input_image_size, 3))
    feed_dict = {images_placeholder: reshaped, phase_train_placeholder: False}
    embedding = sess.run(embeddings, feed_dict=feed_dict)
    return embedding
| 31.205128 | 106 | 0.668858 | import tensorflow as tf
import benchmark.net.facenet.facenet as facenet
from benchmark.net.facenet import detect_face
import cv2
import numpy as np
# NOTE(review): duplicate, comment-stripped copy of the facenet predictor
# module above (dataset artifact). See the first copy for full documentation.
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
margin = 44
input_image_size = 160
sess = None
images_placeholder = None
embeddings = None
phase_train_placeholder = None
embedding_size = None
pnet = None
rnet = None
onet = None
def init_model(model_path):
    """Load the FaceNet model and MTCNN nets into module globals."""
    global sess
    global images_placeholder
    global embeddings
    global phase_train_placeholder
    global embedding_size
    global pnet
    global rnet
    global onet
    facenet.load_model(model_path)
    sess = tf.Session()
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    embedding_size = embeddings.get_shape()[1]
    pnet, rnet, onet = detect_face.create_mtcnn(sess, 'facenet\\align')
def get_cropped_image(img):
    """Crop the first face with score > 0.5 (with margin), else None."""
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, points = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    if not len(bounding_boxes) == 0:
        for face in bounding_boxes:
            if face[4] > 0.50:
                det = np.squeeze(face[0:4])
                bb = np.zeros(4, dtype=np.int32)
                bb[0] = np.maximum(det[0] - margin / 2, 0)
                bb[1] = np.maximum(det[1] - margin / 2, 0)
                bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                return cropped
def predict_embeddings(imgs):
    """Compute FaceNet embeddings for a batch of images (requires init_model)."""
    prewhiteneds = []
    for img in imgs:
        cropped = get_cropped_image(img)
        if cropped is None:
            print("not detected")
            cropped = img
        resized = cv2.resize(cropped, (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)
        prewhitened = facenet.prewhiten(resized)
        prewhiteneds.append(prewhitened)
    reshaped = np.array(prewhiteneds).reshape((-1, input_image_size, input_image_size, 3))
    feed_dict = {images_placeholder: reshaped, phase_train_placeholder: False}
    embedding = sess.run(embeddings, feed_dict=feed_dict)
    return embedding
| true | true |
f7f7acc9c0d6d80919d440cfbd03025d7bc7fb0d | 259 | py | Python | fython/test/importpec_py/py_star_import_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 41 | 2016-01-21T05:14:45.000Z | 2021-11-24T20:37:21.000Z | fython/test/importpec_py/py_star_import_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 5 | 2016-01-21T05:36:37.000Z | 2016-08-22T19:26:51.000Z | fython/test/importpec_py/py_star_import_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 3 | 2016-01-23T04:03:44.000Z | 2016-08-21T15:58:38.000Z | s="""
.a.py
x = 10
.b.fy
import .a(*)
int x = |x|
print 'in b {:x}'
"""
from fython.test import *
shell('rm -rf a/ a.* b.*')
writer(s)
w = load('.b', force=1, release=1, verbose=0, run_main=0)
# print(open(w.module.url.fortran_path, 'r').read())
| 11.26087 | 57 | 0.552124 | s="""
.a.py
x = 10
.b.fy
import .a(*)
int x = |x|
print 'in b {:x}'
"""
from fython.test import *
shell('rm -rf a/ a.* b.*')
writer(s)
w = load('.b', force=1, release=1, verbose=0, run_main=0)
| true | true |
f7f7ad241e054510143a9aaabf60dcfad76851d3 | 2,052 | py | Python | restler/engine/core/fuzzer.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | 1 | 2021-01-21T07:36:34.000Z | 2021-01-21T07:36:34.000Z | restler/engine/core/fuzzer.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | 1 | 2021-03-30T02:53:04.000Z | 2021-03-30T22:39:03.000Z | restler/engine/core/fuzzer.py | Ayudjj/mvp | a0ba706a2156e31cf6053b639b57aa1b9acad442 | [
"MIT"
] | 1 | 2021-01-21T07:36:37.000Z | 2021-01-21T07:36:37.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import threading
import engine.core.driver as driver
import utils.logger as logger
from engine.core.fuzzing_monitor import Monitor
from engine.core.requests import GrammarRequestCollection
from engine.errors import InvalidDictionaryException
class FuzzingThread(threading.Thread):
    """Worker thread that drives a fuzzing run."""

    def __init__(self, fuzzing_requests, checkers, fuzzing_jobs=1):
        """Initializes the fuzzer thread.

        @param fuzzing_requests: The collection of requests to fuzz
        @type  fuzzing_requests: FuzzingRequestCollection
        @param checkers: List of checker objects
        @type  checkers: List[Checker]
        @param fuzzing_jobs: Number of parallel fuzzing jobs
        @type  fuzzing_jobs: Int
        """
        super().__init__()
        self._fuzzing_requests = fuzzing_requests
        self._checkers = checkers
        self._fuzzing_jobs = fuzzing_jobs
        self._num_total_sequences = 0
        self._exception = None

    @property
    def exception(self):
        """Error message captured during the run, or None on success."""
        return self._exception

    def run(self):
        """Thread entry point - generates and executes fuzzing sequences."""
        try:
            self._num_total_sequences = driver.generate_sequences(
                self._fuzzing_requests, self._checkers, self._fuzzing_jobs
            )
            # Once everything is done, report requests that were never
            # rendered (because they never had valid constraints).
            logger.print_request_rendering_stats_never_rendered_requests(
                self._fuzzing_requests,
                GrammarRequestCollection().candidate_values_pool,
                Monitor(),
            )
        except InvalidDictionaryException:
            # Deliberate no-op: the run simply ends without results.
            pass
        except Exception as err:
            self._exception = str(err)

    def join(self, *args):
        """Waits for the thread and reports the size of the run.

        @return: The total number of sequences from the fuzzing run
        @rtype : Int
        """
        super().join(*args)
        return self._num_total_sequences
| 31.090909 | 76 | 0.659357 |
import threading
import engine.core.driver as driver
import utils.logger as logger
from engine.core.fuzzing_monitor import Monitor
from engine.core.requests import GrammarRequestCollection
from engine.errors import InvalidDictionaryException
# NOTE(review): duplicate, comment-stripped copy of the FuzzingThread class
# above (dataset artifact); docstrings restored below.
class FuzzingThread(threading.Thread):
    """Fuzzer thread class."""
    def __init__(self, fuzzing_requests, checkers, fuzzing_jobs=1):
        """Constructor for the fuzzer thread.

        @param fuzzing_requests: The collection of requests to fuzz
        @type  fuzzing_requests: FuzzingRequestCollection
        @param checkers: List of checker objects
        @type  checkers: List[Checker]
        """
        threading.Thread.__init__(self)
        self._fuzzing_requests = fuzzing_requests
        self._checkers = checkers
        self._fuzzing_jobs = fuzzing_jobs
        self._num_total_sequences = 0
        self._exception = None
    @property
    def exception(self):
        """Error message captured during the run, or None on success."""
        return self._exception
    def run(self):
        """Thread entrance - performs fuzzing."""
        try:
            self._num_total_sequences = driver.generate_sequences(
                self._fuzzing_requests, self._checkers, self._fuzzing_jobs
            )
            # At the end, report requests that were never rendered (because
            # they never had valid constraints).
            logger.print_request_rendering_stats_never_rendered_requests(
                self._fuzzing_requests,
                GrammarRequestCollection().candidate_values_pool,
                Monitor()
            )
        except InvalidDictionaryException:
            pass
        except Exception as err:
            self._exception = str(err)
    def join(self, *args):
        """Overrides thread join; returns the total number of sequences."""
        threading.Thread.join(self, *args)
        return self._num_total_sequences
| true | true |
f7f7ad72a31fa302e6d2ff757b829493b6194f5e | 3,204 | py | Python | bin/ExtractSimulatedChimericRead4Retrain.py | XiDsLab/scFusion | 0398a12272683b84c712903eb1453ab058e6862f | [
"MIT"
] | 7 | 2021-08-21T05:54:12.000Z | 2022-03-14T07:24:08.000Z | bin/ExtractSimulatedChimericRead4Retrain.py | XiDsLab/scFusion | 0398a12272683b84c712903eb1453ab058e6862f | [
"MIT"
] | 3 | 2021-05-29T11:12:38.000Z | 2021-09-22T06:25:22.000Z | bin/ExtractSimulatedChimericRead4Retrain.py | ZijieJin/scFusion | 13beb881fd5cce096cfbd5ed436c28ccb43a57c9 | [
"MIT"
] | 4 | 2020-12-28T04:22:52.000Z | 2021-05-30T05:17:01.000Z | from __future__ import print_function
from __future__ import division
import sys
import random
import os
import pysam
# ***** readme *****
# This code extracts chimeric read from sam file for training, with pos and direction
# The input is *.sam
def ReverseComplement(str):
    """Return the reverse complement of a DNA sequence, uppercased.

    Only uppercase A/C/G/T are complemented (matching the original
    replace-chain behavior); any other character passes through unchanged
    and is uppercased at the end.

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with existing callers.
    """
    # Single-pass translate instead of four chained replace() calls.
    # b"ACGT" iterates as the ordinals of A/C/G/T, pairing with "TGCA".
    return str[::-1].translate(dict(zip(b"ACGT", "TGCA"))).upper()
# Emit one simulated chimeric read per line of the input chimeric file:
#   argv[1] = chimeric read file (only its line count is used)
#   argv[2] = directory of per-cell STAR mapping outputs
chimericfile = open(sys.argv[1])
mappingpath = sys.argv[2]
linenum = len(chimericfile.readlines())
chimericfile.close()
count = 0
# Candidate cell directories under the mapping path.
cellindex = []
for dir in os.listdir(mappingpath):
    cellindex.append(dir)
while count < linenum:
    # Pick a random cell and locate its sorted STAR BAM file.
    thisindex = random.sample(cellindex, 1)[0]
    try:
        found = False
        for file in os.listdir(mappingpath + thisindex):
            if file.find('Aligned.sortedByCoord.out.bam') > -1:
                samfile = pysam.AlignmentFile(mappingpath + thisindex + '/' + file, 'rb')
                found = True
                break
    except:
        # NOTE(review): bare except silently skips unreadable cells;
        # consider narrowing to OSError.
        continue
    if not found:
        continue
    # Load all alignments of this cell into memory.
    sam = []
    for r in samfile:
        sam.append(r)
    thiscount = 0
    # Draw at most len(sam)/5 simulated reads from this cell.
    while thiscount < len(sam) / 5 and count < linenum:
        while True:
            # Sample two random alignments to supply the two chimeric
            # segments; reject unmapped/chrM and multi-cigar alignments.
            a = random.randint(1, len(sam)) - 1
            b = random.randint(1, len(sam)) - 1
            chr1 = str(sam[a].reference_name)
            chr2 = str(sam[b].reference_name)
            allread1 = sam[a].seq
            allread2 = sam[b].seq
            readlength = len(allread1)
            if not chr1.startswith('chr'):
                chr1 = 'chr' + chr1
            if not chr2.startswith('chr'):
                chr2 = 'chr' + chr2
            if len(sam[a].cigar) > 1 or len(sam[b].cigar) > 1:
                continue
            if not (chr1 == '*' or chr2 == '*' or chr1 == 'chrM' or chr2 == 'chrM'):
                break
        # Fixed 30+30bp split of the simulated 60bp chimeric read.
        read1length = 30
        read2length = 60 - read1length
        c = random.randint(0, 60 - read1length - 1)
        d = random.randint(0, 60 - read2length - 1)
        try:
            read1 = sam[a].seq[c:c + read1length]
        except:
            # NOTE(review): bare except; read1 may be left undefined here.
            sys.stderr.write(str(sam[a]))
        line2 = sam[b]  # NOTE(review): `line2` is unused.
        try:
            read2 = sam[b].seq[d:d + read2length]
        except:
            sys.stderr.write(str(sam[b]))
        # Randomly choose orientation for each segment; the value -1 marks
        # a reverse-complemented segment and drives the strand labels below.
        e = random.randint(0, 1)
        f = random.randint(0, 1)
        if e == 0:
            e = -1
            read2 = ReverseComplement(read2)
            pos2 = sam[b].pos + d + read2length - 1
        else:
            pos2 = sam[b].pos + d
        if f == 0:
            f = -1
            read1 = ReverseComplement(read1)
            pos1 = sam[a].pos + c
        else:
            pos1 = sam[a].pos + c + read1length - 1
        if f == -1:
            direct1 = '+'
        else:
            direct1 = '-'
        if e == 1:
            direct2 = '+'
        else:
            direct2 = '-'
        # Emit: sequence, breakpoint offset, and the two positions/strands,
        # skipping reads containing N or with unexpected length.
        if read1.find('N') == -1 and read2.find('N') == -1 and len(read1 + read2) == 60:
            print(read1.upper() + read2.upper() + '\t' + str(read1length) + '\t', end='')
            print(chr1 + ':' + str(pos1) + ':' + direct1 + '\t' + chr2 + ':' + str(pos2) + ':' + direct2)
            count += 1
        thiscount += 1
samfile.close() | 32.04 | 105 | 0.504057 | from __future__ import print_function
from __future__ import division
import sys
import random
import os
import pysam
# NOTE(review): duplicate, comment-stripped copy of the script above
# (dataset artifact). See the first copy for full documentation.
def ReverseComplement(str):
    """Reverse-complement a DNA string (uppercase bases only), uppercased."""
    return str[::-1].replace('A', 't').replace('T', 'a').replace('G', 'c').replace('C', 'g').upper()
chimericfile = open(sys.argv[1])
mappingpath = sys.argv[2]
linenum = len(chimericfile.readlines())
chimericfile.close()
count = 0
cellindex = []
for dir in os.listdir(mappingpath):
    cellindex.append(dir)
while count < linenum:
    thisindex = random.sample(cellindex, 1)[0]
    try:
        found = False
        for file in os.listdir(mappingpath + thisindex):
            if file.find('Aligned.sortedByCoord.out.bam') > -1:
                samfile = pysam.AlignmentFile(mappingpath + thisindex + '/' + file, 'rb')
                found = True
                break
    except:
        continue
    if not found:
        continue
    sam = []
    for r in samfile:
        sam.append(r)
    thiscount = 0
    while thiscount < len(sam) / 5 and count < linenum:
        while True:
            a = random.randint(1, len(sam)) - 1
            b = random.randint(1, len(sam)) - 1
            chr1 = str(sam[a].reference_name)
            chr2 = str(sam[b].reference_name)
            allread1 = sam[a].seq
            allread2 = sam[b].seq
            readlength = len(allread1)
            if not chr1.startswith('chr'):
                chr1 = 'chr' + chr1
            if not chr2.startswith('chr'):
                chr2 = 'chr' + chr2
            if len(sam[a].cigar) > 1 or len(sam[b].cigar) > 1:
                continue
            if not (chr1 == '*' or chr2 == '*' or chr1 == 'chrM' or chr2 == 'chrM'):
                break
        read1length = 30
        read2length = 60 - read1length
        c = random.randint(0, 60 - read1length - 1)
        d = random.randint(0, 60 - read2length - 1)
        try:
            read1 = sam[a].seq[c:c + read1length]
        except:
            sys.stderr.write(str(sam[a]))
        line2 = sam[b]
        try:
            read2 = sam[b].seq[d:d + read2length]
        except:
            sys.stderr.write(str(sam[b]))
        e = random.randint(0, 1)
        f = random.randint(0, 1)
        if e == 0:
            e = -1
            read2 = ReverseComplement(read2)
            pos2 = sam[b].pos + d + read2length - 1
        else:
            pos2 = sam[b].pos + d
        if f == 0:
            f = -1
            read1 = ReverseComplement(read1)
            pos1 = sam[a].pos + c
        else:
            pos1 = sam[a].pos + c + read1length - 1
        if f == -1:
            direct1 = '+'
        else:
            direct1 = '-'
        if e == 1:
            direct2 = '+'
        else:
            direct2 = '-'
        if read1.find('N') == -1 and read2.find('N') == -1 and len(read1 + read2) == 60:
            print(read1.upper() + read2.upper() + '\t' + str(read1length) + '\t', end='')
            print(chr1 + ':' + str(pos1) + ':' + direct1 + '\t' + chr2 + ':' + str(pos2) + ':' + direct2)
            count += 1
        thiscount += 1
samfile.close() | true | true |
f7f7ade5bf71d9623eeea934d95a5ee6ab2a8ac6 | 71 | py | Python | quick_start/my_text_classifier/predictors/__init__.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 71 | 2020-06-06T03:12:44.000Z | 2022-03-12T20:21:48.000Z | quick_start/my_text_classifier/predictors/__init__.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 50 | 2020-06-18T14:19:15.000Z | 2022-03-28T07:04:16.000Z | quick_start/my_text_classifier/predictors/__init__.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 37 | 2020-06-05T19:08:44.000Z | 2022-03-17T08:23:41.000Z | from .sentence_classifier_predictor import SentenceClassifierPredictor
| 35.5 | 70 | 0.929577 | from .sentence_classifier_predictor import SentenceClassifierPredictor
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.