Dataset schema (one record per source file):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |

Each record below is laid out as: a header line (hexsha, size, ext, lang); one line each for the max_stars, max_issues, and max_forks groups in the column order path | repo @ head_hexsha | licenses | count | events min to max; the content; the per-file statistics; the content_no_comment; and the two comment-removal flags.
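The avg_line_length, max_line_length, and alphanum_fraction columns summarize each file's content string. As a rough illustration only, here is a minimal sketch of how such statistics could be computed; the exact definitions used to build this dataset (for example whether newlines count toward alphanum_fraction) are assumptions here, not documented behavior.

```python
# Hypothetical sketch of per-file statistics like the columns above.
# These formulas are assumptions for illustration, not the dataset's
# documented definitions.
def file_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }

print(file_stats("max_hourly_reward = 40\norganization_only = False\n"))
```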
hexsha: 1c429d734bcad3299699b458f7a0c48c61376d31 | size: 259 | ext: py | lang: Python
max_stars: config.py | lightning-sprinkle/lightning-sprinkle-service @ c5f44d17da2a9894982e203aa1fbcc6f74753db2 | ["MIT"] | count: null | events: null to null
max_issues: config.py | lightning-sprinkle/lightning-sprinkle-service @ c5f44d17da2a9894982e203aa1fbcc6f74753db2 | ["MIT"] | count: null | events: null to null
max_forks: config.py | lightning-sprinkle/lightning-sprinkle-service @ c5f44d17da2a9894982e203aa1fbcc6f74753db2 | ["MIT"] | count: null | events: null to null
content:
"""
Application configuration
"""
# The maximum reward per hour in satoshis
max_hourly_reward = 40
# Only reward hostnames with a valid OV or EV certificate.
organization_only = False
# Hostnames that will never get a reward
blacklist = [
'example.com'
]
avg_line_length: 17.266667 | max_line_length: 57 | alphanum_fraction: 0.749035
content_no_comment:
max_hourly_reward = 40
organization_only = False
blacklist = [
'example.com'
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c429d8117c4a9648bd684460e211b645d1066da | size: 159 | ext: py | lang: Python
max_stars: derivative.py | daviddamilola/python-initial-gra-gra @ 9978bfda18f12c87601b110f297da2cb13872d27 | ["MIT"] | count: 1 | events: 2019-11-07T21:30:21.000Z to 2019-11-07T21:30:21.000Z
max_issues: derivative.py | daviddamilola/python-initial-gra-gra @ 9978bfda18f12c87601b110f297da2cb13872d27 | ["MIT"] | count: 2 | events: 2021-04-06T18:19:09.000Z to 2021-06-02T03:27:18.000Z
max_forks: derivative.py | daviddamilola/python-initial-gra-gra @ 9978bfda18f12c87601b110f297da2cb13872d27 | ["MIT"] | count: null | events: null to null
content:
"""
formula for the derivative of a function
f′(a) = lim_{h→0} (f(a + h) − f(a)) / h
"""
def derivative(f, h):
return lambda x: (f(x + h) - f(x)) / h
avg_line_length: 14.454545 | max_line_length: 42 | alphanum_fraction: 0.509434
content_no_comment:
def derivative(f, h):
return lambda x: (f(x + h) - f(x)) / h
is_comment_constant_removed: true | is_sharp_comment_removed: true
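For reference, a minimal usage sketch of the derivative helper from the record above; the step size and test function below are illustrative choices, not part of the original file.

```python
# Forward-difference approximation from the record above, applied to x**2.
def derivative(f, h):
    return lambda x: (f(x + h) - f(x)) / h

d_square = derivative(lambda x: x ** 2, h=1e-6)
print(d_square(3.0))  # ~6.0, since d/dx x^2 = 2x
```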
hexsha: 1c429d9483d7d7d6d3108e553c8492771ee15b86 | size: 794 | ext: py | lang: Python
max_stars: showings/migrations/0010_auto_20170925_2310.py | WarwickAnimeSoc/aniMango @ f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | count: null | events: null to null
max_issues: showings/migrations/0010_auto_20170925_2310.py | WarwickAnimeSoc/aniMango @ f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | count: 6 | events: 2016-10-18T14:52:05.000Z to 2020-06-18T15:14:41.000Z
max_forks: showings/migrations/0010_auto_20170925_2310.py | WarwickAnimeSoc/aniMango @ f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | count: 6 | events: 2020-02-07T17:37:37.000Z to 2021-01-15T00:01:43.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-25 22:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('showings', '0009_auto_20170925_2248'),
]
operations = [
migrations.AddField(
model_name='showing',
name='details',
field=models.CharField(blank=True, help_text=b'Brief event explanation, etc.', max_length=200, null=True),
),
migrations.AlterField(
model_name='showing',
name='showing_type',
field=models.CharField(choices=[(b'wk', b'Weekly showing'), (b'an', b'Allnighter'), (b'ev', b'Event'), (b'ot', b'Other')], default=b'wk', max_length=2),
),
]
avg_line_length: 30.538462 | max_line_length: 164 | alphanum_fraction: 0.605793
content_no_comment:
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('showings', '0009_auto_20170925_2248'),
]
operations = [
migrations.AddField(
model_name='showing',
name='details',
field=models.CharField(blank=True, help_text=b'Brief event explanation, etc.', max_length=200, null=True),
),
migrations.AlterField(
model_name='showing',
name='showing_type',
field=models.CharField(choices=[(b'wk', b'Weekly showing'), (b'an', b'Allnighter'), (b'ev', b'Event'), (b'ot', b'Other')], default=b'wk', max_length=2),
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42a08940e444dd3ecf4c062516205b6371119e | size: 2,259 | ext: py | lang: Python
max_stars: tests/test-version.py | Jastor11/aiobotocore @ 40427e6c45dd6b8fb75072f13cfb076cf6c4d10b | ["Apache-2.0"] | count: 772 | events: 2016-02-12T13:20:26.000Z to 2022-03-29T20:51:37.000Z
max_issues: tests/test-version.py | Jastor11/aiobotocore @ 40427e6c45dd6b8fb75072f13cfb076cf6c4d10b | ["Apache-2.0"] | count: 826 | events: 2016-02-14T11:31:25.000Z to 2022-03-31T20:41:31.000Z
max_forks: tests/test-version.py | Jastor11/aiobotocore @ 40427e6c45dd6b8fb75072f13cfb076cf6c4d10b | ["Apache-2.0"] | count: 154 | events: 2016-04-28T16:27:33.000Z to 2022-03-05T19:41:52.000Z
content:
import pytest
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
import aiobotocore
import re
from pathlib import Path
from packaging import version
from datetime import datetime
# from: https://stackoverflow.com/a/48719723/1241593
def _parse_rst(text: str) -> docutils.nodes.document:
parser = docutils.parsers.rst.Parser()
components = (docutils.parsers.rst.Parser,)
settings = docutils.frontend.OptionParser(
components=components).get_default_values()
document = docutils.utils.new_document('<rst-doc>', settings=settings)
parser.parse(text, document)
return document
# date can be YYYY-MM-DD or "TBD"
_rst_ver_date_str_re = re.compile(
r'(?P<version>\d+\.\d+\.\d+) \((?P<date>\d{4}-\d{2}-\d{2}|TBD)\)')
@pytest.mark.moto
def test_release_versions():
# ensures versions in CHANGES.rst + __init__.py match
init_version = version.parse(aiobotocore.__version__)
changes_path = Path(__file__).absolute().parent.parent / 'CHANGES.rst'
with changes_path.open('r') as f:
changes_doc = _parse_rst(f.read())
rst_ver_str = changes_doc[0][1][0][0] # ex: 0.11.1 (2020-01-03)
rst_prev_ver_str = changes_doc[0][2][0][0]
rst_ver_groups = _rst_ver_date_str_re.match(rst_ver_str)
rst_prev_ver_groups = _rst_ver_date_str_re.match(rst_prev_ver_str)
rst_ver = version.parse(rst_ver_groups['version'])
rst_prev_ver = version.parse(rst_prev_ver_groups['version'])
# first the init version should match the rst version
assert init_version == rst_ver
# the current version must be greater than the previous version
assert rst_ver > rst_prev_ver
rst_date = rst_ver_groups['date']
rst_prev_date = rst_prev_ver_groups['date']
if rst_date == 'TBD':
assert rst_ver.is_prerelease, \
'Version must be prerelease if final release date not set'
else:
assert not rst_ver.is_prerelease, \
'Version must not be prerelease if release date set'
rst_date = datetime.strptime(rst_date, '%Y-%m-%d').date()
rst_prev_date = datetime.strptime(rst_prev_date, '%Y-%m-%d').date()
assert rst_date > rst_prev_date, 'Current release must be after last release'
avg_line_length: 33.220588 | max_line_length: 85 | alphanum_fraction: 0.709606
content_no_comment:
import pytest
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
import docutils.frontend
import aiobotocore
import re
from pathlib import Path
from packaging import version
from datetime import datetime
def _parse_rst(text: str) -> docutils.nodes.document:
parser = docutils.parsers.rst.Parser()
components = (docutils.parsers.rst.Parser,)
settings = docutils.frontend.OptionParser(
components=components).get_default_values()
document = docutils.utils.new_document('<rst-doc>', settings=settings)
parser.parse(text, document)
return document
_rst_ver_date_str_re = re.compile(
r'(?P<version>\d+\.\d+\.\d+) \((?P<date>\d{4}-\d{2}-\d{2}|TBD)\)')
@pytest.mark.moto
def test_release_versions():
init_version = version.parse(aiobotocore.__version__)
changes_path = Path(__file__).absolute().parent.parent / 'CHANGES.rst'
with changes_path.open('r') as f:
changes_doc = _parse_rst(f.read())
rst_ver_str = changes_doc[0][1][0][0]
rst_prev_ver_str = changes_doc[0][2][0][0]
rst_ver_groups = _rst_ver_date_str_re.match(rst_ver_str)
rst_prev_ver_groups = _rst_ver_date_str_re.match(rst_prev_ver_str)
rst_ver = version.parse(rst_ver_groups['version'])
rst_prev_ver = version.parse(rst_prev_ver_groups['version'])
assert init_version == rst_ver
assert rst_ver > rst_prev_ver
rst_date = rst_ver_groups['date']
rst_prev_date = rst_prev_ver_groups['date']
if rst_date == 'TBD':
assert rst_ver.is_prerelease, \
'Version must be prerelease if final release date not set'
else:
assert not rst_ver.is_prerelease, \
'Version must not be prerelease if release date set'
rst_date = datetime.strptime(rst_date, '%Y-%m-%d').date()
rst_prev_date = datetime.strptime(rst_prev_date, '%Y-%m-%d').date()
assert rst_date > rst_prev_date, 'Current release must be after last release'
is_comment_constant_removed: true | is_sharp_comment_removed: true
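The _rst_ver_date_str_re pattern in the record above parses CHANGES.rst headings such as "0.11.1 (2020-01-03)" (the example given in the file's own comment). A small standalone sketch of what it matches; the TBD sample string is illustrative.

```python
import re

# Same pattern as in the record above: "<version> (<YYYY-MM-DD or TBD>)".
_rst_ver_date_str_re = re.compile(
    r'(?P<version>\d+\.\d+\.\d+) \((?P<date>\d{4}-\d{2}-\d{2}|TBD)\)')

for title in ('0.11.1 (2020-01-03)', '2.2.0 (TBD)'):
    m = _rst_ver_date_str_re.match(title)
    print(m['version'], m['date'])
```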
hexsha: 1c42a1324b17e0befbcd29042fc01f59088beaec | size: 463 | ext: py | lang: Python
max_stars: OsterAnmeldung/models.py | Husterknupp/2020-oster-squash @ 43e8742c89ad1225119e8d2c4d2dba6a2914dd0d | ["MIT"] | count: 1 | events: 2020-03-06T16:06:00.000Z to 2020-03-06T16:06:00.000Z
max_issues: OsterAnmeldung/models.py | Husterknupp/2020-oster-squash @ 43e8742c89ad1225119e8d2c4d2dba6a2914dd0d | ["MIT"] | count: 1 | events: 2021-06-10T18:36:46.000Z to 2021-06-10T18:36:46.000Z
max_forks: OsterAnmeldung/models.py | Husterknupp/2020-oster-squash @ 43e8742c89ad1225119e8d2c4d2dba6a2914dd0d | ["MIT"] | count: 1 | events: 2020-03-05T23:38:21.000Z to 2020-03-05T23:38:21.000Z
content:
from django.db import models
class Registration(models.Model):
id = models.CharField(primary_key=True, unique=True, max_length=120)
emailAddress = models.TextField()
timeFrameBegin = models.DateTimeField()
dateOfRegistration = models.DateTimeField(auto_now=True)
quantity = models.IntegerField()
state = models.CharField(max_length=120)
def __str__(self):
return str(self.timeFrameBegin) + " (" + str(self.emailAddress) + ")"
avg_line_length: 33.071429 | max_line_length: 77 | alphanum_fraction: 0.719222
content_no_comment:
from django.db import models
class Registration(models.Model):
id = models.CharField(primary_key=True, unique=True, max_length=120)
emailAddress = models.TextField()
timeFrameBegin = models.DateTimeField()
dateOfRegistration = models.DateTimeField(auto_now=True)
quantity = models.IntegerField()
state = models.CharField(max_length=120)
def __str__(self):
return str(self.timeFrameBegin) + " (" + str(self.emailAddress) + ")"
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42a1771f2e1ab5a3930a8db384eefefe3ac7f5 | size: 4,336 | ext: py | lang: Python
max_stars: examples/collectors/interval_collectors.py | benji011/instascrape @ 712a7b0e2b5abd635d0113b5600e8cb62d6bdbbc | ["MIT"] | count: null | events: null to null
max_issues: examples/collectors/interval_collectors.py | benji011/instascrape @ 712a7b0e2b5abd635d0113b5600e8cb62d6bdbbc | ["MIT"] | count: null | events: null to null
max_forks: examples/collectors/interval_collectors.py | benji011/instascrape @ 712a7b0e2b5abd635d0113b5600e8cb62d6bdbbc | ["MIT"] | count: null | events: null to null
content:
import datetime
from itertools import cycle
import time
from typing import List, Callable
class IntervalCollector:
"""
Given a list of scraper objects, perform different data collection tasks
"""
def __init__(self, scrapers: List["Scrapers"]) -> None:
if not type(scrapers) == list:
scrapers = list(scrapers)
self.scrapers = scrapers
def _calculate_time_remaining(self, current, end):
return (end - current).seconds
def interval_scrape(
self,
min_interval: int = 5,
days: int = 0,
seconds: int = 60,
microseconds: int = 0,
milliseconds: int = 0,
minutes: int = 0,
hours: int = 0,
weeks: int = 0,
quiet: bool = False,
):
current_time = datetime.datetime.now()
end_time = current_time + datetime.timedelta(
days=days,
seconds=seconds,
microseconds=microseconds,
milliseconds=milliseconds,
minutes=minutes,
hours=hours,
weeks=weeks,
)
# TODO: Process list asynchronously and then wait so that each
# scraper is processed at basically the same time before waiting
# Wait during interval, scrape data, then check if current time has passed end time
if not quiet:
print(
f"Starting scrape, {self._calculate_time_remaining(current_time, end_time)} seconds remaining"
)
for scraper in cycle(self.scrapers):
time.sleep(min_interval)
scraper.static_load()
current_time = datetime.datetime.now()
time_remaining = self._calculate_time_remaining(current_time, end_time)
if not quiet:
if time_remaining > 0:
print(f"{scraper} scraped: {time_remaining} seconds remaining")
else:
print(f"{scraper} scraped: No time remaining, exitting")
if current_time > end_time or time_remaining < min_interval:
break
class IntervalIterator(IntervalCollector):
"""
Iterator for scraping at given intervals
"""
def __init__(
self,
scrapers,
min_interval: int = 5,
days: int = 0,
seconds: int = 60,
microseconds: int = 0,
milliseconds: int = 0,
minutes: int = 0,
hours: int = 0,
weeks: int = 0,
quiet: bool = False,
):
self.scrapers = scrapers
self.min_interval = min_interval
self.days = days
self.seconds = seconds
self.microseconds = microseconds
self.milliseconds = milliseconds
self.minutes = minutes
self.hours = hours
self.weeks = weeks
self.quiet = quiet
self.current = self.scrapers[0]
self.scrapers = cycle(self.scrapers)
self.current_time = datetime.datetime.now()
self.end_time = self.current_time + datetime.timedelta(
days=days,
seconds=seconds,
microseconds=microseconds,
milliseconds=milliseconds,
minutes=minutes,
hours=hours,
weeks=weeks,
)
if not self.quiet:
print(
f"Starting scrape, {self._calculate_time_remaining(self.current_time, self.end_time)} seconds remaining"
)
def __iter__(self):
return self
def __next__(self, val=True):
time.sleep(self.min_interval)
self.current.static_load()
self.current_time = datetime.datetime.now()
self.time_remaining = self._calculate_time_remaining(
self.current_time, self.end_time
)
if not self.quiet:
if self.time_remaining > 0:
print(
f"{self.current} scraped: {self.time_remaining} seconds remaining"
)
else:
print(f"{self.current} scraped: No time remaining, exitting")
self.current = next(self.scrapers)
if self.current_time > self.end_time or self.time_remaining < self.min_interval:
raise StopIteration
avg_line_length: 32.358209 | max_line_length: 121 | alphanum_fraction: 0.562269
content_no_comment:
import datetime
from itertools import cycle
import time
from typing import List, Callable
class IntervalCollector:
def __init__(self, scrapers: List["Scrapers"]) -> None:
if not type(scrapers) == list:
scrapers = list(scrapers)
self.scrapers = scrapers
def _calculate_time_remaining(self, current, end):
return (end - current).seconds
def interval_scrape(
self,
min_interval: int = 5,
days: int = 0,
seconds: int = 60,
microseconds: int = 0,
milliseconds: int = 0,
minutes: int = 0,
hours: int = 0,
weeks: int = 0,
quiet: bool = False,
):
current_time = datetime.datetime.now()
end_time = current_time + datetime.timedelta(
days=days,
seconds=seconds,
microseconds=microseconds,
milliseconds=milliseconds,
minutes=minutes,
hours=hours,
weeks=weeks,
)
if not quiet:
print(
f"Starting scrape, {self._calculate_time_remaining(current_time, end_time)} seconds remaining"
)
for scraper in cycle(self.scrapers):
time.sleep(min_interval)
scraper.static_load()
current_time = datetime.datetime.now()
time_remaining = self._calculate_time_remaining(current_time, end_time)
if not quiet:
if time_remaining > 0:
print(f"{scraper} scraped: {time_remaining} seconds remaining")
else:
print(f"{scraper} scraped: No time remaining, exitting")
if current_time > end_time or time_remaining < min_interval:
break
class IntervalIterator(IntervalCollector):
def __init__(
self,
scrapers,
min_interval: int = 5,
days: int = 0,
seconds: int = 60,
microseconds: int = 0,
milliseconds: int = 0,
minutes: int = 0,
hours: int = 0,
weeks: int = 0,
quiet: bool = False,
):
self.scrapers = scrapers
self.min_interval = min_interval
self.days = days
self.seconds = seconds
self.microseconds = microseconds
self.milliseconds = milliseconds
self.minutes = minutes
self.hours = hours
self.weeks = weeks
self.quiet = quiet
self.current = self.scrapers[0]
self.scrapers = cycle(self.scrapers)
self.current_time = datetime.datetime.now()
self.end_time = self.current_time + datetime.timedelta(
days=days,
seconds=seconds,
microseconds=microseconds,
milliseconds=milliseconds,
minutes=minutes,
hours=hours,
weeks=weeks,
)
if not self.quiet:
print(
f"Starting scrape, {self._calculate_time_remaining(self.current_time, self.end_time)} seconds remaining"
)
def __iter__(self):
return self
def __next__(self, val=True):
time.sleep(self.min_interval)
self.current.static_load()
self.current_time = datetime.datetime.now()
self.time_remaining = self._calculate_time_remaining(
self.current_time, self.end_time
)
if not self.quiet:
if self.time_remaining > 0:
print(
f"{self.current} scraped: {self.time_remaining} seconds remaining"
)
else:
print(f"{self.current} scraped: No time remaining, exitting")
self.current = next(self.scrapers)
if self.current_time > self.end_time or self.time_remaining < self.min_interval:
raise StopIteration
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42a1af5d59125195c23ba20d51a39b303f93cd | size: 6,166 | ext: py | lang: Python
max_stars: src/models/legendre_duality/train.py | lavoiems/NeuralWassersteinFlow @ b120778d75fc7afc9b6a56724768ab39ad7c0b91 | ["MIT"] | count: null | events: null to null
max_issues: src/models/legendre_duality/train.py | lavoiems/NeuralWassersteinFlow @ b120778d75fc7afc9b6a56724768ab39ad7c0b91 | ["MIT"] | count: null | events: null to null
max_forks: src/models/legendre_duality/train.py | lavoiems/NeuralWassersteinFlow @ b120778d75fc7afc9b6a56724768ab39ad7c0b91 | ["MIT"] | count: null | events: null to null
content:
import time
import torch
from torch import optim
from sklearn.decomposition import PCA
import matplotlib.pylab as plt
import torch.nn.functional as F
from common.util import sample, save_models
from common.initialize import initialize, infer_iteration
from . import model
def c_transform(y, ey, lp, critic):
cy = critic(ey)
cost = (ey.view(ey.shape[0], -1) - y.view(y.shape[0], -1)).abs().pow(lp).sum(1)
return (cy - cost).mean()
def encoder_loss(batch_size, lp, z_dim, encoder, generator, critic, device):
z = torch.randn(batch_size, z_dim, device=device)
y = generator(z).detach()
ey = encoder(y)
return c_transform(y, ey, lp, critic)
def critic_loss(x, lp, z_dim, encoder, critic, generator, device):
f = critic(x).mean()
z = torch.randn(x.shape[0], z_dim, device=device)
y = generator(z).detach()
ey = encoder(y).detach()
return f - critic(ey).mean()
def transfer_loss(batch_size, lp, z_dim, encoder, critic, generator, device):
z = torch.randn(batch_size, z_dim, device=device)
y = generator(z)
ey = encoder(y).detach()
return -c_transform(y, ey, lp, critic)
def define_models(shape1, **parameters):
critic = model.Critic(shape1[0], shape1[1], **parameters)
generator = model.Generator(shape1[0], shape1[1], **parameters)
encoder = model.Encoder(shape1[0], shape1[1], **parameters)
return {
'generator': generator,
'critic': critic,
'encoder': encoder,
}
def evaluate(visualiser, nz, data, encoder, generator, critic, z_dim, id, device):
z = torch.randn(data.shape[0], nz, device=device)
z.requires_grad = True
dec = generator(z)
visualiser.image(dec.cpu().detach().numpy(), title=f'GAN generated', step=id)
visualiser.image(data.cpu().numpy(), title=f'Target', step=id)
enc = encoder(dec)
visualiser.image(enc.cpu().detach().numpy(), title=f'GAN encoded', step=id)
@torch.no_grad()
def evaluate_clusters(visualiser, encoder, target, label, id):
enc = encoder(target)
pca = PCA(2)
emb = pca.fit_transform(enc.reshape(enc.shape[0], -1).cpu().squeeze().numpy())
fig = plt.figure()
colors = [f'C{c}' for c in label.cpu().numpy()]
plt.scatter(*emb.transpose(), c=colors)
visualiser.matplotlib(fig, f'Embeddings {id}', None)
plt.clf()
plt.close(fig)
@torch.no_grad()
def evaluate_distance(visualiser, encoder, loader1, loader2, device):
ds = torch.zeros(10, 10, device=device)
totals = torch.zeros(10, 10, device=device)
for b1, b2 in zip(loader1, loader2):
d1, d2 = b1[0].to(device), b2[0].to(device)
l1, l2 = b1[1].to(device), b2[1].to(device)
z1, z2 = encoder(d1), encoder(d2)
dist = F.pairwise_distance(z1, z2, 2)
ds[l1, l2] += dist
ds[l2, l1] += dist
totals[l1, l2] += 1
totals[l2, l1] += 1
avgs = ds / totals
fig, ax = plt.subplots()
im = ax.imshow(avgs.cpu().numpy())
for i, row in enumerate(avgs):
for j, point in enumerate(row):
text = ax.text(j, i, f'{point.cpu().item():.3f}', ha='center', va='center', color='w', size=6)
visualiser.matplotlib(fig, 'distances', None)
plt.clf()
plt.close(fig)
def train(args):
parameters = vars(args)
train_loader1, test_loader1 = args.loaders1
models = define_models(**parameters)
initialize(models, args.reload, args.save_path, args.model_path)
generator = models['generator'].to(args.device)
critic = models['critic'].to(args.device)
encoder = models['encoder'].to(args.device)
print(generator)
print(critic)
print(encoder)
optim_critic = optim.Adam(critic.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_generator = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_encoder = optim.Adam(encoder.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
iter1 = iter(train_loader1)
iteration = infer_iteration(list(models.keys())[0], args.reload, args.model_path, args.save_path)
titer1 = iter(test_loader1)
mone = torch.FloatTensor([-1]).to(args.device)
t0 = time.time()
for i in range(iteration, args.iterations):
generator.train()
critic.train()
encoder.train()
for _ in range(10):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_encoder.zero_grad()
optim_generator.zero_grad()
e_loss = encoder_loss(data.shape[0], args.lp, args.z_dim, encoder, generator, critic, args.device)
e_loss.backward()
optim_encoder.step()
optim_generator.step()
for _ in range(1):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_critic.zero_grad()
r_loss = critic_loss(data, args.lp, args.z_dim, encoder, critic, generator, args.device)
r_loss.backward(mone)
optim_critic.step()
for _ in range(1):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_generator.zero_grad()
t_loss = transfer_loss(data.shape[0], args.lp, args.z_dim, encoder, critic, generator, args.device)
t_loss.backward()
optim_generator.step()
if i % args.evaluate == 0:
encoder.eval()
generator.eval()
print('Iter: %s' % i, time.time() - t0)
batchx, titer1 = sample(titer1, test_loader1)
data = batchx[0].to(args.device)
evaluate(args.visualiser, args.z_dim, data, encoder, generator, critic, args.z_dim, i, args.device)
d_loss = (r_loss).detach().cpu().numpy()
args.visualiser.plot(step=i, data=d_loss, title=f'Critic loss')
args.visualiser.plot(step=i, data=e_loss.detach().cpu().numpy(), title=f'Encoder loss')
args.visualiser.plot(step=i, data=t_loss.detach().cpu().numpy(), title=f'Generator loss')
t0 = time.time()
save_models(models, i, args.model_path, args.checkpoint)
avg_line_length: 36.702381 | max_line_length: 111 | alphanum_fraction: 0.628608
content_no_comment:
import time
import torch
from torch import optim
from sklearn.decomposition import PCA
import matplotlib.pylab as plt
import torch.nn.functional as F
from common.util import sample, save_models
from common.initialize import initialize, infer_iteration
from . import model
def c_transform(y, ey, lp, critic):
cy = critic(ey)
cost = (ey.view(ey.shape[0], -1) - y.view(y.shape[0], -1)).abs().pow(lp).sum(1)
return (cy - cost).mean()
def encoder_loss(batch_size, lp, z_dim, encoder, generator, critic, device):
z = torch.randn(batch_size, z_dim, device=device)
y = generator(z).detach()
ey = encoder(y)
return c_transform(y, ey, lp, critic)
def critic_loss(x, lp, z_dim, encoder, critic, generator, device):
f = critic(x).mean()
z = torch.randn(x.shape[0], z_dim, device=device)
y = generator(z).detach()
ey = encoder(y).detach()
return f - critic(ey).mean()
def transfer_loss(batch_size, lp, z_dim, encoder, critic, generator, device):
z = torch.randn(batch_size, z_dim, device=device)
y = generator(z)
ey = encoder(y).detach()
return -c_transform(y, ey, lp, critic)
def define_models(shape1, **parameters):
critic = model.Critic(shape1[0], shape1[1], **parameters)
generator = model.Generator(shape1[0], shape1[1], **parameters)
encoder = model.Encoder(shape1[0], shape1[1], **parameters)
return {
'generator': generator,
'critic': critic,
'encoder': encoder,
}
def evaluate(visualiser, nz, data, encoder, generator, critic, z_dim, id, device):
z = torch.randn(data.shape[0], nz, device=device)
z.requires_grad = True
dec = generator(z)
visualiser.image(dec.cpu().detach().numpy(), title=f'GAN generated', step=id)
visualiser.image(data.cpu().numpy(), title=f'Target', step=id)
enc = encoder(dec)
visualiser.image(enc.cpu().detach().numpy(), title=f'GAN encoded', step=id)
@torch.no_grad()
def evaluate_clusters(visualiser, encoder, target, label, id):
enc = encoder(target)
pca = PCA(2)
emb = pca.fit_transform(enc.reshape(enc.shape[0], -1).cpu().squeeze().numpy())
fig = plt.figure()
colors = [f'C{c}' for c in label.cpu().numpy()]
plt.scatter(*emb.transpose(), c=colors)
visualiser.matplotlib(fig, f'Embeddings {id}', None)
plt.clf()
plt.close(fig)
@torch.no_grad()
def evaluate_distance(visualiser, encoder, loader1, loader2, device):
ds = torch.zeros(10, 10, device=device)
totals = torch.zeros(10, 10, device=device)
for b1, b2 in zip(loader1, loader2):
d1, d2 = b1[0].to(device), b2[0].to(device)
l1, l2 = b1[1].to(device), b2[1].to(device)
z1, z2 = encoder(d1), encoder(d2)
dist = F.pairwise_distance(z1, z2, 2)
ds[l1, l2] += dist
ds[l2, l1] += dist
totals[l1, l2] += 1
totals[l2, l1] += 1
avgs = ds / totals
fig, ax = plt.subplots()
im = ax.imshow(avgs.cpu().numpy())
for i, row in enumerate(avgs):
for j, point in enumerate(row):
text = ax.text(j, i, f'{point.cpu().item():.3f}', ha='center', va='center', color='w', size=6)
visualiser.matplotlib(fig, 'distances', None)
plt.clf()
plt.close(fig)
def train(args):
parameters = vars(args)
train_loader1, test_loader1 = args.loaders1
models = define_models(**parameters)
initialize(models, args.reload, args.save_path, args.model_path)
generator = models['generator'].to(args.device)
critic = models['critic'].to(args.device)
encoder = models['encoder'].to(args.device)
print(generator)
print(critic)
print(encoder)
optim_critic = optim.Adam(critic.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_generator = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_encoder = optim.Adam(encoder.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
iter1 = iter(train_loader1)
iteration = infer_iteration(list(models.keys())[0], args.reload, args.model_path, args.save_path)
titer1 = iter(test_loader1)
mone = torch.FloatTensor([-1]).to(args.device)
t0 = time.time()
for i in range(iteration, args.iterations):
generator.train()
critic.train()
encoder.train()
for _ in range(10):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_encoder.zero_grad()
optim_generator.zero_grad()
e_loss = encoder_loss(data.shape[0], args.lp, args.z_dim, encoder, generator, critic, args.device)
e_loss.backward()
optim_encoder.step()
optim_generator.step()
for _ in range(1):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_critic.zero_grad()
r_loss = critic_loss(data, args.lp, args.z_dim, encoder, critic, generator, args.device)
r_loss.backward(mone)
optim_critic.step()
for _ in range(1):
batchx, iter1 = sample(iter1, train_loader1)
data = batchx[0].to(args.device)
optim_generator.zero_grad()
t_loss = transfer_loss(data.shape[0], args.lp, args.z_dim, encoder, critic, generator, args.device)
t_loss.backward()
optim_generator.step()
if i % args.evaluate == 0:
encoder.eval()
generator.eval()
print('Iter: %s' % i, time.time() - t0)
batchx, titer1 = sample(titer1, test_loader1)
data = batchx[0].to(args.device)
evaluate(args.visualiser, args.z_dim, data, encoder, generator, critic, args.z_dim, i, args.device)
d_loss = (r_loss).detach().cpu().numpy()
args.visualiser.plot(step=i, data=d_loss, title=f'Critic loss')
args.visualiser.plot(step=i, data=e_loss.detach().cpu().numpy(), title=f'Encoder loss')
args.visualiser.plot(step=i, data=t_loss.detach().cpu().numpy(), title=f'Generator loss')
t0 = time.time()
save_models(models, i, args.model_path, args.checkpoint)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42a3d855bc16c21e385d7108c3106884ae4f5e | size: 27,746 | ext: py | lang: Python
max_stars: tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py | harunpehlivan/tensorflow @ 376e2cfdab31f4da251ea2e50992a9bf97fd171b | ["Apache-2.0"] | count: 24 | events: 2018-02-01T15:49:22.000Z to 2021-01-11T16:31:18.000Z
max_issues: tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py | hamzabekkouri/tensorflow @ d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | ["Apache-2.0"] | count: 3 | events: 2018-05-09T11:31:58.000Z to 2021-01-27T12:26:21.000Z
max_forks: tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py | hamzabekkouri/tensorflow @ d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | ["Apache-2.0"] | count: 13 | events: 2018-02-22T21:04:13.000Z to 2020-11-17T11:38:36.000Z
content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTestBase(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record unless it is the last
# record of the file; in that case include it only for the first file,
# so at least one file ends without a trailing newline.
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TextLineDatasetTest(TextLineDatasetTestBase):
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: 5})
for _ in range(10):
self.assertAllEqual([self._lineText(0, i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual([self._lineText(1, i) for i in range(5)],
sess.run(get_next))
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
iterator = repeat_dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TextLineDatasetSerializationTest(
TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
# pylint: enable=cell-var-from-loop
class FixedLengthRecordReaderTestBase(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
class FixedLengthRecordReaderTest(FixedLengthRecordReaderTestBase):
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={
filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class FixedLengthRecordDatasetSerializationTest(
FixedLengthRecordReaderTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
class TFRecordDatasetTestBase(test.TestCase):
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TFRecordDataset(self.filenames,
self.compression_type).repeat(
self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
class TFRecordDatasetTest(TFRecordDatasetTestBase):
def testReadOneEpoch(self):
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[0]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from file 1.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[1]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from both files.
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochsOfBatches(self):
with self.test_session() as sess:
sess.run(
self.init_batch_op,
feed_dict={
self.filenames: self.test_filenames,
self.num_epochs: 10,
self.batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
values = sess.run(self.get_next)
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)], values)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: zlib_files,
self.compression_type: "ZLIB"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: gzip_files,
self.compression_type: "GZIP"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadWithBuffer(self):
one_mebibyte = 2**20
d = readers.TFRecordDataset(self.test_filenames, buffer_size=one_mebibyte)
iterator = d.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TFRecordDatasetSerializationTest(
TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self, filenames, num_epochs, batch_size):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.read_batch_features(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=readers.TFRecordDataset,
randomize_input=False,
num_epochs=self.num_epochs)
def _record(self, f, r):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[f])),
"record":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[r])),
"keywords":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _next_actual_batch(self, sess):
file_op = self.outputs["file"]
keywords_indices_op = self.outputs["keywords"].indices
keywords_values_op = self.outputs["keywords"].values
keywords_dense_shape_op = self.outputs["keywords"].dense_shape
record_op = self.outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_expected_batch(self, file_indices, batch_size, num_epochs):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
for record in _next_record(file_indices):
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend([[batch_index, i]
for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self, sess, batch_size, file_index=None, num_epochs=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(file_indices, batch_size,
num_epochs):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from both files.
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
# TODO(mrry): Add support for tf.SparseTensor as a Dataset component.
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
if __name__ == "__main__":
test.main()
avg_line_length: 37.393531 | max_line_length: 87 | alphanum_fraction: 0.653932
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTestBase(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TextLineDatasetTest(TextLineDatasetTestBase):
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(
init_batch_op,
feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: 5})
for _ in range(10):
self.assertAllEqual([self._lineText(0, i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual([self._lineText(1, i) for i in range(5)],
sess.run(get_next))
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
iterator = repeat_dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TextLineDatasetSerializationTest(
TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
class FixedLengthRecordReaderTestBase(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
class FixedLengthRecordReaderTest(FixedLengthRecordReaderTestBase):
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(
init_batch_op,
feed_dict={
filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class FixedLengthRecordDatasetSerializationTest(
FixedLengthRecordReaderTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
class TFRecordDatasetTestBase(test.TestCase):
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TFRecordDataset(self.filenames,
self.compression_type).repeat(
self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
class TFRecordDatasetTest(TFRecordDatasetTestBase):
def testReadOneEpoch(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[0]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[1]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochsOfBatches(self):
with self.test_session() as sess:
sess.run(
self.init_batch_op,
feed_dict={
self.filenames: self.test_filenames,
self.num_epochs: 10,
self.batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
values = sess.run(self.get_next)
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)], values)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: zlib_files,
self.compression_type: "ZLIB"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: gzip_files,
self.compression_type: "GZIP"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadWithBuffer(self):
one_mebibyte = 2**20
d = readers.TFRecordDataset(self.test_filenames, buffer_size=one_mebibyte)
iterator = d.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TFRecordDatasetSerializationTest(
TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self, filenames, num_epochs, batch_size):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.read_batch_features(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=readers.TFRecordDataset,
randomize_input=False,
num_epochs=self.num_epochs)
def _record(self, f, r):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[f])),
"record":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[r])),
"keywords":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _next_actual_batch(self, sess):
file_op = self.outputs["file"]
keywords_indices_op = self.outputs["keywords"].indices
keywords_values_op = self.outputs["keywords"].values
keywords_dense_shape_op = self.outputs["keywords"].dense_shape
record_op = self.outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_expected_batch(self, file_indices, batch_size, num_epochs):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
for record in _next_record(file_indices):
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend([[batch_index, i]
for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self, sess, batch_size, file_index=None, num_epochs=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(file_indices, batch_size,
num_epochs):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
if __name__ == "__main__":
test.main()
| true
| true
|
1c42a45984700520b74c50a6c75286ee65c109e9
| 305
|
py
|
Python
|
Dataset/Leetcode/train/111/532.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/111/532.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/111/532.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, root: TreeNode) -> int:
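# Computes the minimum depth of a binary tree -- the number of nodes on the
# shortest root-to-leaf path (LeetCode 111, per the file path above).
# fun() returns a large sentinel (0x3f3f3f) for a missing child so that an
# absent subtree never wins the min() against a real subtree.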
if not root:
return 0
def fun(node):
if not node: return 0x3f3f3f
if not node.left and not node.right: return 1
return min(fun(node.left),fun(node.right))+1
return fun(root)
| 27.727273
| 57
| 0.55082
|
class Solution:
def XXX(self, root: TreeNode) -> int:
if not root:
return 0
def fun(node):
if not node: return 0x3f3f3f
if not node.left and not node.right: return 1
return min(fun(node.left),fun(node.right))+1
return fun(root)
| true
| true
|
1c42a4d54ab1ec0325767e0a5c09c49005819c3d
| 3,391
|
py
|
Python
|
data/p2DJ/New/program/qiskit/simulator/startQiskit365.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/simulator/startQiskit365.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/simulator/startQiskit365.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=21
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = pi (180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.h(input_qubit[0]) # number=13
prog.cz(input_qubit[1],input_qubit[0]) # number=14
prog.h(input_qubit[0]) # number=15
prog.cx(input_qubit[1],input_qubit[0]) # number=18
prog.x(input_qubit[0]) # number=19
prog.cx(input_qubit[1],input_qubit[0]) # number=20
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=10
prog.x(input_qubit[0]) # number=11
prog.cx(input_qubit[1],input_qubit[0]) # number=12
prog.x(input_qubit[0]) # number=16
prog.x(input_qubit[0]) # number=17
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
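# note: the two X gates applied to qubit 3 below cancel each other (X.X = I),
# so they do not change the measured distribution of the transpiled circuit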
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit365.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 29.232759
| 82
| 0.627543
|
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
prog.x(target)
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1])
prog.h(target)
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
prog.y(input_qubit[1])
prog.y(input_qubit[1])
prog.y(input_qubit[1])
prog.h(input_qubit[0])
prog.cz(input_qubit[1],input_qubit[0])
prog.h(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.x(input_qubit[0])
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit365.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true
| true
|
1c42a56f7472dd88036f3b89d2f4f2d610f06ef4
| 7,566
|
py
|
Python
|
neobolt/packstream/unpacker.py
|
technige/neobolt
|
f48eac3046cf0f6d6fe534fdb53ea42c964bcc9f
|
[
"Apache-2.0"
] | null | null | null |
neobolt/packstream/unpacker.py
|
technige/neobolt
|
f48eac3046cf0f6d6fe534fdb53ea42c964bcc9f
|
[
"Apache-2.0"
] | null | null | null |
neobolt/packstream/unpacker.py
|
technige/neobolt
|
f48eac3046cf0f6d6fe534fdb53ea42c964bcc9f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codecs import decode
from struct import unpack as struct_unpack
from neobolt.packstream import Structure
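# Unique sentinel returned when the END_OF_STREAM marker (0xDF) is unpacked;
# it terminates the LIST_STREAM and MAP_STREAM collections handled below.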
EndOfStream = object()
class Unpacker(object):
def __init__(self):
self.source = None
def attach(self, source):
self.source = source
def read(self, n=1):
return self.source.read(n)
def read_int(self):
return self.source.read_int()
def unpack(self):
return self._unpack()
def _unpack(self):
marker = self.read_int()
if marker == -1:
raise RuntimeError("Nothing to unpack")
# Tiny Integer
if 0x00 <= marker <= 0x7F:
return marker
elif 0xF0 <= marker <= 0xFF:
return marker - 0x100
# Null
elif marker == 0xC0:
return None
# Float
elif marker == 0xC1:
value, = struct_unpack(">d", self.read(8))
return value
# Boolean
elif marker == 0xC2:
return False
elif marker == 0xC3:
return True
# Integer
elif marker == 0xC8:
return struct_unpack(">b", self.read(1))[0]
elif marker == 0xC9:
return struct_unpack(">h", self.read(2))[0]
elif marker == 0xCA:
return struct_unpack(">i", self.read(4))[0]
elif marker == 0xCB:
return struct_unpack(">q", self.read(8))[0]
# Bytes
elif marker == 0xCC:
size, = struct_unpack(">B", self.read(1))
return self.read(size).tobytes()
elif marker == 0xCD:
size, = struct_unpack(">H", self.read(2))
return self.read(size).tobytes()
elif marker == 0xCE:
size, = struct_unpack(">I", self.read(4))
return self.read(size).tobytes()
else:
marker_high = marker & 0xF0
# String
if marker_high == 0x80: # TINY_STRING
return decode(self.read(marker & 0x0F), "utf-8")
elif marker == 0xD0: # STRING_8:
size, = struct_unpack(">B", self.read(1))
return decode(self.read(size), "utf-8")
elif marker == 0xD1: # STRING_16:
size, = struct_unpack(">H", self.read(2))
return decode(self.read(size), "utf-8")
elif marker == 0xD2: # STRING_32:
size, = struct_unpack(">I", self.read(4))
return decode(self.read(size), "utf-8")
# List
elif 0x90 <= marker <= 0x9F or 0xD4 <= marker <= 0xD7:
return self._unpack_list(marker)
# Map
elif 0xA0 <= marker <= 0xAF or 0xD8 <= marker <= 0xDB:
return self._unpack_map(marker)
# Structure
elif 0xB0 <= marker <= 0xBF or 0xDC <= marker <= 0xDD:
size, tag = self._unpack_structure_header(marker)
value = Structure(tag, *([None] * size))
for i in range(len(value)):
value[i] = self._unpack()
return value
elif marker == 0xDF: # END_OF_STREAM:
return EndOfStream
else:
raise RuntimeError("Unknown PackStream marker %02X" % marker)
def unpack_list(self):
marker = self.read_int()
return self._unpack_list(marker)
def _unpack_list(self, marker):
marker_high = marker & 0xF0
if marker_high == 0x90:
size = marker & 0x0F
if size == 0:
return []
elif size == 1:
return [self._unpack()]
else:
return [self._unpack() for _ in range(size)]
elif marker == 0xD4: # LIST_8:
size, = struct_unpack(">B", self.read(1))
return [self._unpack() for _ in range(size)]
elif marker == 0xD5: # LIST_16:
size, = struct_unpack(">H", self.read(2))
return [self._unpack() for _ in range(size)]
elif marker == 0xD6: # LIST_32:
size, = struct_unpack(">I", self.read(4))
return [self._unpack() for _ in range(size)]
elif marker == 0xD7: # LIST_STREAM:
value = []
item = None
while item is not EndOfStream:
item = self._unpack()
if item is not EndOfStream:
value.append(item)
return value
else:
return None
def unpack_map(self):
marker = self.read_int()
return self._unpack_map(marker)
def _unpack_map(self, marker):
marker_high = marker & 0xF0
if marker_high == 0xA0:
size = marker & 0x0F
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xD8: # MAP_8:
size, = struct_unpack(">B", self.read(1))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xD9: # MAP_16:
size, = struct_unpack(">H", self.read(2))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xDA: # MAP_32:
size, = struct_unpack(">I", self.read(4))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xDB: # MAP_STREAM:
value = {}
key = None
while key is not EndOfStream:
key = self._unpack()
if key is not EndOfStream:
value[key] = self._unpack()
return value
else:
return None
def unpack_structure_header(self):
marker = self.read_int()
if marker == -1:
return None, None
else:
return self._unpack_structure_header(marker)
def _unpack_structure_header(self, marker):
marker_high = marker & 0xF0
if marker_high == 0xB0: # TINY_STRUCT
signature = self.read(1).tobytes()
return marker & 0x0F, signature
elif marker == 0xDC: # STRUCT_8:
size, = struct_unpack(">B", self.read(1))
signature = self.read(1).tobytes()
return size, signature
elif marker == 0xDD: # STRUCT_16:
size, = struct_unpack(">H", self.read(2))
signature = self.read(1).tobytes()
return size, signature
else:
raise RuntimeError("Expected structure, found marker %02X" % marker)
| 32.333333
| 80
| 0.52313
|
from codecs import decode
from struct import unpack as struct_unpack
from neobolt.packstream import Structure
EndOfStream = object()
class Unpacker(object):
def __init__(self):
self.source = None
def attach(self, source):
self.source = source
def read(self, n=1):
return self.source.read(n)
def read_int(self):
return self.source.read_int()
def unpack(self):
return self._unpack()
def _unpack(self):
marker = self.read_int()
if marker == -1:
raise RuntimeError("Nothing to unpack")
if 0x00 <= marker <= 0x7F:
return marker
elif 0xF0 <= marker <= 0xFF:
return marker - 0x100
elif marker == 0xC0:
return None
elif marker == 0xC1:
value, = struct_unpack(">d", self.read(8))
return value
elif marker == 0xC2:
return False
elif marker == 0xC3:
return True
elif marker == 0xC8:
return struct_unpack(">b", self.read(1))[0]
elif marker == 0xC9:
return struct_unpack(">h", self.read(2))[0]
elif marker == 0xCA:
return struct_unpack(">i", self.read(4))[0]
elif marker == 0xCB:
return struct_unpack(">q", self.read(8))[0]
elif marker == 0xCC:
size, = struct_unpack(">B", self.read(1))
return self.read(size).tobytes()
elif marker == 0xCD:
size, = struct_unpack(">H", self.read(2))
return self.read(size).tobytes()
elif marker == 0xCE:
size, = struct_unpack(">I", self.read(4))
return self.read(size).tobytes()
else:
marker_high = marker & 0xF0
if marker_high == 0x80:
return decode(self.read(marker & 0x0F), "utf-8")
elif marker == 0xD0:
size, = struct_unpack(">B", self.read(1))
return decode(self.read(size), "utf-8")
elif marker == 0xD1:
size, = struct_unpack(">H", self.read(2))
return decode(self.read(size), "utf-8")
elif marker == 0xD2:
size, = struct_unpack(">I", self.read(4))
return decode(self.read(size), "utf-8")
elif 0x90 <= marker <= 0x9F or 0xD4 <= marker <= 0xD7:
return self._unpack_list(marker)
elif 0xA0 <= marker <= 0xAF or 0xD8 <= marker <= 0xDB:
return self._unpack_map(marker)
elif 0xB0 <= marker <= 0xBF or 0xDC <= marker <= 0xDD:
size, tag = self._unpack_structure_header(marker)
value = Structure(tag, *([None] * size))
for i in range(len(value)):
value[i] = self._unpack()
return value
elif marker == 0xDF:
return EndOfStream
else:
raise RuntimeError("Unknown PackStream marker %02X" % marker)
def unpack_list(self):
marker = self.read_int()
return self._unpack_list(marker)
def _unpack_list(self, marker):
marker_high = marker & 0xF0
if marker_high == 0x90:
size = marker & 0x0F
if size == 0:
return []
elif size == 1:
return [self._unpack()]
else:
return [self._unpack() for _ in range(size)]
elif marker == 0xD4:
size, = struct_unpack(">B", self.read(1))
return [self._unpack() for _ in range(size)]
elif marker == 0xD5:
size, = struct_unpack(">H", self.read(2))
return [self._unpack() for _ in range(size)]
elif marker == 0xD6:
size, = struct_unpack(">I", self.read(4))
return [self._unpack() for _ in range(size)]
elif marker == 0xD7:
value = []
item = None
while item is not EndOfStream:
item = self._unpack()
if item is not EndOfStream:
value.append(item)
return value
else:
return None
def unpack_map(self):
marker = self.read_int()
return self._unpack_map(marker)
def _unpack_map(self, marker):
marker_high = marker & 0xF0
if marker_high == 0xA0:
size = marker & 0x0F
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xD8:
size, = struct_unpack(">B", self.read(1))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xD9:
size, = struct_unpack(">H", self.read(2))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xDA:
size, = struct_unpack(">I", self.read(4))
value = {}
for _ in range(size):
key = self._unpack()
value[key] = self._unpack()
return value
elif marker == 0xDB:
value = {}
key = None
while key is not EndOfStream:
key = self._unpack()
if key is not EndOfStream:
value[key] = self._unpack()
return value
else:
return None
def unpack_structure_header(self):
marker = self.read_int()
if marker == -1:
return None, None
else:
return self._unpack_structure_header(marker)
def _unpack_structure_header(self, marker):
marker_high = marker & 0xF0
if marker_high == 0xB0:
signature = self.read(1).tobytes()
return marker & 0x0F, signature
elif marker == 0xDC:
size, = struct_unpack(">B", self.read(1))
signature = self.read(1).tobytes()
return size, signature
elif marker == 0xDD:
size, = struct_unpack(">H", self.read(2))
signature = self.read(1).tobytes()
return size, signature
else:
raise RuntimeError("Expected structure, found marker %02X" % marker)
| true
| true
|
1c42a692e9ca72a0c30ddf8303060417384e81f4
| 1,477
|
py
|
Python
|
final_project/machinetranslation/translator.py
|
INKI-LEE/xzceb-flask_eng_fr
|
956eb3e412300e48d9cda44ab26c95dfd60f572b
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
INKI-LEE/xzceb-flask_eng_fr
|
956eb3e412300e48d9cda44ab26c95dfd60f572b
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
INKI-LEE/xzceb-flask_eng_fr
|
956eb3e412300e48d9cda44ab26c95dfd60f572b
|
[
"Apache-2.0"
] | null | null | null |
""" Translator Function that englishToFrench , frenchToEnglish """
#import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
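# Watson Language Translator credentials are read from the environment
# (optionally populated from a .env file by load_dotenv()); both 'apikey'
# and 'url' must be set, otherwise the os.environ lookups below raise KeyError.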
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
#languages = language_translator.list_languages().get_result()
#print(json.dumps(languages, indent=2))
def englishToFrench(english_text):
""" englishToFrench Function that translates English to French """
if english_text=="":
french_text=""
else:
translation_response = language_translator.translate(text=english_text, model_id="en-fr")
translation = translation_response.get_result()
french_text = translation['translations'][0]['translation']
return french_text
def frenchToEnglish(french_text):
""" englishToFrench Function that translates French to English """
if french_text=="":
english_text=""
else:
translation_response = language_translator.translate(text=french_text, model_id="fr-en")
translation = translation_response.get_result()
english_text = translation['translations'][0]['translation']
return english_text
#print(englishToFrench("hello"))
#print(frenchToEnglish("Bonjour"))
| 30.770833
| 96
| 0.748815
|
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
def englishToFrench(english_text):
if english_text=="":
french_text=""
else:
translation_response = language_translator.translate(text=english_text, model_id="en-fr")
translation = translation_response.get_result()
french_text = translation['translations'][0]['translation']
return french_text
def frenchToEnglish(french_text):
if french_text=="":
english_text=""
else:
translation_response = language_translator.translate(text=french_text, model_id="fr-en")
translation = translation_response.get_result()
english_text = translation['translations'][0]['translation']
return english_text
| true
| true
|
1c42a7bb962587e69a70ca8c8836c686c53ab380
| 4,220
|
py
|
Python
|
FWCore/Integration/test/readSubProcessOutput_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
FWCore/Integration/test/readSubProcessOutput_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
FWCore/Integration/test/readSubProcessOutput_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
process = cms.Process("READ")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:testSubProcess.root")
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string(
'readSubprocessOutput.root'
)
)
# Reusing some code I used for testing merging, although in this
# context it has nothing to do with merging.
# Here we are checking the event, run, and lumi products
# from the last subprocess in the chain of subprocesses
# are there.
process.testproducts = cms.EDAnalyzer("TestMergeResults",
expectedBeginRunProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
10001, 10002, 10003, # * begin run 1
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 1
10001, 10002, 10003, # * begin run 2
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 2
10001, 10002, 10003, # * begin run 3
10001, 10002, 10003, # * events
10001, 10002, 10003 # end run 3
),
expectedEndRunProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
100001, 100002, 100003, # begin run 1
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 1
100001, 100002, 100003, # begin run 2
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 2
100001, 100002, 100003, # begin run 3
100001, 100002, 100003, # * events
100001, 100002, 100003 # * end run 3
),
expectedBeginLumiProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
101, 102, 103, # * begin run 1 lumi 1
101, 102, 103, # * events
101, 102, 103 # end run 1 lumi 1
# There are more, but all with the same pattern as the first
),
expectedEndLumiProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
1001, 1002, 1003, # begin run 1 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003 # * end run 1 lumi 1
),
expectedProcessHistoryInRuns = cms.untracked.vstring(
'PROD', # Run 1
'PROD2',
'READ',
'PROD', # Run 2
'PROD2',
'READ',
'PROD', # Run 3
'PROD2',
'READ'
),
verbose = cms.untracked.bool(True)
)
process.test = cms.EDAnalyzer('RunLumiEventAnalyzer',
verbose = cms.untracked.bool(True),
expectedRunLumiEvents = cms.untracked.vuint32(
1, 0, 0,
1, 1, 0,
1, 1, 1,
1, 1, 2,
1, 1, 3,
1, 1, 4,
1, 1, 0,
1, 2, 0,
1, 2, 5,
1, 2, 6,
1, 2, 7,
1, 2, 8,
1, 2, 0,
1, 3, 0,
1, 3, 9,
1, 3, 10,
1, 3, 0,
1, 0, 0,
2, 0, 0,
2, 1, 0,
2, 1, 1,
2, 1, 2,
2, 1, 3,
2, 1, 4,
2, 1, 0,
2, 2, 0,
2, 2, 5,
2, 2, 6,
2, 2, 7,
2, 2, 8,
2, 2, 0,
2, 3, 0,
2, 3, 9,
2, 3, 10,
2, 3, 0,
2, 0, 0,
3, 0, 0,
3, 1, 0,
3, 1, 1,
3, 1, 2,
3, 1, 3,
3, 1, 4,
3, 1, 0,
3, 2, 0,
3, 2, 5,
3, 2, 6,
3, 2, 7,
3, 2, 8,
3, 2, 0,
3, 3, 0,
3, 3, 9,
3, 3, 10,
3, 3, 0,
3, 0, 0
)
)
process.path1 = cms.Path(process.test*process.testproducts)
process.ep = cms.EndPath(process.out)
read2Process = cms.Process("READ2")
process.addSubProcess(cms.SubProcess(read2Process,
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_putInt2_*_*"
)
))
read2Process.getInt = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag(
cms.InputTag("putInt3")
),
expectedSum = cms.untracked.int32(180),
inputTagsNotFound = cms.untracked.VInputTag(
cms.InputTag("putInt2")
)
)
read2Process.path1 = cms.Path(read2Process.getInt)
| 25.421687
| 68
| 0.502133
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("READ")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:testSubProcess.root")
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string(
'readSubprocessOutput.root'
)
)
process.testproducts = cms.EDAnalyzer("TestMergeResults",
expectedBeginRunProd = cms.untracked.vint32(
0, 0, 0,
0, 0, 0,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003,
10001, 10002, 10003
),
expectedEndRunProd = cms.untracked.vint32(
0, 0, 0,
0, 0, 0,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003,
100001, 100002, 100003
),
expectedBeginLumiProd = cms.untracked.vint32(
0, 0, 0,
0, 0, 0,
101, 102, 103,
101, 102, 103,
101, 102, 103
),
expectedEndLumiProd = cms.untracked.vint32(
0, 0, 0,
0, 0, 0,
1001, 1002, 1003,
1001, 1002, 1003,
1001, 1002, 1003
),
expectedProcessHistoryInRuns = cms.untracked.vstring(
'PROD',
'PROD2',
'READ',
'PROD',
'PROD2',
'READ',
'PROD',
'PROD2',
'READ'
),
verbose = cms.untracked.bool(True)
)
process.test = cms.EDAnalyzer('RunLumiEventAnalyzer',
verbose = cms.untracked.bool(True),
expectedRunLumiEvents = cms.untracked.vuint32(
1, 0, 0,
1, 1, 0,
1, 1, 1,
1, 1, 2,
1, 1, 3,
1, 1, 4,
1, 1, 0,
1, 2, 0,
1, 2, 5,
1, 2, 6,
1, 2, 7,
1, 2, 8,
1, 2, 0,
1, 3, 0,
1, 3, 9,
1, 3, 10,
1, 3, 0,
1, 0, 0,
2, 0, 0,
2, 1, 0,
2, 1, 1,
2, 1, 2,
2, 1, 3,
2, 1, 4,
2, 1, 0,
2, 2, 0,
2, 2, 5,
2, 2, 6,
2, 2, 7,
2, 2, 8,
2, 2, 0,
2, 3, 0,
2, 3, 9,
2, 3, 10,
2, 3, 0,
2, 0, 0,
3, 0, 0,
3, 1, 0,
3, 1, 1,
3, 1, 2,
3, 1, 3,
3, 1, 4,
3, 1, 0,
3, 2, 0,
3, 2, 5,
3, 2, 6,
3, 2, 7,
3, 2, 8,
3, 2, 0,
3, 3, 0,
3, 3, 9,
3, 3, 10,
3, 3, 0,
3, 0, 0
)
)
process.path1 = cms.Path(process.test*process.testproducts)
process.ep = cms.EndPath(process.out)
read2Process = cms.Process("READ2")
process.addSubProcess(cms.SubProcess(read2Process,
outputCommands = cms.untracked.vstring(
"keep *",
"drop *_putInt2_*_*"
)
))
read2Process.getInt = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag(
cms.InputTag("putInt3")
),
expectedSum = cms.untracked.int32(180),
inputTagsNotFound = cms.untracked.VInputTag(
cms.InputTag("putInt2")
)
)
read2Process.path1 = cms.Path(read2Process.getInt)
| true
| true
|
1c42a87c9ff7574326b25e2c3e6cf0edcb5bef4e
| 9,980
|
py
|
Python
|
paddlespeech/t2s/exps/synthesize.py
|
phecda-xu/PaddleSpeech
|
6bf0d3bf57229091a74912633e837dabc6215c86
|
[
"Apache-2.0"
] | 1
|
2022-02-26T01:48:00.000Z
|
2022-02-26T01:48:00.000Z
|
paddlespeech/t2s/exps/synthesize.py
|
phecda-xu/PaddleSpeech
|
6bf0d3bf57229091a74912633e837dabc6215c86
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/t2s/exps/synthesize.py
|
phecda-xu/PaddleSpeech
|
6bf0d3bf57229091a74912633e837dabc6215c86
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.t2s.datasets.data_table import DataTable
from paddlespeech.t2s.modules.normalizer import ZScore
from paddlespeech.t2s.utils import str2bool
model_alias = {
# acoustic model
"speedyspeech":
"paddlespeech.t2s.models.speedyspeech:SpeedySpeech",
"speedyspeech_inference":
"paddlespeech.t2s.models.speedyspeech:SpeedySpeechInference",
"fastspeech2":
"paddlespeech.t2s.models.fastspeech2:FastSpeech2",
"fastspeech2_inference":
"paddlespeech.t2s.models.fastspeech2:FastSpeech2Inference",
"tacotron2":
"paddlespeech.t2s.models.tacotron2:Tacotron2",
"tacotron2_inference":
"paddlespeech.t2s.models.tacotron2:Tacotron2Inference",
# voc
"pwgan":
"paddlespeech.t2s.models.parallel_wavegan:PWGGenerator",
"pwgan_inference":
"paddlespeech.t2s.models.parallel_wavegan:PWGInference",
"mb_melgan":
"paddlespeech.t2s.models.melgan:MelGANGenerator",
"mb_melgan_inference":
"paddlespeech.t2s.models.melgan:MelGANInference",
}
def evaluate(args):
# dataloader has been too verbose
logging.getLogger("DataLoader").disabled = True
# construct dataset for evaluation
with jsonlines.open(args.test_metadata, 'r') as reader:
test_metadata = list(reader)
# Init body.
with open(args.am_config) as f:
am_config = CfgNode(yaml.safe_load(f))
with open(args.voc_config) as f:
voc_config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(am_config)
print(voc_config)
# construct dataset for evaluation
# model: {model_name}_{dataset}
am_name = args.am[:args.am.rindex('_')]
am_dataset = args.am[args.am.rindex('_') + 1:]
if am_name == 'fastspeech2':
fields = ["utt_id", "text"]
spk_num = None
if am_dataset in {"aishell3", "vctk"} and args.speaker_dict:
print("multiple speaker fastspeech2!")
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
spk_num = len(spk_id)
fields += ["spk_id"]
elif args.voice_cloning:
print("voice cloning!")
fields += ["spk_emb"]
else:
print("single speaker fastspeech2!")
print("spk_num:", spk_num)
elif am_name == 'speedyspeech':
fields = ["utt_id", "phones", "tones"]
elif am_name == 'tacotron2':
fields = ["utt_id", "text"]
if args.voice_cloning:
print("voice cloning!")
fields += ["spk_emb"]
test_dataset = DataTable(data=test_metadata, fields=fields)
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
tone_size = None
if args.tones_dict:
with open(args.tones_dict, "r") as f:
tone_id = [line.strip().split() for line in f.readlines()]
tone_size = len(tone_id)
print("tone_size:", tone_size)
# acoustic model
odim = am_config.n_mels
am_class = dynamic_import(am_name, model_alias)
am_inference_class = dynamic_import(am_name + '_inference', model_alias)
if am_name == 'fastspeech2':
am = am_class(
idim=vocab_size, odim=odim, spk_num=spk_num, **am_config["model"])
elif am_name == 'speedyspeech':
am = am_class(
vocab_size=vocab_size, tone_size=tone_size, **am_config["model"])
elif am_name == 'tacotron2':
am = am_class(idim=vocab_size, odim=odim, **am_config["model"])
am.set_state_dict(paddle.load(args.am_ckpt)["main_params"])
am.eval()
am_mu, am_std = np.load(args.am_stat)
am_mu = paddle.to_tensor(am_mu)
am_std = paddle.to_tensor(am_std)
am_normalizer = ZScore(am_mu, am_std)
am_inference = am_inference_class(am_normalizer, am)
print("am_inference.training0:", am_inference.training)
am_inference.eval()
print("acoustic model done!")
# vocoder
# model: {model_name}_{dataset}
voc_name = args.voc[:args.voc.rindex('_')]
voc_class = dynamic_import(voc_name, model_alias)
voc_inference_class = dynamic_import(voc_name + '_inference', model_alias)
voc = voc_class(**voc_config["generator_params"])
voc.set_state_dict(paddle.load(args.voc_ckpt)["generator_params"])
voc.remove_weight_norm()
voc.eval()
voc_mu, voc_std = np.load(args.voc_stat)
voc_mu = paddle.to_tensor(voc_mu)
voc_std = paddle.to_tensor(voc_std)
voc_normalizer = ZScore(voc_mu, voc_std)
voc_inference = voc_inference_class(voc_normalizer, voc)
print("voc_inference.training0:", voc_inference.training)
voc_inference.eval()
print("voc done!")
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
for datum in test_dataset:
utt_id = datum["utt_id"]
with paddle.no_grad():
# acoustic model
if am_name == 'fastspeech2':
phone_ids = paddle.to_tensor(datum["text"])
spk_emb = None
spk_id = None
# multi speaker
if args.voice_cloning and "spk_emb" in datum:
spk_emb = paddle.to_tensor(np.load(datum["spk_emb"]))
elif "spk_id" in datum:
spk_id = paddle.to_tensor(datum["spk_id"])
mel = am_inference(phone_ids, spk_id=spk_id, spk_emb=spk_emb)
elif am_name == 'speedyspeech':
phone_ids = paddle.to_tensor(datum["phones"])
tone_ids = paddle.to_tensor(datum["tones"])
mel = am_inference(phone_ids, tone_ids)
elif am_name == 'tacotron2':
phone_ids = paddle.to_tensor(datum["text"])
spk_emb = None
# multi speaker
if args.voice_cloning and "spk_emb" in datum:
spk_emb = paddle.to_tensor(np.load(datum["spk_emb"]))
mel = am_inference(phone_ids, spk_emb=spk_emb)
# vocoder
wav = voc_inference(mel)
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=am_config.fs)
print(f"{utt_id} done!")
def main():
# parse args and config and redirect to train_sp
parser = argparse.ArgumentParser(
description="Synthesize with acoustic model & vocoder")
# acoustic model
parser.add_argument(
'--am',
type=str,
default='fastspeech2_csmsc',
choices=[
'speedyspeech_csmsc', 'fastspeech2_csmsc', 'fastspeech2_ljspeech',
'fastspeech2_aishell3', 'fastspeech2_vctk', 'tacotron2_csmsc',
'tacotron2_ljspeech', 'tacotron2_aishell3'
],
help='Choose acoustic model type of tts task.')
parser.add_argument(
'--am_config',
type=str,
default=None,
help='Config of acoustic model. Use default config when it is None.')
parser.add_argument(
'--am_ckpt',
type=str,
default=None,
help='Checkpoint file of acoustic model.')
parser.add_argument(
"--am_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training acoustic model."
)
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--tones_dict", type=str, default=None, help="tone vocabulary file.")
parser.add_argument(
"--speaker_dict", type=str, default=None, help="speaker id map file.")
parser.add_argument(
"--voice-cloning",
type=str2bool,
default=False,
help="whether training voice cloning model.")
# vocoder
parser.add_argument(
'--voc',
type=str,
default='pwgan_csmsc',
choices=[
'pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', 'pwgan_vctk',
'mb_melgan_csmsc'
],
help='Choose vocoder type of tts task.')
parser.add_argument(
'--voc_config',
type=str,
default=None,
help='Config of voc. Use default config when it is None.')
parser.add_argument(
'--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.')
parser.add_argument(
"--voc_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training voc."
)
# other
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
parser.add_argument("--test_metadata", type=str, help="test metadata.")
parser.add_argument("--output_dir", type=str, help="output dir.")
args = parser.parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
print("ngpu should >= 0 !")
evaluate(args)
if __name__ == "__main__":
main()
| 34.895105
| 102
| 0.63517
|
import argparse
import logging
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.t2s.datasets.data_table import DataTable
from paddlespeech.t2s.modules.normalizer import ZScore
from paddlespeech.t2s.utils import str2bool
model_alias = {
"speedyspeech":
"paddlespeech.t2s.models.speedyspeech:SpeedySpeech",
"speedyspeech_inference":
"paddlespeech.t2s.models.speedyspeech:SpeedySpeechInference",
"fastspeech2":
"paddlespeech.t2s.models.fastspeech2:FastSpeech2",
"fastspeech2_inference":
"paddlespeech.t2s.models.fastspeech2:FastSpeech2Inference",
"tacotron2":
"paddlespeech.t2s.models.tacotron2:Tacotron2",
"tacotron2_inference":
"paddlespeech.t2s.models.tacotron2:Tacotron2Inference",
"pwgan":
"paddlespeech.t2s.models.parallel_wavegan:PWGGenerator",
"pwgan_inference":
"paddlespeech.t2s.models.parallel_wavegan:PWGInference",
"mb_melgan":
"paddlespeech.t2s.models.melgan:MelGANGenerator",
"mb_melgan_inference":
"paddlespeech.t2s.models.melgan:MelGANInference",
}
def evaluate(args):
logging.getLogger("DataLoader").disabled = True
with jsonlines.open(args.test_metadata, 'r') as reader:
test_metadata = list(reader)
with open(args.am_config) as f:
am_config = CfgNode(yaml.safe_load(f))
with open(args.voc_config) as f:
voc_config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(am_config)
print(voc_config)
am_name = args.am[:args.am.rindex('_')]
am_dataset = args.am[args.am.rindex('_') + 1:]
if am_name == 'fastspeech2':
fields = ["utt_id", "text"]
spk_num = None
if am_dataset in {"aishell3", "vctk"} and args.speaker_dict:
print("multiple speaker fastspeech2!")
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
spk_num = len(spk_id)
fields += ["spk_id"]
elif args.voice_cloning:
print("voice cloning!")
fields += ["spk_emb"]
else:
print("single speaker fastspeech2!")
print("spk_num:", spk_num)
elif am_name == 'speedyspeech':
fields = ["utt_id", "phones", "tones"]
elif am_name == 'tacotron2':
fields = ["utt_id", "text"]
if args.voice_cloning:
print("voice cloning!")
fields += ["spk_emb"]
test_dataset = DataTable(data=test_metadata, fields=fields)
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
tone_size = None
if args.tones_dict:
with open(args.tones_dict, "r") as f:
tone_id = [line.strip().split() for line in f.readlines()]
tone_size = len(tone_id)
print("tone_size:", tone_size)
odim = am_config.n_mels
am_class = dynamic_import(am_name, model_alias)
am_inference_class = dynamic_import(am_name + '_inference', model_alias)
if am_name == 'fastspeech2':
am = am_class(
idim=vocab_size, odim=odim, spk_num=spk_num, **am_config["model"])
elif am_name == 'speedyspeech':
am = am_class(
vocab_size=vocab_size, tone_size=tone_size, **am_config["model"])
elif am_name == 'tacotron2':
am = am_class(idim=vocab_size, odim=odim, **am_config["model"])
am.set_state_dict(paddle.load(args.am_ckpt)["main_params"])
am.eval()
am_mu, am_std = np.load(args.am_stat)
am_mu = paddle.to_tensor(am_mu)
am_std = paddle.to_tensor(am_std)
am_normalizer = ZScore(am_mu, am_std)
am_inference = am_inference_class(am_normalizer, am)
print("am_inference.training0:", am_inference.training)
am_inference.eval()
print("acoustic model done!")
voc_name = args.voc[:args.voc.rindex('_')]
voc_class = dynamic_import(voc_name, model_alias)
voc_inference_class = dynamic_import(voc_name + '_inference', model_alias)
voc = voc_class(**voc_config["generator_params"])
voc.set_state_dict(paddle.load(args.voc_ckpt)["generator_params"])
voc.remove_weight_norm()
voc.eval()
voc_mu, voc_std = np.load(args.voc_stat)
voc_mu = paddle.to_tensor(voc_mu)
voc_std = paddle.to_tensor(voc_std)
voc_normalizer = ZScore(voc_mu, voc_std)
voc_inference = voc_inference_class(voc_normalizer, voc)
print("voc_inference.training0:", voc_inference.training)
voc_inference.eval()
print("voc done!")
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
for datum in test_dataset:
utt_id = datum["utt_id"]
with paddle.no_grad():
if am_name == 'fastspeech2':
phone_ids = paddle.to_tensor(datum["text"])
spk_emb = None
spk_id = None
if args.voice_cloning and "spk_emb" in datum:
spk_emb = paddle.to_tensor(np.load(datum["spk_emb"]))
elif "spk_id" in datum:
spk_id = paddle.to_tensor(datum["spk_id"])
mel = am_inference(phone_ids, spk_id=spk_id, spk_emb=spk_emb)
elif am_name == 'speedyspeech':
phone_ids = paddle.to_tensor(datum["phones"])
tone_ids = paddle.to_tensor(datum["tones"])
mel = am_inference(phone_ids, tone_ids)
elif am_name == 'tacotron2':
phone_ids = paddle.to_tensor(datum["text"])
spk_emb = None
if args.voice_cloning and "spk_emb" in datum:
spk_emb = paddle.to_tensor(np.load(datum["spk_emb"]))
mel = am_inference(phone_ids, spk_emb=spk_emb)
wav = voc_inference(mel)
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=am_config.fs)
print(f"{utt_id} done!")
def main():
parser = argparse.ArgumentParser(
description="Synthesize with acoustic model & vocoder")
parser.add_argument(
'--am',
type=str,
default='fastspeech2_csmsc',
choices=[
'speedyspeech_csmsc', 'fastspeech2_csmsc', 'fastspeech2_ljspeech',
'fastspeech2_aishell3', 'fastspeech2_vctk', 'tacotron2_csmsc',
'tacotron2_ljspeech', 'tacotron2_aishell3'
],
help='Choose acoustic model type of tts task.')
parser.add_argument(
'--am_config',
type=str,
default=None,
        help='Config of acoustic model. Use default config when it is None.')
parser.add_argument(
'--am_ckpt',
type=str,
default=None,
help='Checkpoint file of acoustic model.')
parser.add_argument(
"--am_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training acoustic model."
)
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--tones_dict", type=str, default=None, help="tone vocabulary file.")
parser.add_argument(
"--speaker_dict", type=str, default=None, help="speaker id map file.")
parser.add_argument(
"--voice-cloning",
type=str2bool,
default=False,
help="whether training voice cloning model.")
parser.add_argument(
'--voc',
type=str,
default='pwgan_csmsc',
choices=[
'pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', 'pwgan_vctk',
'mb_melgan_csmsc'
],
help='Choose vocoder type of tts task.')
parser.add_argument(
'--voc_config',
type=str,
default=None,
        help='Config of voc. Use default config when it is None.')
parser.add_argument(
'--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.')
parser.add_argument(
"--voc_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training voc."
)
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
parser.add_argument("--test_metadata", type=str, help="test metadata.")
parser.add_argument("--output_dir", type=str, help="output dir.")
args = parser.parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
        print("ngpu should be >= 0 !")
evaluate(args)
if __name__ == "__main__":
main()
| true
| true
|
1c42a96d611af83e23e1b80282feb7f487a972f8
| 9,681
|
py
|
Python
|
lib/python2.7/site-packages/networkx/algorithms/coloring/greedy_coloring.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 15
|
2018-04-26T08:17:18.000Z
|
2021-03-05T08:44:13.000Z
|
lib/python2.7/site-packages/networkx/algorithms/coloring/greedy_coloring.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | null | null | null |
lib/python2.7/site-packages/networkx/algorithms/coloring/greedy_coloring.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 6
|
2018-04-12T15:49:27.000Z
|
2022-01-27T12:34:50.000Z
|
# -*- coding: utf-8 -*-
"""
Greedy graph coloring using various strategies.
"""
# Copyright (C) 2014 by
# Christian Olsson <chro@itu.dk>
# Jan Aagaard Meier <jmei@itu.dk>
# Henrik Haugbølle <hhau@itu.dk>
# All rights reserved.
# BSD license.
import networkx as nx
import random
import itertools
from . import greedy_coloring_with_interchange as _interchange
__author__ = "\n".join(["Christian Olsson <chro@itu.dk>",
"Jan Aagaard Meier <jmei@itu.dk>",
"Henrik Haugbølle <hhau@itu.dk>"])
__all__ = [
'greedy_color',
'strategy_largest_first',
'strategy_random_sequential',
'strategy_smallest_last',
'strategy_independent_set',
'strategy_connected_sequential',
'strategy_connected_sequential_dfs',
'strategy_connected_sequential_bfs',
'strategy_saturation_largest_first'
]
def min_degree_node(G):
return min(G, key=G.degree)
def max_degree_node(G):
return max(G, key=G.degree)
def strategy_largest_first(G, colors):
"""
    Largest first (lf) ordering. Orders the nodes by largest degree
first.
"""
nodes = G.nodes()
nodes.sort(key=lambda node: -G.degree(node))
return nodes
def strategy_random_sequential(G, colors):
"""
    Random sequential (RS) ordering. Scrambles the nodes into a random ordering.
"""
nodes = G.nodes()
random.shuffle(nodes)
return nodes
def strategy_smallest_last(G, colors):
"""
    Smallest last (sl). Picks the node with the smallest degree first,
    removes it from the graph, and repeats with the new smallest-degree
    node. Once the graph is empty, the reverse of the ordering built is
    returned.
"""
len_g = len(G)
available_g = G.copy()
nodes = [None] * len_g
for i in range(len_g):
node = min_degree_node(available_g)
available_g.remove_node(node)
nodes[len_g - i - 1] = node
return nodes
def strategy_independent_set(G, colors):
"""
    Greedy independent set ordering (GIS). Generates a maximal independent
    set of nodes, and assigns the same color to every node in this set.
    The set is then removed from the graph, and the algorithm repeats on
    the remaining nodes.
"""
len_g = len(G)
no_colored = 0
k = 0
uncolored_g = G.copy()
while no_colored < len_g: # While there are uncolored nodes
available_g = uncolored_g.copy()
while len(available_g): # While there are still nodes available
node = min_degree_node(available_g)
colors[node] = k # assign color to values
no_colored += 1
uncolored_g.remove_node(node)
# Remove node and its neighbors from available
available_g.remove_nodes_from(available_g.neighbors(node) + [node])
k += 1
return None
def strategy_connected_sequential_bfs(G, colors):
"""
    Connected sequential ordering (CS). Yields nodes in such an order that
    each node, except the first one, has at least one neighbour in the
    preceding sequence. The sequence is generated using BFS.
"""
return strategy_connected_sequential(G, colors, 'bfs')
def strategy_connected_sequential_dfs(G, colors):
"""
    Connected sequential ordering (CS). Yields nodes in such an order that
    each node, except the first one, has at least one neighbour in the
    preceding sequence. The sequence is generated using DFS.
"""
return strategy_connected_sequential(G, colors, 'dfs')
def strategy_connected_sequential(G, colors, traversal='bfs'):
"""
    Connected sequential ordering (CS). Yields nodes in such an order that
    each node, except the first one, has at least one neighbour in the
    preceding sequence. The sequence can be generated using either BFS or
    DFS search (via the strategy_connected_sequential_bfs and
    strategy_connected_sequential_dfs methods). The default is BFS.
"""
for component_graph in nx.connected_component_subgraphs(G):
source = component_graph.nodes()[0]
yield source # Pick the first node as source
if traversal == 'bfs':
tree = nx.bfs_edges(component_graph, source)
elif traversal == 'dfs':
tree = nx.dfs_edges(component_graph, source)
else:
raise nx.NetworkXError(
'Please specify bfs or dfs for connected sequential ordering')
for (_, end) in tree:
# Then yield nodes in the order traversed by either BFS or DFS
yield end
def strategy_saturation_largest_first(G, colors):
"""
Saturation largest first (SLF). Also known as degree saturation (DSATUR).
"""
len_g = len(G)
no_colored = 0
distinct_colors = {}
for node in G.nodes_iter():
distinct_colors[node] = set()
while no_colored != len_g:
if no_colored == 0:
# When sat. for all nodes is 0, yield the node with highest degree
no_colored += 1
node = max_degree_node(G)
yield node
for neighbour in G.neighbors_iter(node):
distinct_colors[neighbour].add(0)
else:
highest_saturation = -1
highest_saturation_nodes = []
for node, distinct in distinct_colors.items():
if node not in colors: # If the node is not already colored
saturation = len(distinct)
if saturation > highest_saturation:
highest_saturation = saturation
highest_saturation_nodes = [node]
elif saturation == highest_saturation:
highest_saturation_nodes.append(node)
if len(highest_saturation_nodes) == 1:
node = highest_saturation_nodes[0]
else:
# Return the node with highest degree
max_degree = -1
max_node = None
for node in highest_saturation_nodes:
degree = G.degree(node)
if degree > max_degree:
max_node = node
max_degree = degree
node = max_node
no_colored += 1
yield node
color = colors[node]
for neighbour in G.neighbors_iter(node):
distinct_colors[neighbour].add(color)
def greedy_color(G, strategy=strategy_largest_first, interchange=False):
"""Color a graph using various strategies of greedy graph coloring.
The strategies are described in [1]_.
Attempts to color a graph using as few colors as possible, where no
    neighbours of a node can have the same color as the node itself.
Parameters
----------
G : NetworkX graph
strategy : function(G, colors)
A function that provides the coloring strategy, by returning nodes
in the ordering they should be colored. G is the graph, and colors
is a dict of the currently assigned colors, keyed by nodes.
    You can pass your own ordering function, or use one of the built-in strategies:
* strategy_largest_first
* strategy_random_sequential
* strategy_smallest_last
* strategy_independent_set
* strategy_connected_sequential_bfs
* strategy_connected_sequential_dfs
* strategy_connected_sequential
(alias of strategy_connected_sequential_bfs)
* strategy_saturation_largest_first (also known as DSATUR)
interchange: bool
Will use the color interchange algorithm described by [2]_ if set
to true.
Note that saturation largest first and independent set do not
work with interchange. Furthermore, if you use interchange with
your own strategy function, you cannot rely on the values in the
colors argument.
Returns
-------
A dictionary with keys representing nodes and values representing
corresponding coloring.
Examples
--------
>>> G = nx.cycle_graph(4)
>>> d = nx.coloring.greedy_color(G, strategy=nx.coloring.strategy_largest_first)
>>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}]
True
References
----------
.. [1] Adrian Kosowski, and Krzysztof Manuszewski,
Classical Coloring of Graphs, Graph Colorings, 2-19, 2004.
ISBN 0-8218-3458-4.
.. [2] Maciej M. Syslo, Marsingh Deo, Janusz S. Kowalik,
Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
ISBN 0-486-45353-7.
"""
colors = {} # dictionary to keep track of the colors of the nodes
if len(G):
if interchange and (
strategy == strategy_independent_set or
strategy == strategy_saturation_largest_first):
raise nx.NetworkXPointlessConcept(
'Interchange is not applicable for GIS and SLF')
nodes = strategy(G, colors)
if nodes:
if interchange:
return (_interchange
.greedy_coloring_with_interchange(G, nodes))
else:
for node in nodes:
# set to keep track of colors of neighbours
neighbour_colors = set()
for neighbour in G.neighbors_iter(node):
if neighbour in colors:
neighbour_colors.add(colors[neighbour])
for color in itertools.count():
if color not in neighbour_colors:
break
# assign the node the newly found color
colors[node] = color
return colors
| 32.816949
| 84
| 0.624729
|
import networkx as nx
import random
import itertools
from . import greedy_coloring_with_interchange as _interchange
__author__ = "\n".join(["Christian Olsson <chro@itu.dk>",
"Jan Aagaard Meier <jmei@itu.dk>",
"Henrik Haugbølle <hhau@itu.dk>"])
__all__ = [
'greedy_color',
'strategy_largest_first',
'strategy_random_sequential',
'strategy_smallest_last',
'strategy_independent_set',
'strategy_connected_sequential',
'strategy_connected_sequential_dfs',
'strategy_connected_sequential_bfs',
'strategy_saturation_largest_first'
]
def min_degree_node(G):
return min(G, key=G.degree)
def max_degree_node(G):
return max(G, key=G.degree)
def strategy_largest_first(G, colors):
nodes = G.nodes()
nodes.sort(key=lambda node: -G.degree(node))
return nodes
def strategy_random_sequential(G, colors):
nodes = G.nodes()
random.shuffle(nodes)
return nodes
def strategy_smallest_last(G, colors):
len_g = len(G)
available_g = G.copy()
nodes = [None] * len_g
for i in range(len_g):
node = min_degree_node(available_g)
available_g.remove_node(node)
nodes[len_g - i - 1] = node
return nodes
def strategy_independent_set(G, colors):
len_g = len(G)
no_colored = 0
k = 0
uncolored_g = G.copy()
while no_colored < len_g:
available_g = uncolored_g.copy()
while len(available_g):
node = min_degree_node(available_g)
colors[node] = k
no_colored += 1
uncolored_g.remove_node(node)
available_g.remove_nodes_from(available_g.neighbors(node) + [node])
k += 1
return None
def strategy_connected_sequential_bfs(G, colors):
return strategy_connected_sequential(G, colors, 'bfs')
def strategy_connected_sequential_dfs(G, colors):
return strategy_connected_sequential(G, colors, 'dfs')
def strategy_connected_sequential(G, colors, traversal='bfs'):
for component_graph in nx.connected_component_subgraphs(G):
source = component_graph.nodes()[0]
yield source
if traversal == 'bfs':
tree = nx.bfs_edges(component_graph, source)
elif traversal == 'dfs':
tree = nx.dfs_edges(component_graph, source)
else:
raise nx.NetworkXError(
'Please specify bfs or dfs for connected sequential ordering')
for (_, end) in tree:
yield end
def strategy_saturation_largest_first(G, colors):
len_g = len(G)
no_colored = 0
distinct_colors = {}
for node in G.nodes_iter():
distinct_colors[node] = set()
while no_colored != len_g:
if no_colored == 0:
no_colored += 1
node = max_degree_node(G)
yield node
for neighbour in G.neighbors_iter(node):
distinct_colors[neighbour].add(0)
else:
highest_saturation = -1
highest_saturation_nodes = []
for node, distinct in distinct_colors.items():
if node not in colors:
saturation = len(distinct)
if saturation > highest_saturation:
highest_saturation = saturation
highest_saturation_nodes = [node]
elif saturation == highest_saturation:
highest_saturation_nodes.append(node)
if len(highest_saturation_nodes) == 1:
node = highest_saturation_nodes[0]
else:
max_degree = -1
max_node = None
for node in highest_saturation_nodes:
degree = G.degree(node)
if degree > max_degree:
max_node = node
max_degree = degree
node = max_node
no_colored += 1
yield node
color = colors[node]
for neighbour in G.neighbors_iter(node):
distinct_colors[neighbour].add(color)
def greedy_color(G, strategy=strategy_largest_first, interchange=False):
colors = {}
if len(G):
if interchange and (
strategy == strategy_independent_set or
strategy == strategy_saturation_largest_first):
raise nx.NetworkXPointlessConcept(
'Interchange is not applicable for GIS and SLF')
nodes = strategy(G, colors)
if nodes:
if interchange:
return (_interchange
.greedy_coloring_with_interchange(G, nodes))
else:
for node in nodes:
neighbour_colors = set()
for neighbour in G.neighbors_iter(node):
if neighbour in colors:
neighbour_colors.add(colors[neighbour])
for color in itertools.count():
if color not in neighbour_colors:
break
colors[node] = color
return colors
| true
| true
|
1c42aa4b0857b199c7a25f26b3a6ff5382190116
| 1,728
|
py
|
Python
|
appengine/standard_python3/building-an-app/building-an-app-1/main.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 2
|
2020-09-19T04:22:52.000Z
|
2020-09-23T14:04:17.000Z
|
appengine/standard_python3/building-an-app/building-an-app-1/main.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 28
|
2020-08-20T21:36:30.000Z
|
2021-06-21T18:05:17.000Z
|
appengine/standard_python3/building-an-app/building-an-app-1/main.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 2
|
2020-09-13T03:47:22.000Z
|
2020-09-23T14:04:19.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python38_render_template]
import datetime
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def root():
# For the sake of example, use static information to inflate the template.
# This will be replaced with real information in later steps.
dummy_times = [datetime.datetime(2018, 1, 1, 10, 0, 0),
datetime.datetime(2018, 1, 2, 10, 30, 0),
datetime.datetime(2018, 1, 3, 11, 0, 0),
]
return render_template('index.html', times=dummy_times)
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
# Flask's development server will automatically serve static files in
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python38_render_template]
| 38.4
| 78
| 0.707755
|
import datetime
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def root():
dummy_times = [datetime.datetime(2018, 1, 1, 10, 0, 0),
datetime.datetime(2018, 1, 2, 10, 30, 0),
datetime.datetime(2018, 1, 3, 11, 0, 0),
]
return render_template('index.html', times=dummy_times)
if __name__ == '__main__':
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python38_render_template]
| true
| true
|
1c42ae3e0fab02a746f010480f8226dcab3ecade
| 1,942
|
py
|
Python
|
price/coinmarketcap.py
|
victoray/block-tracker-api
|
0d5918a29572b47b0fb3f205fc1ba21ad4fcca51
|
[
"MIT"
] | null | null | null |
price/coinmarketcap.py
|
victoray/block-tracker-api
|
0d5918a29572b47b0fb3f205fc1ba21ad4fcca51
|
[
"MIT"
] | null | null | null |
price/coinmarketcap.py
|
victoray/block-tracker-api
|
0d5918a29572b47b0fb3f205fc1ba21ad4fcca51
|
[
"MIT"
] | null | null | null |
import json
import logging
from typing import Dict
import pydash
from fastapi import HTTPException
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from price import settings
from price.models import Price
# FIAT
from price.redis_utils import redis_client
NGN_ID = "2819"
USD_ID = "2781"
# CRYPTO
BTC_ID = "1"
ETH_ID = "1027"
SUPPORTED_CRYPTO = [BTC_ID, ETH_ID]
# CACHE
CACHE_TIME = 6000
CMC_URL = "https://pro-api.coinmarketcap.com"
headers = {
"Accepts": "application/json",
"X-CMC_PRO_API_KEY": settings.CMC_KEY,
}
parameters = {"convert_id": f"{USD_ID}"}
session = Session()
session.headers.update(headers)
def get_latest_price(symbol: str) -> Price:
cached = redis_client.get(symbol)
if cached:
return Price.parse_obj(json.loads(cached))
try:
parameters.update({"symbol": symbol})
response = session.get(
f"{CMC_URL}/v1/cryptocurrency/quotes/latest",
params=parameters,
)
if not response.ok:
logging.error(response.text)
raise HTTPException(status_code=500, detail="Something went wrong")
response_data: Dict = response.json().get("data", dict())
symbol = symbol.upper()
symbol_ = pydash.get(response_data, f"{symbol}.symbol", "").lower()
slug = pydash.get(response_data, f"{symbol}.slug")
name = pydash.get(response_data, f"{symbol}.name")
price_usd = pydash.get(response_data, f"{symbol}.quote.{USD_ID}.price")
price = Price(
symbol=symbol_,
slug=slug,
name=name,
priceUSD=price_usd,
)
redis_client.setex(symbol, CACHE_TIME, json.dumps(price.dict()))
return price
except (ConnectionError, Timeout, TooManyRedirects) as e:
logging.error(e)
raise HTTPException(status_code=500, detail="Something went wrong")
| 25.893333
| 79
| 0.661174
|
import json
import logging
from typing import Dict
import pydash
from fastapi import HTTPException
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from price import settings
from price.models import Price
from price.redis_utils import redis_client
NGN_ID = "2819"
USD_ID = "2781"
BTC_ID = "1"
ETH_ID = "1027"
SUPPORTED_CRYPTO = [BTC_ID, ETH_ID]
CACHE_TIME = 6000
CMC_URL = "https://pro-api.coinmarketcap.com"
headers = {
"Accepts": "application/json",
"X-CMC_PRO_API_KEY": settings.CMC_KEY,
}
parameters = {"convert_id": f"{USD_ID}"}
session = Session()
session.headers.update(headers)
def get_latest_price(symbol: str) -> Price:
cached = redis_client.get(symbol)
if cached:
return Price.parse_obj(json.loads(cached))
try:
parameters.update({"symbol": symbol})
response = session.get(
f"{CMC_URL}/v1/cryptocurrency/quotes/latest",
params=parameters,
)
if not response.ok:
logging.error(response.text)
raise HTTPException(status_code=500, detail="Something went wrong")
response_data: Dict = response.json().get("data", dict())
symbol = symbol.upper()
symbol_ = pydash.get(response_data, f"{symbol}.symbol", "").lower()
slug = pydash.get(response_data, f"{symbol}.slug")
name = pydash.get(response_data, f"{symbol}.name")
price_usd = pydash.get(response_data, f"{symbol}.quote.{USD_ID}.price")
price = Price(
symbol=symbol_,
slug=slug,
name=name,
priceUSD=price_usd,
)
redis_client.setex(symbol, CACHE_TIME, json.dumps(price.dict()))
return price
except (ConnectionError, Timeout, TooManyRedirects) as e:
logging.error(e)
raise HTTPException(status_code=500, detail="Something went wrong")
| true
| true
|
1c42b0c83dde5f21f9c07838ae65e0ccb09e3d2a
| 849
|
py
|
Python
|
docs/src/classification6.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 61
|
2015-03-06T08:48:01.000Z
|
2021-04-26T16:13:07.000Z
|
docs/src/classification6.py
|
andrecamara/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 5
|
2016-09-08T15:47:00.000Z
|
2019-02-25T17:44:55.000Z
|
docs/src/classification6.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 31
|
2015-01-28T15:05:33.000Z
|
2021-04-16T19:39:48.000Z
|
from rlscore.learner import LeaveOneOutRLS
from rlscore.measure import ova_accuracy
from wine_data import load_wine
from rlscore.utilities.multiclass import to_one_vs_all
def train_rls():
X_train, Y_train, X_test, Y_test = load_wine()
#Map labels from set {1,2,3} to one-vs-all encoding
Y_train = to_one_vs_all(Y_train, False)
Y_test = to_one_vs_all(Y_test, False)
regparams = [2.**i for i in range(-15, 16)]
learner = LeaveOneOutRLS(X_train, Y_train, regparams=regparams, measure=ova_accuracy)
P_test = learner.predict(X_test)
#ova_accuracy computes one-vs-all classification accuracy directly between transformed
#class label matrix, and a matrix of predictions, where each column corresponds to a class
print("test set accuracy %f" %ova_accuracy(Y_test, P_test))
if __name__=="__main__":
train_rls()
| 42.45
| 94
| 0.756184
|
from rlscore.learner import LeaveOneOutRLS
from rlscore.measure import ova_accuracy
from wine_data import load_wine
from rlscore.utilities.multiclass import to_one_vs_all
def train_rls():
X_train, Y_train, X_test, Y_test = load_wine()
Y_train = to_one_vs_all(Y_train, False)
Y_test = to_one_vs_all(Y_test, False)
regparams = [2.**i for i in range(-15, 16)]
learner = LeaveOneOutRLS(X_train, Y_train, regparams=regparams, measure=ova_accuracy)
P_test = learner.predict(X_test)
print("test set accuracy %f" %ova_accuracy(Y_test, P_test))
if __name__=="__main__":
train_rls()
| true
| true
|
1c42b3086097a4aaa38d65dc07b57a7f3a4d5b17
| 10,360
|
py
|
Python
|
tests/gold_tests/pluginTest/slice/slice_error.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/pluginTest/slice/slice_error.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | 3
|
2017-09-22T19:18:56.000Z
|
2021-06-21T18:07:14.000Z
|
tests/gold_tests/pluginTest/slice/slice_error.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
Test.Summary = '''
Slice plugin error.log test
'''
## Test description:
# Preload the cache with the entire asset to be range requested.
# Reload remap rule with slice plugin
# Request content through the slice plugin
Test.SkipUnless(
Condition.PluginExists('slice.so'),
)
Test.ContinueOnFail = False
# configure origin server
server = Test.MakeOriginServer("server", lookup_key="{%Range}{PATH}")
# Define ATS and configure
ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True)
body = "the quick brown fox" # len 19
# default root
request_header_chk = {"headers":
"GET / HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes=0-\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_chk = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body,
}
server.addResponse("sessionlog.json", request_header_chk, response_header_chk)
blockbytes = 9
range0 = "{}-{}".format(0, blockbytes - 1)
range1 = "{}-{}".format(blockbytes, (2 * blockbytes) - 1)
body0 = body[0:blockbytes]
body1 = body[blockbytes:2 * blockbytes]
# Mismatch etag
request_header_etag0 = {"headers":
"GET /etag HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_etag0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag0"\r\n' +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_etag0, response_header_etag0)
request_header_etag1 = {"headers":
"GET /etag HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_etag1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag1"\r\n' +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_etag1, response_header_etag1)
# mismatch Last-Modified
request_header_lm0 = {"headers":
"GET /lastmodified HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_lm0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_lm0, response_header_lm0)
request_header_lm1 = {"headers":
"GET /lastmodified HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_lm1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Last-Modified: Tue, 08 Apr 2019 18:00:00 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_lm1, response_header_lm1)
# non 206 slice block
request_header_n206_0 = {"headers":
"GET /non206 HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_n206_0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag"\r\n' +
"Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_n206_0, response_header_n206_0)
# mismatch content-range
request_header_crr0 = {"headers":
"GET /crr HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_crr0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Etag: crr\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_crr0, response_header_crr0)
request_header_crr1 = {"headers":
"GET /crr HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_crr1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Etag: crr\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body) - 1) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_crr1, response_header_crr1)
ts.Setup.CopyAs('curlsort.sh', Test.RunDirectory)
curl_and_args = 'sh curlsort.sh -H "Host: www.example.com"'
# set up whole asset fetch into cache
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{}'.format(server.Variables.Port) +
' @plugin=slice.so @pparam=--test-blockbytes={}'.format(blockbytes)
)
# minimal configuration
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'slice',
'proxy.config.http.cache.http': 0,
'proxy.config.http.wait_for_cache': 0,
'proxy.config.http.insert_age_in_response': 0,
'proxy.config.http.insert_request_via_str': 0,
'proxy.config.http.insert_response_via_str': 3,
})
# Override builtin error check as these cases will fail
# taken from the slice plugin code
ts.Disk.diags_log.Content = Testers.ContainsExpression('reason="Mismatch block Etag"', "Mismatch block etag")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch block Last-Modified"', "Mismatch block Last-Modified")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Non 206 internal block response"', "Non 206 internal block response")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch/Bad block Content-Range"', "Mismatch/Bad block Content-Range")
# 0 Test - Etag mismatch test
tr = Test.AddTestRun("Etag test")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/etag.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/etag.stderr.gold"
tr.StillRunningAfter = ts
# 1 Check - diags.log message
tr = Test.AddTestRun("Etag error check")
tr.Processes.Default.Command = "grep 'Mismatch block Etag' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
# 2 Test - Last Modified mismatch test
tr = Test.AddTestRun("Last-Modified test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/lastmodified'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/lm.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/lm.stderr.gold"
tr.StillRunningAfter = ts
# 3 Check - diags.log message
tr = Test.AddTestRun("Last-Modified error check")
tr.Processes.Default.Command = "grep 'Mismatch block Last-Modified' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
# 4 Test - Non 206 mismatch test
tr = Test.AddTestRun("Non 206 test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/non206'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/non206.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/non206.stderr.gold"
tr.StillRunningAfter = ts
# 5 Check - diags.log message
tr = Test.AddTestRun("Non 206 error check")
tr.Processes.Default.Command = "grep 'Non 206 internal block response' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
# 6 Test - Block content-range
tr = Test.AddTestRun("Content-Range test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/crr'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/crr.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/crr.stderr.gold"
tr.StillRunningAfter = ts
# 7 Check - diags.log message
tr = Test.AddTestRun("Content-Range error check")
tr.Processes.Default.Command = "grep 'Mismatch/Bad block Content-Range' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
| 31.876923
| 136
| 0.699131
|
import os
import time
Test.Summary = '''
Slice plugin error.log test
'''
Test.SkipUnless(
Condition.PluginExists('slice.so'),
)
Test.ContinueOnFail = False
server = Test.MakeOriginServer("server", lookup_key="{%Range}{PATH}")
ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True)
body = "the quick brown fox"
request_header_chk = {"headers":
"GET / HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes=0-\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_chk = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body,
}
server.addResponse("sessionlog.json", request_header_chk, response_header_chk)
blockbytes = 9
range0 = "{}-{}".format(0, blockbytes - 1)
range1 = "{}-{}".format(blockbytes, (2 * blockbytes) - 1)
body0 = body[0:blockbytes]
body1 = body[blockbytes:2 * blockbytes]
request_header_etag0 = {"headers":
"GET /etag HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_etag0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag0"\r\n' +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_etag0, response_header_etag0)
request_header_etag1 = {"headers":
"GET /etag HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_etag1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag1"\r\n' +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_etag1, response_header_etag1)
request_header_lm0 = {"headers":
"GET /lastmodified HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_lm0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_lm0, response_header_lm0)
request_header_lm1 = {"headers":
"GET /lastmodified HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_lm1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Last-Modified: Tue, 08 Apr 2019 18:00:00 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_lm1, response_header_lm1)
request_header_n206_0 = {"headers":
"GET /non206 HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_n206_0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
'Etag: "etag"\r\n' +
"Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_n206_0, response_header_n206_0)
request_header_crr0 = {"headers":
"GET /crr HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range0) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_crr0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Etag: crr\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range0, len(body)) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body0,
}
server.addResponse("sessionlog.json", request_header_crr0, response_header_crr0)
request_header_crr1 = {"headers":
"GET /crr HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"Range: bytes={}\r\n".format(range1) +
"X-Slicer-Info: full content request\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_crr1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Connection: close\r\n" +
"Etag: crr\r\n" +
"Content-Range: bytes {}/{}\r\n".format(range1, len(body) - 1) +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body1,
}
server.addResponse("sessionlog.json", request_header_crr1, response_header_crr1)
ts.Setup.CopyAs('curlsort.sh', Test.RunDirectory)
curl_and_args = 'sh curlsort.sh -H "Host: www.example.com"'
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{}'.format(server.Variables.Port) +
' @plugin=slice.so @pparam=--test-blockbytes={}'.format(blockbytes)
)
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'slice',
'proxy.config.http.cache.http': 0,
'proxy.config.http.wait_for_cache': 0,
'proxy.config.http.insert_age_in_response': 0,
'proxy.config.http.insert_request_via_str': 0,
'proxy.config.http.insert_response_via_str': 3,
})
ts.Disk.diags_log.Content = Testers.ContainsExpression('reason="Mismatch block Etag"', "Mismatch block etag")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch block Last-Modified"', "Mismatch block Last-Modified")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Non 206 internal block response"', "Non 206 internal block response")
ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch/Bad block Content-Range"', "Mismatch/Bad block Content-Range")
tr = Test.AddTestRun("Etag test")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/etag.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/etag.stderr.gold"
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Etag error check")
tr.Processes.Default.Command = "grep 'Mismatch block Etag' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Last-Modified test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/lastmodified'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/lm.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/lm.stderr.gold"
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Last-Modified error check")
tr.Processes.Default.Command = "grep 'Mismatch block Last-Modified' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Non 206 test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/non206'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/non206.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/non206.stderr.gold"
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Non 206 error check")
tr.Processes.Default.Command = "grep 'Non 206 internal block response' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Content-Range test")
tr.Processes.Default.Command = curl_and_args + ' http://127.0.0.1:{}/crr'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold_error/crr.stdout.gold"
tr.Processes.Default.Streams.stderr = "gold_error/crr.stderr.gold"
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Content-Range error check")
tr.Processes.Default.Command = "grep 'Mismatch/Bad block Content-Range' {}".format(ts.Disk.diags_log.Name)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
| true
| true
|
1c42b490ea7b27440738e2963c5613c34487593a
| 8,334
|
py
|
Python
|
tools/validators/instance_validator/validate/handler.py
|
ljulliar/digitalbuildings
|
5b5be8db9e00d967911065f5247a8d39512e6504
|
[
"Apache-2.0"
] | null | null | null |
tools/validators/instance_validator/validate/handler.py
|
ljulliar/digitalbuildings
|
5b5be8db9e00d967911065f5247a8d39512e6504
|
[
"Apache-2.0"
] | null | null | null |
tools/validators/instance_validator/validate/handler.py
|
ljulliar/digitalbuildings
|
5b5be8db9e00d967911065f5247a8d39512e6504
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation Helper."""
from __future__ import print_function
from datetime import datetime
import sys
from typing import Callable, Dict, List, Optional
from validate import entity_instance
from validate import generate_universe
from validate import instance_parser
from validate import subscriber
from validate import telemetry_validator
from yamlformat.validator import presubmit_validate_types_lib as pvt
def Deserialize(
yaml_files: List[str]) -> Dict[str, entity_instance.EntityInstance]:
"""Parses a yaml configuration file and deserializes it.
Args:
yaml_files: list of building configuration files.
Returns:
    A tuple of (map of entity name to EntityInstance, config mode).
"""
print('Validating syntax please wait ...')
parser = instance_parser.InstanceParser()
for yaml_file in yaml_files:
print('Opening file: {0}, please wait ...'.format(yaml_file))
parser.AddFile(yaml_file)
parser.Finalize()
default_entity_operation = instance_parser.EntityOperation.ADD
if parser.GetConfigMode() == instance_parser.ConfigMode.UPDATE:
default_entity_operation = instance_parser.EntityOperation.UPDATE
entities = {}
for entity_name, entity_yaml in parser.GetEntities().items():
entities[entity_name] = entity_instance.EntityInstance.FromYaml(
entity_yaml, default_entity_operation)
return entities, parser.GetConfigMode()
def _ValidateConfig(
filenames: List[str],
universe: pvt.ConfigUniverse) -> List[entity_instance.EntityInstance]:
"""Runs all config validaton checks."""
print('\nLoading config files...\n')
entities, config_mode = Deserialize(filenames)
print('\nStarting config validation...\n')
helper = EntityHelper(universe)
return helper.Validate(entities, config_mode)
def _ValidateTelemetry(subscription: str, service_account: str,
entities: Dict[str, entity_instance.EntityInstance],
report_filename: str, timeout: int) -> None:
"""Runs all telemetry validation checks."""
helper = TelemetryHelper(subscription, service_account)
helper.Validate(entities, report_filename, timeout)
def RunValidation(filenames: List[str],
modified_types_filepath: str = None,
subscription: str = None,
service_account: str = None,
report_filename: str = None,
timeout: int = 60) -> None:
"""Master runner for all validations."""
if bool(subscription) != bool(service_account):
print('Subscription and a service account file are '
'both needed for the telemetry validation!')
sys.exit(0)
print('\nStarting validator...\n')
print('\nStarting universe generation...\n')
universe = generate_universe.BuildUniverse(modified_types_filepath)
if not universe:
print('\nError generating universe')
sys.exit(0)
print('\nStarting config validation...\n')
entities = _ValidateConfig(filenames, universe)
if subscription:
print('\nStarting telemetry validation...\n')
_ValidateTelemetry(subscription, service_account, entities, report_filename,
timeout)
class TelemetryHelper(object):
"""A validation helper to encapsulate telemetry validation.
Attributes:
subscription: resource string referencing the subscription to check
service_account_file: path to file with service account information
"""
def __init__(self, subscription, service_account_file):
super().__init__()
self.subscription = subscription
self.service_account_file = service_account_file
def Validate(self, entities: Dict[str, entity_instance.EntityInstance],
report_filename: str, timeout: int) -> None:
"""Validates telemetry payload received from the subscription.
Args:
entities: EntityInstance dictionary keyed by entity name
report_filename: path to write results to
timeout: number of seconds to wait for telemetry
"""
print('Connecting to pubsub subscription: ', self.subscription)
sub = subscriber.Subscriber(self.subscription, self.service_account_file)
validator = telemetry_validator.TelemetryValidator(
entities, timeout,
self.BuildTelemetryValidationCallback(report_filename))
validator.StartTimer()
sub.Listen(validator.ValidateMessage)
def BuildTelemetryValidationCallback(
self,
report_filename: Optional[str] = None
) -> Callable[[telemetry_validator.TelemetryValidator], None]:
"""Returns a callback to be called when a telemetry message is received.
Args:
report_filename: path to write results to
"""
def TelemetryValidationCallback(
validator: telemetry_validator.TelemetryValidator) -> None:
"""Callback when the telemetry validator finishes.
This could be called due to a timeout or because telemetry messages were
received and validated for every expected entity.
Args:
validator: the telemetry validator that triggered the callback.
"""
print('Generating validation report ...')
current_time = datetime.now()
timestamp = current_time.strftime('%d-%b-%Y (%H:%M:%S)')
report = '\nReport Generated at: {0}\n'.format(timestamp)
if not validator.AllEntitiesValidated():
report += ('No telemetry message was received for the following '
'entities:')
report += '\n'
for entity_name in validator.GetUnvalidatedEntityNames():
report += ' {0}\n'.format(entity_name)
report += '\nTelemetry validation errors:\n'
for error in validator.GetErrors():
report += error.GetPrintableMessage()
report += '\nTelemetry validation warnings:\n'
for warnings in validator.GetWarnings():
report += warnings.GetPrintableMessage()
if report_filename:
        with open(report_filename, 'w') as f:
f.write(report)
f.close()
else:
print('\n')
print(report)
print('Report Generated')
sys.exit(0)
return TelemetryValidationCallback
class EntityHelper(object):
"""A validation helper to coordinate the various steps of the validation.
Attributes:
universe: ConfigUniverse to validate against
"""
def __init__(self, universe: pvt.ConfigUniverse):
super().__init__()
self.universe = universe
def Validate(
self, entities: Dict[str, entity_instance.EntityInstance],
config_mode: instance_parser.ConfigMode
) -> Dict[str, entity_instance.EntityInstance]:
"""Validates entity instances that are already deserialized.
Args:
entities: a list of entity instances
config_mode: processing mode of the configuration
Returns:
A dictionary containing valid entities by name
Raises:
SyntaxError: If no building is found in the config
"""
print('Validating entities ...')
building_found = False
valid_entities = {}
validator = entity_instance.CombinationValidator(self.universe, config_mode,
entities)
for entity_name, current_entity in entities.items():
if (current_entity.operation is not instance_parser.EntityOperation.DELETE
and current_entity.type_name.lower() == 'building'):
building_found = True
if not validator.Validate(current_entity):
print(entity_name, 'is not a valid instance')
continue
      valid_entities[entity_name] = current_entity
if not building_found:
print('Config must contain a non-deleted entity with a building type')
raise SyntaxError('Building Config must contain an '
'entity with a building type')
print('All entities validated')
return valid_entities
| 35.46383
| 80
| 0.705064
|
from __future__ import print_function
from datetime import datetime
import sys
from typing import Callable, Dict, List, Optional
from validate import entity_instance
from validate import generate_universe
from validate import instance_parser
from validate import subscriber
from validate import telemetry_validator
from yamlformat.validator import presubmit_validate_types_lib as pvt
def Deserialize(
yaml_files: List[str]) -> Dict[str, entity_instance.EntityInstance]:
print('Validating syntax please wait ...')
parser = instance_parser.InstanceParser()
for yaml_file in yaml_files:
print('Opening file: {0}, please wait ...'.format(yaml_file))
parser.AddFile(yaml_file)
parser.Finalize()
default_entity_operation = instance_parser.EntityOperation.ADD
if parser.GetConfigMode() == instance_parser.ConfigMode.UPDATE:
default_entity_operation = instance_parser.EntityOperation.UPDATE
entities = {}
for entity_name, entity_yaml in parser.GetEntities().items():
entities[entity_name] = entity_instance.EntityInstance.FromYaml(
entity_yaml, default_entity_operation)
return entities, parser.GetConfigMode()
def _ValidateConfig(
filenames: List[str],
universe: pvt.ConfigUniverse) -> List[entity_instance.EntityInstance]:
print('\nLoading config files...\n')
entities, config_mode = Deserialize(filenames)
print('\nStarting config validation...\n')
helper = EntityHelper(universe)
return helper.Validate(entities, config_mode)
def _ValidateTelemetry(subscription: str, service_account: str,
entities: Dict[str, entity_instance.EntityInstance],
report_filename: str, timeout: int) -> None:
helper = TelemetryHelper(subscription, service_account)
helper.Validate(entities, report_filename, timeout)
def RunValidation(filenames: List[str],
modified_types_filepath: str = None,
subscription: str = None,
service_account: str = None,
report_filename: str = None,
timeout: int = 60) -> None:
if bool(subscription) != bool(service_account):
print('Subscription and a service account file are '
'both needed for the telemetry validation!')
sys.exit(0)
print('\nStarting validator...\n')
print('\nStarting universe generation...\n')
universe = generate_universe.BuildUniverse(modified_types_filepath)
if not universe:
print('\nError generating universe')
sys.exit(0)
print('\nStarting config validation...\n')
entities = _ValidateConfig(filenames, universe)
if subscription:
print('\nStarting telemetry validation...\n')
_ValidateTelemetry(subscription, service_account, entities, report_filename,
timeout)
class TelemetryHelper(object):
def __init__(self, subscription, service_account_file):
super().__init__()
self.subscription = subscription
self.service_account_file = service_account_file
def Validate(self, entities: Dict[str, entity_instance.EntityInstance],
report_filename: str, timeout: int) -> None:
print('Connecting to pubsub subscription: ', self.subscription)
sub = subscriber.Subscriber(self.subscription, self.service_account_file)
validator = telemetry_validator.TelemetryValidator(
entities, timeout,
self.BuildTelemetryValidationCallback(report_filename))
validator.StartTimer()
sub.Listen(validator.ValidateMessage)
def BuildTelemetryValidationCallback(
self,
report_filename: Optional[str] = None
) -> Callable[[telemetry_validator.TelemetryValidator], None]:
def TelemetryValidationCallback(
validator: telemetry_validator.TelemetryValidator) -> None:
print('Generating validation report ...')
current_time = datetime.now()
timestamp = current_time.strftime('%d-%b-%Y (%H:%M:%S)')
report = '\nReport Generated at: {0}\n'.format(timestamp)
if not validator.AllEntitiesValidated():
report += ('No telemetry message was received for the following '
'entities:')
report += '\n'
for entity_name in validator.GetUnvalidatedEntityNames():
report += ' {0}\n'.format(entity_name)
report += '\nTelemetry validation errors:\n'
for error in validator.GetErrors():
report += error.GetPrintableMessage()
report += '\nTelemetry validation warnings:\n'
for warnings in validator.GetWarnings():
report += warnings.GetPrintableMessage()
if report_filename:
        with open(report_filename, 'w') as f:
f.write(report)
f.close()
else:
print('\n')
print(report)
print('Report Generated')
sys.exit(0)
return TelemetryValidationCallback
class EntityHelper(object):
def __init__(self, universe: pvt.ConfigUniverse):
super().__init__()
self.universe = universe
def Validate(
self, entities: Dict[str, entity_instance.EntityInstance],
config_mode: instance_parser.ConfigMode
) -> Dict[str, entity_instance.EntityInstance]:
print('Validating entities ...')
building_found = False
valid_entities = {}
validator = entity_instance.CombinationValidator(self.universe, config_mode,
entities)
for entity_name, current_entity in entities.items():
if (current_entity.operation is not instance_parser.EntityOperation.DELETE
and current_entity.type_name.lower() == 'building'):
building_found = True
if not validator.Validate(current_entity):
print(entity_name, 'is not a valid instance')
continue
      valid_entities[entity_name] = current_entity
if not building_found:
print('Config must contain a non-deleted entity with a building type')
raise SyntaxError('Building Config must contain an '
'entity with a building type')
print('All entities validated')
return valid_entities
| true
| true
|
1c42b4bb2d2af7735cddc76247f942db7850523b
| 1,417
|
py
|
Python
|
mindspore/ops/_op_impl/tbe/add_n.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
mindspore/ops/_op_impl/tbe/add_n.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
mindspore/ops/_op_impl/tbe/add_n.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AddN op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
add_n_op_info = TBERegOp("AddN") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("add_n.so") \
.compute_cost(10) \
.kernel_name("add_n") \
.partial_flag(True) \
.attr("n", "required", "int", "all") \
.input(0, "x", False, "dynamic", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("broadcast") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.dtype_format(DataType.I32_None, DataType.I32_None) \
.get_op_info()
@op_info_register(add_n_op_info)
def _add_n_tbe():
"""AddN TBE register"""
return
| 35.425
| 79
| 0.664785
|
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
add_n_op_info = TBERegOp("AddN") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("add_n.so") \
.compute_cost(10) \
.kernel_name("add_n") \
.partial_flag(True) \
.attr("n", "required", "int", "all") \
.input(0, "x", False, "dynamic", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("broadcast") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.dtype_format(DataType.I32_None, DataType.I32_None) \
.get_op_info()
@op_info_register(add_n_op_info)
def _add_n_tbe():
return
| true
| true
|
1c42b4d53e445692e9aafb1231685cc33fdca33e
| 29,557
|
py
|
Python
|
assignment_1/pos_hmm_bigram.py
|
sigfredonin/NEU_CS6120
|
878fb65f9685af8f4d464398f26d1c5e1a803971
|
[
"FSFAP"
] | null | null | null |
assignment_1/pos_hmm_bigram.py
|
sigfredonin/NEU_CS6120
|
878fb65f9685af8f4d464398f26d1c5e1a803971
|
[
"FSFAP"
] | null | null | null |
assignment_1/pos_hmm_bigram.py
|
sigfredonin/NEU_CS6120
|
878fb65f9685af8f4d464398f26d1c5e1a803971
|
[
"FSFAP"
] | null | null | null |
"""
NEU CS6120 Assignment 1
Problem 4 POS Tagging - Hidden Markov Model
The training set is a collection of files from the Brown corpus.
The training set files have sentences of tokenized tagged words,
w_1/t_1 w_2/t_2 w_3/t_3 ... w_k-1/t_k-1 w_k/t_k
one sentence per line, with leading white space.
Some lines are empty (i.e., just a newline).
4.1 Obtain frequency counts from all the training files counted together --
C(w_i,t_i): word-tag counts,
C(t_i): tag unigram counts,
C(t_i-1, t_i): tag bigram counts.
Have to separate words/tags for this counting,
and have to add beginning and end of sentence token-tag pairs,
<s>/<$s> and </s>/<s$>.
Identify infrequent words and replace with 'UNK' before counting.
4.2 Calculate transition probability:
P(t_i-1, t_i) = C(t_i-1, t_i) / C(t_i-1)
4.3 Calculate emission probability:
P(w_i | t_i) = C(w_i, t_i) / C(t_i)
4.4 Generate 5 random sentences using HMM.
Output each sentence (with its POS tags),
and the probability of it being generated.
This uses the probabilities of the whole training vocabulary,
including infrequent words.
4.5 Use the Viterbi algorithm (NLP ed3 Fig. 8.5) to
derive the most probable tag sequence for each word
in the test dataset:
<sentence = ID=1>
word, tag
word, tag
...
word, tag
<EOS>
<sentence = ID=2>
word, tag
word, tag
...
word, tag
<EOS>
...
The test data set contains word tokens in this format, but without tags.
This uses the probabilities of the words-tag pairs where infrequent words
are collapsed into 'UNK'-tag pairs.
Sig Nin
03 Oct 2018
"""
import nltk
import numpy as np
import os
import re
from collections import defaultdict
# ------------------------------------------------------------------------
# Constants ---
# ------------------------------------------------------------------------
TOK_SS = '<s>' # start sentence
TAG_SS = '$S'
TOK_ES = '</s>' # end sentence
TAG_ES = 'S$'
pathToyPOS = r'D:/Documents/NLP/NEU_CS6120/assignment_1/toyPOS'
pathBrownData = r'D:/Documents/NLP/NEU_CS6120/assignment_1/brown'
pathTestDataFile = r'D:/Documents/NLP/NEU_CS6120/science_sample.txt'
# ------------------------------------------------------------------------
# Main Class - HMM POS Bigram Model ---
# ------------------------------------------------------------------------
class POS_HMM_BiGram:
"""
Bigram HMM POS model.
Initialize with counts from a collection of documents.
Can generate random sentences.
Can generate sequences of likely tags for words in sentences,
using the Viterbi algorithm.
"""
# ------------------------------------------------------------------------
# Cumulative Probabilities and Random Choosing ---
# ------------------------------------------------------------------------
def _cumulative_probabilities_for_prior(self, probabilities):
"""
Calculate cumulative probabilities for a list of probabilities.
Input:
List of probabilities for each possible successor:
[ ( successor, probability ), ... ]
Output:
List of cumulative probabilities for each possible successor:
[ ( successor, cumulative probability ), ... ]
"""
cps = [] # .. cumulative
cumulative_probability = 0.0
for s, p in probabilities:
cumulative_probability += p
cps += [(s, cumulative_probability, )]
return cps
def _cumulative_probabilities(self, successor_probabilities):
"""
Calculate cumulative probabilities for a list of succesor probabilities.
The successor probabilities for each prior sum to 1.
The cumulative successor probabilities end in 1.
Input:
List of probabilities for each possible successor for each prior:
{ prior : [ ( successor, probability ), ... ] ), ... }
Output:
List of cumulative probabilities for each possible successor for each prior:
{ prior, [ ( successor, cumulative probability ), ... ] ), ... }
"""
scps = { }
for prior, probabilities in successor_probabilities.items():
cps = self._cumulative_probabilities_for_prior(probabilities)
last, cp = cps[-1]
if abs(1.0 - cp) > 1e-14:
print("Warning: Probabilities don't add to 1.0", prior, last, cp)
cps[-1] = ( last, 1.0 )
scps[prior] = cps
return scps
def _choose_by_probability(self, cps):
"""
Choose an item at random from a list of
(item, cumulative probability)
so that each item has its own probability of being chosen.
Use binary search.
"""
from random import uniform
cumulative_probability = cps[-1][1]
r = uniform(0.0, cumulative_probability)
if self.DEBUG:
print("Random value, r:", r, ", Item list size:", len(cps))
entry = None
first = 0
last = len(cps) - 1
found = False
while first < last: # while interval size > 1
i = (first + last) // 2
entry = cps[i]
prob = entry[1]
if self.DEBUG and i < 20:
print("---", first, i, last, ":", entry, prob)
if r < prob:
last = i # in this or earlier interval
else:
first = i + 1 # in later interval
return cps[last]
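# Usage sketch for the binary search above (values invented for illustration):
# with cps = [('a', 0.5), ('b', 0.6), ('c', 1.0)], a draw of r = 0.55 lands in
# the interval (0.5, 0.6], so the search returns ('b', 0.6); over many draws
# 'a' is chosen about 50% of the time, 'b' about 10% and 'c' about 40%.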
# ------------------------------------------------------------------------
# HMM Probabilities - transition and emission ---
# ------------------------------------------------------------------------
def _emission_probabilities(self, count_tag_unigrams, count_word_tag_pairs):
"""
Calculate emission probability (alpha-smoothed):
P(w_i | t_i) = (C(w_i, t_i) + alpha) / (C(t_i) + alpha * V)
where V = count unique word tag pairs and alpha = 0.1
Inputs:
count_word_tags:
{ ( w_i, t_i ) : count, ... }
Outputs:
emission probabilities:
{ ( w_i , t_i ) : probability, ... }
word emission probabilities:
{ t_i : [ ( w_i , probability ), ... ], ... }
"""
alpha = 0.1
V = len(count_word_tag_pairs) # count of unique word tag pairs
alpha_V = alpha * V
# Compute probability of unseen word tag pair (count = 0)
emission_probabilities_unseen = defaultdict(lambda: 1.0 / V)
for tag, tag_count in count_tag_unigrams.items():
unseen_probability = alpha / (tag_count + alpha_V)
emission_probabilities_unseen[tag] = unseen_probability
# Calculate the emission probability P(w_i | t_i)
emission_probabilities = { }
word_emission_probabilities = defaultdict(list)
for word_tag_pair, word_tag_count in count_word_tag_pairs.items():
word, tag = word_tag_pair
tag_count = count_tag_unigrams[tag]
probability = (float(word_tag_count) + alpha) \
/ (tag_count + alpha_V)
emission_probabilities[word_tag_pair] = probability
word_emission_probabilities[tag] += [ ( word, probability ) ]
return emission_probabilities, word_emission_probabilities, \
emission_probabilities_unseen
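# Worked example of the alpha-smoothed emission estimate above (hypothetical
# counts, not from the training data): with alpha = 0.1, V = 1000 unique
# (word, tag) pairs, C('dog','nn') = 3 and C('nn') = 200,
#   P('dog' | 'nn') = (3 + 0.1) / (200 + 0.1 * 1000) = 3.1 / 300 (about 0.0103).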
def _transition_probabilities(self, count_tag_unigrams, count_tag_bigrams):
"""
Calculate transition probability (alpha-smoothed):
P(t_i-1, t_i) = (C(t_i-1, t_i) + alpha) / (C(t_i-1) + alpha * V)
where V = count unique bigrams, alpha = 0.1
Inputs:
count_tag_bigrams:
{ ( t_i-1, t_i ) : count, ... }
Outputs:
transition probabilities:
{ ( t_i-1, t_i ) : probability, ... }
tag transition probabilities:
{ t_i-1 : [ ( t_i, probability ) ... ], ... }
"""
alpha = 0.1
V = len(count_tag_bigrams) # count of unique tag bigrams
alpha_V = alpha * V
# Compute probability of unseen bigram (count = 0)
transition_probabilities_unseen = defaultdict(lambda: 1.0 / V)
for tag, tag_count in count_tag_unigrams.items():
unseen_probability = alpha / (tag_count + alpha_V)
transition_probabilities_unseen[tag] = unseen_probability
# Calculate the transition probability P(t_i-1, t_i)
transition_probabilities = { }
tag_transition_probabilities = defaultdict(list)
for bigram, bigram_count in count_tag_bigrams.items():
prev_tag, tag = bigram
prev_tag_count = count_tag_unigrams[prev_tag]
probability = (float(bigram_count) + alpha) \
/ (prev_tag_count + alpha_V)
transition_probabilities[bigram] = probability
tag_transition_probabilities[prev_tag] += [ ( tag, probability, ) ]
return transition_probabilities, tag_transition_probabilities, \
transition_probabilities_unseen
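# The same smoothing applies to tag bigrams (hypothetical counts): with
# alpha = 0.1, V = 500 unique tag bigrams, C('at','nn') = 40 and C('at') = 100,
#   P(nn | at) = (40 + 0.1) / (100 + 0.1 * 500) = 40.1 / 150 (about 0.267).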
# ------------------------------------------------------------------------
# Infrequent and unknown words, conversion to 'UNK' ---
# ------------------------------------------------------------------------
def _infrequent_words(self, word_tag_pairs, TOO_FEW):
"""
Return the word counts and infrequent word counts
in dictionaries with entries (word, tag) : count.
Inputs:
word_tag_pairs: [ (word, tag), ... ]
TOO_FEW: word is infrequent if count <= TOO_FEW
Outputs:
count_words: { word : count, ... }
count_infrequent: { ( word ) : count, ... }
"""
count_words = defaultdict(int)
for word, tag in word_tag_pairs:
count_words[word] += 1
count_infrequent = defaultdict(int)
for word, count in count_words.items():
if count <= TOO_FEW:
count_infrequent[word] += count
word_tag_pairs_UNK = []
for word, tag in word_tag_pairs:
if word in count_infrequent:
word = 'UNK'
word_tag_pairs_UNK += [ ( word, tag ) ]
return count_words, count_infrequent, word_tag_pairs_UNK
def _unknown_word_tags(self, word_tag_pairs, count_infrequent):
"""
Return a copy of the word counts dictionary
with the infrequent words replaced by a 'UNK' entry
that has count the sum of their counts.
Inputs:
count_word_tags: { ( word, tag ) : count, ... }
count_infrequent: { word : count, ... }
Outputs:
count_word_tag_pairs_UNK: { ( word, tag ): count, ...
( 'UNK', tag ) : count_unk, ... }
... where count_unk is the sum of the counts of the
infrequent words with that tag.
"""
count_word_tag_pairs = defaultdict(int)
for word_tag in word_tag_pairs:
word, tag = word_tag
count_word_tag_pairs[word_tag] += 1
count_word_tag_pairs_UNK = count_word_tag_pairs.copy()
for word_tag, count in count_word_tag_pairs.items():
word, tag = word_tag
if word in count_infrequent:
count_word_tag_pairs_UNK[('UNK', tag,)] += count
del count_word_tag_pairs_UNK[word_tag]
return count_word_tag_pairs_UNK
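# Hypothetical example of the UNK collapsing above: if 'zymurgy' and 'quark'
# each occur once with tag 'nn' and TOO_FEW is 1, both ('zymurgy','nn') and
# ('quark','nn') entries are dropped and replaced by a single ('UNK','nn')
# entry with count 2.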
# ------------------------------------------------------------------------
# Sentences, words, tags and counts ---
# ------------------------------------------------------------------------
def _counts_from_word_tag_pairs(self, word_tag_pairs):
count_word_tags = defaultdict(int)
count_tag_unigrams = defaultdict(int)
count_tag_bigrams = defaultdict(int)
tag_prev = None
for pair in word_tag_pairs:
word, tag = pair
count_word_tags[pair] += 1
tag_unigram = ( tag, )
count_tag_unigrams[tag_unigram] += 1
if tag_prev != None:
tag_bigram = ( tag_prev, tag, )
count_tag_bigrams[tag_bigram] += 1
tag_prev = tag
return count_word_tags, count_tag_unigrams, count_tag_bigrams
def _tags_from_sentences(self, sents):
p = re.compile(r'(\S+)/(\S+)')
word_tag_pairs = []
for sent in sents:
pairs_in_sent = [ (word.lower(), tag) for word, tag in p.findall(sent) ]
word_tag_pairs += [ ( TOK_SS, TAG_SS, ) ] # Start of sentence
word_tag_pairs += pairs_in_sent # words and tags
word_tag_pairs += [ ( TOK_ES, TAG_ES, ) ] # End of sentence
return word_tag_pairs
def _tagged_sentences_from_file(self, dirPath, fnx):
fnxPath = os.path.join(dirPath, fnx)
re_nl = re.compile(r'\n')
re_sb = re.compile(r'( )+')
sents_in_file = []
with open(fnxPath) as f:
print(fnx)
for line in f:
nnl = re_nl.sub(' ', line) # '\n' -> ' '
sb = re_sb.sub(' ', nnl) # ' '+ -> ' '
if sb != ' ':
sents_in_file += [ sb ]
return sents_in_file
def _tagged_sentences_from_files(self, dirPath, files):
sents = []
for fnx in files:
fnx_sents = self._tagged_sentences_from_file(dirPath, fnx)
sents += fnx_sents
return sents
# ------------------------------------------------------------------------
# Class constructor and training ---
# ------------------------------------------------------------------------
def init(self, dirPath, TOO_FEW=5):
self.files = os.listdir(dirPath)
self.TOO_FEW = TOO_FEW
# sentences, word/tag pairs, counts
self.sents = self._tagged_sentences_from_files(dirPath, self.files)
self.word_tag_pairs = self._tags_from_sentences(self.sents)
# identify infrequent words and replace with ('UNK',tag) counts
self.count_words, self.count_infrequent, self.word_tag_pairs_UNK = \
self._infrequent_words(self.word_tag_pairs, self.TOO_FEW)
self.count_word_tag_pairs_UNK = \
self._unknown_word_tags(self.word_tag_pairs, self.count_infrequent)
# bigrams and counts, from word tag pairs with infrequent set to UNK
self.count_word_tags, self.count_tag_unigrams, self.count_tag_bigrams = \
self._counts_from_word_tag_pairs(self.word_tag_pairs_UNK)
# transition and emission probabilities
self.pTrans, self.pTagTrans, self.pTransUnseen = \
self._transition_probabilities( \
self.count_tag_unigrams, self.count_tag_bigrams)
self.pEmiss, self.pTagEmiss, self.pEmissUnseen = \
self._emission_probabilities( \
self.count_tag_unigrams, self.count_word_tags)
self.pEmUNK, self.pTagEmUNK, self.pEmUNKUnseen = \
self._emission_probabilities( \
self.count_tag_unigrams, self.count_word_tag_pairs_UNK)
# cumulative probabilities for random choosing
self.pCumTrans = self._cumulative_probabilities(self.pTagTrans)
self.pCumEmiss = self._cumulative_probabilities(self.pTagEmiss)
self.pCumEmUNK = self._cumulative_probabilities(self.pTagEmUNK)
def reset(self):
# ... over whole training set ...
self.files = None # List of files in training set
self.TOO_FEW = None # UNK if word count <= TOO_FEW
self.sents = None # List of sentences
self.tags = None # List of (word, tag) pairs
# counts ...
self.count_word_tags = None # { (w_i, t_i) : count, .. }
self.count_words = None # { w_i : count, ... }
self.count_infrequent = None # { (w_i, t_i) : count, ... }
self.count_word_tag_pairs_UNK = None # { (w_i, t_i) : count, ... }
self.count_tag_unigrams = None # { (t_i) : count, ... }
self.count_tag_bigrams = None # { (t_i-1, t_i) : count, ... }
# probabilities
self.pTrans = None # { (t_i-1, t_i) : P(t_i-1, t_i), ... }
self.pEmiss = None # { (w_i, t_i) : P(w_i | t_i), ... }
self.pEmUNK = None # { (w_i, t_i) : P(w_i | t_i), ... }
# conditional probabilities
self.pTagTrans = None # { t_i-1 : (t_i, P(t_i-1, t_i)), ... }
self.pTagEmiss = None # { t_i : (w_i, P(w_i | t_i)), ... }
self.pTagEmUNK = None # { t_i : (w_i, P(w_i | t_i)), ... }
# cumulative conditional probabilities
self.pCumTrans = None # { t_i-1 : [ (t_i, cP(t_i-1, t_i)) ], ... }
self.pCumEmiss = None # { t_i : [ (w_i, cP(w_i | t_i)) ], ... }
self.pCumEmUNK = None # { t_i : [ (w_i, cP(w_i | t_i)) ], ... }
def set_DEBUG(self, DEBUG=True):
self.DEBUG=DEBUG
def __init__(self, DEBUG=False):
self.set_DEBUG(DEBUG)
self.reset()
# ------------------------------------------------------------------------
# Sentence Generation ---
# ------------------------------------------------------------------------
def _assemble_sentence(self, swt):
sent = ""
sent_tagged = ""
ss = False
for word, tag in swt:
if tag == TAG_SS:
ss = True
elif tag != TAG_ES:
if tag == 'np' or ss:
word = word.capitalize()
ss = False
sent += word + ' '
sent_tagged += word + "/" + tag + ' '
if tag == TAG_ES:
sent = sent[:-1]
sent_tagged = sent_tagged[:-1]
return sent, sent_tagged
def generate_sentence(self, pTrans, pEmiss, pCumTrans, pCumEmiss):
swt = [] # sentence word/tag pairs
stp = [] # sentence transition probabilities
sep = [] # sentence emission probabilities
# start of sentence word and tag
word_tag = ( TOK_SS, TAG_SS, )
swt += [ word_tag ]
stp += [ 1.0 ]
sep += [ 1.0 ]
# Iterate choosing tags and words until end of sentence is chosen
next_word = None
next_tag = None
while next_word != TOK_ES:
# generate the next word/tag pair
word, tag = word_tag
tcps = pCumTrans[tag] # List of cumulative transition probabilities
next_tag_cumP = self._choose_by_probability(tcps)
next_tag, tagCumP = next_tag_cumP
ecps = pCumEmiss[next_tag] # List of cumulative emission probabilities
next_word_cumP = self._choose_by_probability(ecps)
next_word, wordCumP = next_word_cumP
# get the probabilities used
tp = pTrans[( tag, next_tag, )]
ep = pEmiss[( next_word, next_tag )]
# record word/tag pair
word_tag = ( next_word, next_tag )
swt += [ word_tag ]
stp += [ tp ]
sep += [ ep ]
# continue generating as long as the next word is not the end of sentence token
sent, sent_tagged = self._assemble_sentence(swt)
prob = np.prod(np.array(stp)) * np.prod(np.array(sep))
return swt, stp, sep, sent, sent_tagged, prob
# ------------------------------------------------------------------------
# Tests ---
# ------------------------------------------------------------------------
if __name__ == '__main__':
from datetime import datetime
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathToyPOS
hmm = POS_HMM_BiGram()
TOO_FEW = 1
files = os.listdir(testPath)
fnx = files[-1]
print("--- ", fnx, " ---")
fnx_sents = hmm._tagged_sentences_from_file(testPath, fnx)
print("Len sentences:", len(fnx_sents))
print("First 5 sentences:", fnx_sents[:5])
print("last 5 sentences:", fnx_sents[-5:])
fnx_word_tag_pairs = hmm._tags_from_sentences(fnx_sents)
print("Len word tag pairs:", len(fnx_word_tag_pairs))
print("First 5 word tag pairs:", fnx_word_tag_pairs[:5])
print("Last 5 word tag pairs:", fnx_word_tag_pairs[-5:])
fnx_count_word_tags, fnx_count_tag_unigrams, fnx_count_tag_bigrams = \
hmm._counts_from_word_tag_pairs(fnx_word_tag_pairs)
fnx_count_word_tags_sum = sum([c for p, c in
fnx_count_word_tags.items()])
fnx_count_tag_unigrams_sum = sum([c for p, c in
fnx_count_tag_unigrams.items()])
fnx_count_tag_bigrams_sum = sum([c for p, c in
fnx_count_tag_bigrams.items()])
print("Sum counts in count word tag pairs =", fnx_count_word_tags_sum)
print("Length count word tag pairs:", len(fnx_count_word_tags))
print("First 5 count word tag pairs:", list(fnx_count_word_tags.items())[:5])
print("Last 5 count word tag pairs:", list(fnx_count_word_tags.items())[-5:])
print("Length count tag unigrams:", len(fnx_count_tag_unigrams))
print("Sum counts in count tag unigrams =", fnx_count_tag_unigrams_sum)
print("First 5 count tag unigrams:", list(fnx_count_tag_unigrams.items())[:5])
print("Last 5 count tag unigrams:", list(fnx_count_tag_unigrams.items())[-5:])
print("Sum counts in count tag bigrams =", fnx_count_tag_bigrams_sum)
print("Length count tag bigrams:", len(fnx_count_tag_bigrams))
print("First 5 count tag bigrams:", list(fnx_count_tag_bigrams.items())[:5])
print("Last 5 count tag bigrams:", list(fnx_count_tag_bigrams.items())[-5:])
fnx_count_words, fnx_count_infrequent, fnx_word_tag_pairs_UNK = \
hmm._infrequent_words(fnx_word_tag_pairs, TOO_FEW)
fnx_count_words_sum = sum([c for p, c in
fnx_count_words.items()])
fnx_count_infrequent_sum = sum([c for p, c in
fnx_count_infrequent.items()])
print("Sum counts in count words =", fnx_count_words_sum)
print("Length count words:", len(fnx_count_words))
print("First 5 count words:", list(fnx_count_words.items())[:5])
print("Last 5 count words:", list(fnx_count_words.items())[-5:])
print("Sum counts in count infrequent words =", fnx_count_infrequent_sum)
print("Length count infrequent words:", len(fnx_count_infrequent))
print("First 5 count infrequent words:", list(fnx_count_infrequent.items())[:5])
print("Last 5 count infrequent words:", list(fnx_count_infrequent.items())[-5:])
fnx_count_word_tags, fnx_count_tag_unigrams, fnx_count_tag_bigrams = \
hmm._counts_from_word_tag_pairs(fnx_word_tag_pairs_UNK)
fnx_count_word_tag_pairs_UNK = \
hmm._unknown_word_tags(fnx_word_tag_pairs, fnx_count_infrequent)
fnx_count_word_tag_pairs_UNK_sum = sum([c for p, c in
fnx_count_word_tag_pairs_UNK.items()])
print("Sum counts in count word tags UNK =", fnx_count_word_tag_pairs_UNK_sum)
print("Length count word tags UNK:", len(fnx_count_word_tag_pairs_UNK))
print("First 5 count word tags UNK:", list(fnx_count_word_tag_pairs_UNK.items())[:5])
print("Last 5 count word tags UNK:", list(fnx_count_word_tag_pairs_UNK.items())[-5:])
fnx_pTrans, fnx_pTagTrans, fnx_pTransUnseen = \
hmm._transition_probabilities(fnx_count_tag_unigrams, fnx_count_tag_bigrams)
print("Length transition probabilities:", len(fnx_pTrans))
print("First 5 transition probabilities:", list(fnx_pTrans.items())[:5])
print("Last 5 transition probabilities:", list(fnx_pTrans.items())[-5:])
print("Length tag transition probabilities:", len(fnx_pTagTrans))
print("First 5 tag transition probabilities:", list(fnx_pTagTrans.items())[:5])
print("Last 5 tag transition probabilities:", list(fnx_pTagTrans.items())[-5:])
fnx_pEmiss, fnx_pTagEmiss, fnx_pEmissUnseen = \
hmm._emission_probabilities(fnx_count_tag_unigrams, fnx_count_word_tags)
print("Length emission probabilities:", len(fnx_pEmiss))
print("First 5 emission probabilities:", list(fnx_pEmiss.items())[:5])
print("Last 5 emission probabilities:", list(fnx_pEmiss.items())[-5:])
print("Length tag emission probabilities:", len(fnx_pTagEmiss))
print("First 5 tag emission probabilities:", list(fnx_pTagEmiss.items())[:5])
print("Last 5 tag emission probabilities:", list(fnx_pTagEmiss.items())[-5:])
fnx_pEmUNK, fnx_pTagEmUNK, fnx_pEmUNKUnknown = \
hmm._emission_probabilities(fnx_count_tag_unigrams, fnx_count_word_tag_pairs_UNK)
print("Length emission probabilities UNK:", len(fnx_pEmUNK))
print("First 5 emission probabilities UNK:", list(fnx_pEmUNK.items())[:5])
print("Last 5 emission probabilities UNK:", list(fnx_pEmUNK.items())[-5:])
print("Length tag emission probabilities UNK:", len(fnx_pTagEmUNK))
print("First 5 tag emission probabilities UNK:", list(fnx_pTagEmUNK.items())[:5])
print("Last 5 tag emission probabilities UNK:", list(fnx_pTagEmUNK.items())[-5:])
fnx_pCumTrans = hmm._cumulative_probabilities(fnx_pTagTrans)
print("Length cumulative tag transition probabilities UNK:", len(fnx_pCumTrans))
print("First 5 cumulative tag transition probabilities UNK:", list(fnx_pCumTrans.items())[:5])
print("Last 5 cumulative tag transition probabilities UNK:", list(fnx_pCumTrans.items())[-5:])
fnx_pCumEmiss = hmm._cumulative_probabilities(fnx_pTagEmiss)
print("Length cumulative tag emission probabilities:", len(fnx_pCumEmiss))
print("First 5 cumulative tag emission probabilities:", list(fnx_pCumEmiss.items())[:5])
print("Last 5 cumulative tag emission probabilities:", list(fnx_pCumEmiss.items())[-5:])
fnx_pCumEmUNK = hmm._cumulative_probabilities(fnx_pTagEmUNK)
print("Length cumulative tag emission probabilities UNK:", len(fnx_pCumEmUNK))
print("First 5 cumulative tag emission probabilities UNK:", list(fnx_pCumEmUNK.items())[:5])
print("Last 5 cumulative tag emission probabilities UNK:", list(fnx_pCumEmUNK.items())[-5:])
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated characters ...")
cps = [ ('a', 0.5), ('b', 0.6), ('c', 0.8), ('d', 0.95), ('e', 1.0) ]
print(cps)
sent = ""
for i in range(30):
char_prob = hmm._choose_by_probability(cps)
char, prob = char_prob
sent += char
print(char_prob, end='')
print()
print(sent)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
fnx_pTrans, fnx_pEmiss, fnx_pCumTrans, fnx_pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathToyPOS
print("Test with all file in %s -----" % testPath)
hmm.init(testPath, TOO_FEW=5)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
hmm.pTrans, hmm.pEmiss, hmm.pCumTrans, hmm.pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathBrownData
print("Test with all file in %s -----" % testPath)
hmm.init(testPath, TOO_FEW=5)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
hmm.pTrans, hmm.pEmiss, hmm.pCumTrans, hmm.pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
| 43.983631
| 98
| 0.573908
|
import nltk
import numpy as np
import os
import re
from collections import defaultdict
TOK_SS = '<s>'
TAG_SS = '$S'
TOK_ES = '</s>'
TAG_ES = 'S$'
pathToyPOS = r'D:/Documents/NLP/NEU_CS6120/assignment_1/toyPOS'
pathBrownData = r'D:/Documents/NLP/NEU_CS6120/assignment_1/brown'
pathTestDataFile = r'D:/Documents/NLP/NEU_CS6120/science_sample.txt'
class POS_HMM_BiGram:
def _cumulative_probabilities_for_prior(self, probabilities):
cps = []
cumulative_probability = 0.0
for s, p in probabilities:
cumulative_probability += p
cps += [(s, cumulative_probability, )]
return cps
def _cumulative_probabilities(self, successor_probabilities):
scps = { }
for prior, probabilities in successor_probabilities.items():
cps = self._cumulative_probabilities_for_prior(probabilities)
last, cp = cps[-1]
if abs(1.0 - cp) > 1e-14:
print("Warning: Probabilities don't add to 1.0", prior, last, cp)
cps[-1] = ( last, 1.0 )
scps[prior] = cps
return scps
def _choose_by_probability(self, cps):
from random import uniform
cumulative_probability = cps[-1][1]
r = uniform(0.0, cumulative_probability)
if self.DEBUG:
print("Random value, r:", r, ", Item list size:", len(cps))
entry = None
first = 0
last = len(cps) - 1
found = False
while first < last: # while interval size > 1
i = (first + last) // 2
entry = cps[i]
prob = entry[1]
if self.DEBUG and i < 20:
print("---", first, i, last, ":", entry, prob)
if r < prob:
last = i # in this or earlier interval
else:
first = i + 1 # in later interval
return cps[last]
# ------------------------------------------------------------------------
# HMM Probabilities - transition and emission ---
# ------------------------------------------------------------------------
def _emission_probabilities(self, count_tag_unigrams, count_word_tag_pairs):
alpha = 0.1
V = len(count_word_tag_pairs) # count of unique word tag pairs
alpha_V = alpha * V
# Compute probability of unseen word tag pair (count = 0)
emission_probabilities_unseen = defaultdict(lambda: 1.0 / V)
for tag, tag_count in count_tag_unigrams.items():
unseen_probability = alpha / (tag_count + alpha_V)
emission_probabilities_unseen[tag] = unseen_probability
# Calculate the emission probability P(w_i | t_i)
emission_probabilities = { }
word_emission_probabilities = defaultdict(list)
for word_tag_pair, word_tag_count in count_word_tag_pairs.items():
word, tag = word_tag_pair
tag_count = count_tag_unigrams[tag]
probability = (float(word_tag_count) + alpha) \
/ (tag_count + alpha_V)
emission_probabilities[word_tag_pair] = probability
word_emission_probabilities[tag] += [ ( word, probability ) ]
return emission_probabilities, word_emission_probabilities, \
emission_probabilities_unseen
def _transition_probabilities(self, count_tag_unigrams, count_tag_bigrams):
alpha = 0.1
V = len(count_tag_bigrams) # count of unique tag bigrams
alpha_V = alpha * V
# Compute probability of unseen bigram (count = 0)
transition_probabilities_unseen = defaultdict(lambda: 1.0 / V)
for tag, tag_count in count_tag_unigrams.items():
unseen_probability = alpha / (tag_count + alpha_V)
transition_probabilities_unseen[tag] = unseen_probability
# Calculate the transition probability P(t_i-1, t_i)
transition_probabilities = { }
tag_transition_probabilities = defaultdict(list)
for bigram, bigram_count in count_tag_bigrams.items():
prev_tag, tag = bigram
prev_tag_count = count_tag_unigrams[prev_tag]
probability = (float(bigram_count) + alpha) \
/ (prev_tag_count + alpha_V)
transition_probabilities[bigram] = probability
tag_transition_probabilities[prev_tag] += [ ( tag, probability, ) ]
return transition_probabilities, tag_transition_probabilities, \
transition_probabilities_unseen
# ------------------------------------------------------------------------
# Infrequent and unknown words, conversion to 'UNK' ---
# ------------------------------------------------------------------------
def _infrequent_words(self, word_tag_pairs, TOO_FEW):
count_words = defaultdict(int)
for word, tag in word_tag_pairs:
count_words[word] += 1
count_infrequent = defaultdict(int)
for word, count in count_words.items():
if count <= TOO_FEW:
count_infrequent[word] += count
word_tag_pairs_UNK = []
for word, tag in word_tag_pairs:
if word in count_infrequent:
word = 'UNK'
word_tag_pairs_UNK += [ ( word, tag ) ]
return count_words, count_infrequent, word_tag_pairs_UNK
def _unknown_word_tags(self, word_tag_pairs, count_infrequent):
count_word_tag_pairs = defaultdict(int)
for word_tag in word_tag_pairs:
word, tag = word_tag
count_word_tag_pairs[word_tag] += 1
count_word_tag_pairs_UNK = count_word_tag_pairs.copy()
for word_tag, count in count_word_tag_pairs.items():
word, tag = word_tag
if word in count_infrequent:
count_word_tag_pairs_UNK[('UNK', tag,)] += count
del count_word_tag_pairs_UNK[word_tag]
return count_word_tag_pairs_UNK
# ------------------------------------------------------------------------
# Sentences, words, tags and counts ---
# ------------------------------------------------------------------------
def _counts_from_word_tag_pairs(self, word_tag_pairs):
count_word_tags = defaultdict(int)
count_tag_unigrams = defaultdict(int)
count_tag_bigrams = defaultdict(int)
tag_prev = None
for pair in word_tag_pairs:
word, tag = pair
count_word_tags[pair] += 1
tag_unigram = ( tag, )
count_tag_unigrams[tag_unigram] += 1
if tag_prev != None:
tag_bigram = ( tag_prev, tag, )
count_tag_bigrams[tag_bigram] += 1
tag_prev = tag
return count_word_tags, count_tag_unigrams, count_tag_bigrams
def _tags_from_sentences(self, sents):
p = re.compile(r'(\S+)/(\S+)')
word_tag_pairs = []
for sent in sents:
pairs_in_sent = [ (word.lower(), tag) for word, tag in p.findall(sent) ]
word_tag_pairs += [ ( TOK_SS, TAG_SS, ) ] # Start of sentence
word_tag_pairs += pairs_in_sent # words and tags
word_tag_pairs += [ ( TOK_ES, TAG_ES, ) ] # End of sentence
return word_tag_pairs
def _tagged_sentences_from_file(self, dirPath, fnx):
fnxPath = os.path.join(dirPath, fnx)
re_nl = re.compile(r'\n')
re_sb = re.compile(r'( )+')
sents_in_file = []
with open(fnxPath) as f:
print(fnx)
for line in f:
nnl = re_nl.sub(' ', line) # '\n' -> ' '
sb = re_sb.sub(' ', nnl) # ' '+ -> ' '
if sb != ' ':
sents_in_file += [ sb ]
return sents_in_file
def _tagged_sentences_from_files(self, dirPath, files):
sents = []
for fnx in files:
fnx_sents = self._tagged_sentences_from_file(dirPath, fnx)
sents += fnx_sents
return sents
# ------------------------------------------------------------------------
# Class constructor and training ---
# ------------------------------------------------------------------------
def init(self, dirPath, TOO_FEW=5):
self.files = os.listdir(dirPath)
self.TOO_FEW = TOO_FEW
# sentences, word/tag pairs, counts
self.sents = self._tagged_sentences_from_files(dirPath, self.files)
self.word_tag_pairs = self._tags_from_sentences(self.sents)
# identify infrequent words and replace with ('UNK',tag) counts
self.count_words, self.count_infrequent, self.word_tag_pairs_UNK = \
self._infrequent_words(self.word_tag_pairs, self.TOO_FEW)
self.count_word_tag_pairs_UNK = \
self._unknown_word_tags(self.word_tag_pairs, self.count_infrequent)
# bigrams and counts, from word tag pairs with infrequent set to UNK
self.count_word_tags, self.count_tag_unigrams, self.count_tag_bigrams = \
self._counts_from_word_tag_pairs(self.word_tag_pairs_UNK)
# transition and emission probabilities
self.pTrans, self.pTagTrans, self.pTransUnseen = \
self._transition_probabilities( \
self.count_tag_unigrams, self.count_tag_bigrams)
self.pEmiss, self.pTagEmiss, self.pEmissUnseen = \
self._emission_probabilities( \
self.count_tag_unigrams, self.count_word_tags)
self.pEmUNK, self.pTagEmUNK, self.pEmUNKUnseen = \
self._emission_probabilities( \
self.count_tag_unigrams, self.count_word_tag_pairs_UNK)
# cumulative probabilities for random choosing
self.pCumTrans = self._cumulative_probabilities(self.pTagTrans)
self.pCumEmiss = self._cumulative_probabilities(self.pTagEmiss)
self.pCumEmUNK = self._cumulative_probabilities(self.pTagEmUNK)
def reset(self):
# ... over whole training set ...
self.files = None # List of files in training set
self.TOO_FEW = None # UNK if word count <= TOO_FEW
self.sents = None # List of sentences
self.tags = None # List of (word, tag) pairs
# counts ...
self.count_word_tags = None # { (w_i, t_i) : count, .. }
self.count_words = None # { w_i : count, ... }
self.count_infrequent = None # { (w_i, t_i) : count, ... }
self.count_word_tag_pairs_UNK = None # { (w_i, t_i) : count, ... }
self.count_tag_unigrams = None # { (t_i) : count, ... }
self.count_tag_bigrams = None # { (t_i-1, t_i) : count, ... }
# probabilities
self.pTrans = None # { (t_i-1, t_i) : P(t_i-1, t_i), ... }
self.pEmiss = None # { (w_i, t_i) : P(w_i | t_i), ... }
self.pEmUNK = None # { (w_i, t_i) : P(w_i | t_i), ... }
# conditional probabilities
self.pTagTrans = None # { t_i-1 : (t_i, P(t_i-1, t_i)), ... }
self.pTagEmiss = None # { t_i : (w_i, P(w_i | t_i)), ... }
self.pTagEmUNK = None # { t_i : (w_i, P(w_i | t_i)), ... }
# cumulative conditional probabilities
self.pCumTrans = None # { t_i-1 : [ (t_i, cP(t_i-1, t_i)) ], ... }
self.pCumEmiss = None # { t_i : [ (w_i, cP(w_i | t_i)) ], ... }
self.pCumEmUNK = None # { t_i : [ (w_i, cP(w_i | t_i)) ], ... }
def set_DEBUG(self, DEBUG=True):
self.DEBUG=DEBUG
def __init__(self, DEBUG=False):
self.set_DEBUG(DEBUG)
self.reset()
# ------------------------------------------------------------------------
# Sentence Generation ---
# ------------------------------------------------------------------------
def _assemble_sentence(self, swt):
sent = ""
sent_tagged = ""
ss = False
for word, tag in swt:
if tag == TAG_SS:
ss = True
elif tag != TAG_ES:
if tag == 'np' or ss:
word = word.capitalize()
ss = False
sent += word + ' '
sent_tagged += word + "/" + tag + ' '
if tag == TAG_ES:
sent = sent[:-1]
sent_tagged = sent_tagged[:-1]
return sent, sent_tagged
def generate_sentence(self, pTrans, pEmiss, pCumTrans, pCumEmiss):
swt = [] # sentence word/tag pairs
stp = [] # sentence transition probabilities
sep = [] # sentence emission probabilities
# start of sentence word and tag
word_tag = ( TOK_SS, TAG_SS, )
swt += [ word_tag ]
stp += [ 1.0 ]
sep += [ 1.0 ]
# Iterate choosing tags and words until end of sentence is chosen
next_word = None
next_tag = None
while next_word != TOK_ES:
# generate the next word/tag pair
word, tag = word_tag
tcps = pCumTrans[tag] # List of cumulative transition probabilities
next_tag_cumP = self._choose_by_probability(tcps)
next_tag, tagCumP = next_tag_cumP
ecps = pCumEmiss[next_tag] # List of cumulative emission probabilities
next_word_cumP = self._choose_by_probability(ecps)
next_word, wordCumP = next_word_cumP
# get the probabilities used
tp = pTrans[( tag, next_tag, )]
ep = pEmiss[( next_word, next_tag )]
# record word/tag pair
word_tag = ( next_word, next_tag )
swt += [ word_tag ]
stp += [ tp ]
sep += [ ep ]
# continue generating as long as the next word is not the end of sentence token
sent, sent_tagged = self._assemble_sentence(swt)
prob = np.prod(np.array(stp)) * np.prod(np.array(sep))
return swt, stp, sep, sent, sent_tagged, prob
# ------------------------------------------------------------------------
# Tests ---
# ------------------------------------------------------------------------
if __name__ == '__main__':
from datetime import datetime
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathToyPOS
hmm = POS_HMM_BiGram()
TOO_FEW = 1
files = os.listdir(testPath)
fnx = files[-1]
print("--- ", fnx, " ---")
fnx_sents = hmm._tagged_sentences_from_file(testPath, fnx)
print("Len sentences:", len(fnx_sents))
print("First 5 sentences:", fnx_sents[:5])
print("last 5 sentences:", fnx_sents[-5:])
fnx_word_tag_pairs = hmm._tags_from_sentences(fnx_sents)
print("Len word tag pairs:", len(fnx_word_tag_pairs))
print("First 5 word tag pairs:", fnx_word_tag_pairs[:5])
print("Last 5 word tag pairs:", fnx_word_tag_pairs[-5:])
fnx_count_word_tags, fnx_count_tag_unigrams, fnx_count_tag_bigrams = \
hmm._counts_from_word_tag_pairs(fnx_word_tag_pairs)
fnx_count_word_tags_sum = sum([c for p, c in
fnx_count_word_tags.items()])
fnx_count_tag_unigrams_sum = sum([c for p, c in
fnx_count_tag_unigrams.items()])
fnx_count_tag_bigrams_sum = sum([c for p, c in
fnx_count_tag_bigrams.items()])
print("Sum counts in count word tag pairs =", fnx_count_word_tags_sum)
print("Length count word tag pairs:", len(fnx_count_word_tags))
print("First 5 count word tag pairs:", list(fnx_count_word_tags.items())[:5])
print("Last 5 count word tag pairs:", list(fnx_count_word_tags.items())[-5:])
print("Length count tag unigrams:", len(fnx_count_tag_unigrams))
print("Sum counts in count tag unigrams =", fnx_count_tag_unigrams_sum)
print("First 5 count tag unigrams:", list(fnx_count_tag_unigrams.items())[:5])
print("Last 5 count tag unigrams:", list(fnx_count_tag_unigrams.items())[-5:])
print("Sum counts in count tag bigrams =", fnx_count_tag_bigrams_sum)
print("Length count tag bigrams:", len(fnx_count_tag_bigrams))
print("First 5 count tag bigrams:", list(fnx_count_tag_bigrams.items())[:5])
print("Last 5 count tag bigrams:", list(fnx_count_tag_bigrams.items())[-5:])
fnx_count_words, fnx_count_infrequent, fnx_word_tag_pairs_UNK = \
hmm._infrequent_words(fnx_word_tag_pairs, TOO_FEW)
fnx_count_words_sum = sum([c for p, c in
fnx_count_words.items()])
fnx_count_infrequent_sum = sum([c for p, c in
fnx_count_infrequent.items()])
print("Sum counts in count words =", fnx_count_words_sum)
print("Length count words:", len(fnx_count_words))
print("First 5 count words:", list(fnx_count_words.items())[:5])
print("Last 5 count words:", list(fnx_count_words.items())[-5:])
print("Sum counts in count infrequent words =", fnx_count_infrequent_sum)
print("Length count infrequent words:", len(fnx_count_infrequent))
print("First 5 count infrequent words:", list(fnx_count_infrequent.items())[:5])
print("Last 5 count infrequent words:", list(fnx_count_infrequent.items())[-5:])
fnx_count_word_tags, fnx_count_tag_unigrams, fnx_count_tag_bigrams = \
hmm._counts_from_word_tag_pairs(fnx_word_tag_pairs_UNK)
fnx_count_word_tag_pairs_UNK = \
hmm._unknown_word_tags(fnx_word_tag_pairs, fnx_count_infrequent)
fnx_count_word_tag_pairs_UNK_sum = sum([c for p, c in
fnx_count_word_tag_pairs_UNK.items()])
print("Sum counts in count word tags UNK =", fnx_count_word_tag_pairs_UNK_sum)
print("Length count word tags UNK:", len(fnx_count_word_tag_pairs_UNK))
print("First 5 count word tags UNK:", list(fnx_count_word_tag_pairs_UNK.items())[:5])
print("Last 5 count word tags UNK:", list(fnx_count_word_tag_pairs_UNK.items())[-5:])
fnx_pTrans, fnx_pTagTrans, fnx_pTransUnseen = \
hmm._transition_probabilities(fnx_count_tag_unigrams, fnx_count_tag_bigrams)
print("Length transition probabilities:", len(fnx_pTrans))
print("First 5 transition probabilities:", list(fnx_pTrans.items())[:5])
print("Last 5 transition probabilities:", list(fnx_pTrans.items())[-5:])
print("Length tag transition probabilities:", len(fnx_pTagTrans))
print("First 5 tag transition probabilities:", list(fnx_pTagTrans.items())[:5])
print("Last 5 tag transition probabilities:", list(fnx_pTagTrans.items())[-5:])
fnx_pEmiss, fnx_pTagEmiss, fnx_pEmissUnseen = \
hmm._emission_probabilities(fnx_count_tag_unigrams, fnx_count_word_tags)
print("Length emission probabilities:", len(fnx_pEmiss))
print("First 5 emission probabilities:", list(fnx_pEmiss.items())[:5])
print("Last 5 emission probabilities:", list(fnx_pEmiss.items())[-5:])
print("Length tag emission probabilities:", len(fnx_pTagEmiss))
print("First 5 tag emission probabilities:", list(fnx_pTagEmiss.items())[:5])
print("Last 5 tag emission probabilities:", list(fnx_pTagEmiss.items())[-5:])
fnx_pEmUNK, fnx_pTagEmUNK, fnx_pEmUNKUnknown = \
hmm._emission_probabilities(fnx_count_tag_unigrams, fnx_count_word_tag_pairs_UNK)
print("Length emission probabilities UNK:", len(fnx_pEmUNK))
print("First 5 emission probabilities UNK:", list(fnx_pEmUNK.items())[:5])
print("Last 5 emission probabilities UNK:", list(fnx_pEmUNK.items())[-5:])
print("Length tag emission probabilities UNK:", len(fnx_pTagEmUNK))
print("First 5 tag emission probabilities UNK:", list(fnx_pTagEmUNK.items())[:5])
print("Last 5 tag emission probabilities UNK:", list(fnx_pTagEmUNK.items())[-5:])
fnx_pCumTrans = hmm._cumulative_probabilities(fnx_pTagTrans)
print("Length cumulative tag transition probabilities UNK:", len(fnx_pCumTrans))
print("First 5 cumulative tag transition probabilities UNK:", list(fnx_pCumTrans.items())[:5])
print("Last 5 cumulative tag transition probabilities UNK:", list(fnx_pCumTrans.items())[-5:])
fnx_pCumEmiss = hmm._cumulative_probabilities(fnx_pTagEmiss)
print("Length cumulative tag emission probabilities:", len(fnx_pCumEmiss))
print("First 5 cumulative tag emission probabilities:", list(fnx_pCumEmiss.items())[:5])
print("Last 5 cumulative tag emission probabilities:", list(fnx_pCumEmiss.items())[-5:])
fnx_pCumEmUNK = hmm._cumulative_probabilities(fnx_pTagEmUNK)
print("Length cumulative tag emission probabilities UNK:", len(fnx_pCumEmUNK))
print("First 5 cumulative tag emission probabilities UNK:", list(fnx_pCumEmUNK.items())[:5])
print("Last 5 cumulative tag emission probabilities UNK:", list(fnx_pCumEmUNK.items())[-5:])
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated characters ...")
cps = [ ('a', 0.5), ('b', 0.6), ('c', 0.8), ('d', 0.95), ('e', 1.0) ]
print(cps)
sent = ""
for i in range(30):
char_prob = hmm._choose_by_probability(cps)
char, prob = char_prob
sent += char
print(char_prob, end='')
print()
print(sent)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
fnx_pTrans, fnx_pEmiss, fnx_pCumTrans, fnx_pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathToyPOS
print("Test with all file in %s -----" % testPath)
hmm.init(testPath, TOO_FEW=5)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
hmm.pTrans, hmm.pEmiss, hmm.pCumTrans, hmm.pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
testPath = pathBrownData
print("Test with all file in %s -----" % testPath)
hmm.init(testPath, TOO_FEW=5)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
print("Randomly generated sentences ...")
swp = stp = sep = sent = sent_tagged = prob = None
for i in range(5):
print("--- %d ---" % i)
swt, stp, sep, sent, sent_tagged, prob = hmm.generate_sentence( \
hmm.pTrans, hmm.pEmiss, hmm.pCumTrans, hmm.pCumEmiss)
print("SWT---")
print(swt)
print("STP---")
print(stp)
print("SEP---")
print(sep)
print("SENTENCE ---")
print(sent)
print("TAGGED SENTENCE ---")
print(sent_tagged)
print("Sentence probability---")
print(prob)
nowStr = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
print("====" + nowStr + "====")
| true
| true
|
1c42b4fe252a2b8d7e32eecf685d315cd9c26d3f
| 7,458
|
py
|
Python
|
api/src/shallowflow/api/storage.py
|
waikato-datamining/shallow-flow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | null | null | null |
api/src/shallowflow/api/storage.py
|
waikato-datamining/shallow-flow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | 2
|
2021-08-18T22:00:08.000Z
|
2021-08-18T22:00:47.000Z
|
api/src/shallowflow/api/storage.py
|
waikato-datamining/shallowflow
|
3f1d99921e5138598eb164edeb1d23e6f199501c
|
[
"MIT"
] | null | null | null |
from .serialization.vars import AbstractStringReader, add_string_reader
STORAGE_EVENT_ADDED = "added"
STORAGE_EVENT_UPDATED = "updated"
STORAGE_EVENT_DELETED = "deleted"
STORAGE_EVENT_CLEARED = "cleared"
STORAGE_EVENTS = [
STORAGE_EVENT_ADDED,
STORAGE_EVENT_UPDATED,
STORAGE_EVENT_DELETED,
STORAGE_EVENT_CLEARED,
]
VALID_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
def is_valid_name(s):
"""
Checks whether the string is a valid storage name.
:param s: the string to check
:type s: str
:return: True if valid
:rtype: bool
"""
for i in range(len(s)):
if s[i] not in VALID_CHARS:
return False
return True
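# For example (illustrative only): is_valid_name("model_v2-final") returns True,
# while is_valid_name("my item") returns False because the space is not in
# VALID_CHARS.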
class StorageName(str):
"""
Class that enforces correct storage names.
"""
def __new__(cls, s):
if not is_valid_name(s):
raise Exception("Invalid variable name: %s" % s)
return super().__new__(cls, s)
class StorageNameStringReader(AbstractStringReader):
"""
Turns strings into StorageName objects.
"""
def handles(self, cls):
"""
Whether it can convert a string into the specified class.
:param cls: the class to convert to
:type cls: type
:return: True if it can handle it
"""
return issubclass(cls, StorageName)
def convert(self, s, base_type=None):
"""
Turns the string into an object.
:param s: the string to convert
:type s: str
:param base_type: optional type when reconstructing lists etc
:return: the generated object
"""
return StorageName(s)
class StorageChangeEvent(object):
"""
Event that gets sent out if storage changes.
"""
def __init__(self, storage, event_type, key=None):
"""
Initializes the event.
:param storage: the affected storage
:type storage: Storage
:param event_type: the event type
:type event_type: str
:param key: the affected key
:type key: str
"""
if (event_type is not None) and (event_type not in STORAGE_EVENTS):
raise Exception("Invalid storage event type: %s" % event_type)
self.storage = storage
self.event_type = event_type
self.key = key
class StorageChangeListener(object):
"""
Interface for classes that listen to storage change events.
"""
def storage_changed(self, event):
"""
Gets called when the storage changes.
:param event: the event
:type event: StorageChangeEvent
"""
raise NotImplementedError()
class Storage(object):
"""
Manages the storage.
"""
def __init__(self):
"""
Initializes the storage.
"""
self._data = dict()
self._listeners = set()
def add_listener(self, l):
"""
Adds the listener for events.
:param l: the listener to add
:type l: StorageChangeListener
:return: itself
:rtype: Storage
"""
self._listeners.add(l)
return self
def remove_listener(self, l):
"""
Removes the specified listener.
:param l: the listener to remove
:type l: StorageChangeListener
:return: itself
:rtype: Storage
"""
self._listeners.remove(l)
return self
def clear_listeners(self):
"""
Removes all listeners.
:return: itself
:rtype: Storage
"""
self._listeners.clear()
return self
def clear(self):
"""
Removes all stored items.
:return: itself
:rtype: Storage
"""
self._data.clear()
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_CLEARED))
return self
def has(self, key):
"""
Checks whether a storage item is available for the name.
:param key: the storage name to look up
:type key: str
:return: True if available
:rtype: bool
"""
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
return key in self._data
def set(self, key, value):
"""
Adds the specified storage item.
:param key: the key for the item
:type key: str
:param value: the value to store
:type value: object
:return: itself
:rtype: Storage
"""
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key not in self._data:
self._data[key] = value
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_ADDED, key))
else:
self._data[key] = value
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_UPDATED, key))
return self
def get(self, key):
"""
Returns the storage value.
:param key: the key to get the value for
:type key: str
:return: the storage value, None if not available
:rtype: object
"""
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key in self._data:
return self._data[key]
else:
return None
def remove(self, key):
"""
Removes the storage value.
:param key: the name of the value to remove
:type key: str
:return: itself
:rtype: Storage
"""
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key in self._data:
del self._data[key]
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_DELETED, key))
return self
def keys(self):
"""
Returns all the names of the currently stored items.
:return: the set of names
:rtype: set
"""
return self._data.keys()
def merge(self, storage):
"""
Incorporates the supplied storage (replaces any existing ones).
:param storage: the storage to merge
:type storage: Storage
:return: itself
:rtype: Storage
"""
for key in storage.keys():
self.set(key, storage.get(key))
return self
def _notify_listeners(self, event):
"""
Notifies all listeners with the event.
:param event: the event to send
:type event: StorageChangeEvent
"""
for l in self._listeners:
l.storage_changed(event)
def __str__(self):
"""
Returns a string representation of the stored items.
:return: the stored items
:rtype: str
"""
return str(self._data)
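# Minimal usage sketch for Storage and its change listeners (illustrative only,
# names invented for this example):
#
#   class PrintListener(StorageChangeListener):
#       def storage_changed(self, event):
#           print(event.event_type, event.key)
#
#   storage = Storage()
#   storage.add_listener(PrintListener())
#   storage.set("model", {"alpha": 0.1})   # notifies listeners with an "added" event
#   storage.set("model", {"alpha": 0.2})   # notifies with an "updated" event
#   storage.remove("model")                # notifies with a "deleted" event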
class StorageHandler(object):
"""
Interface for classes that manage storage.
"""
@property
def storage(self):
"""
Returns the storage.
:return: the storage
:rtype: Storage
"""
raise NotImplementedError()
class StorageUser(object):
"""
Interface for classes that use storage.
"""
@property
def uses_storage(self):
"""
Returns whether storage is used.
:return: True if used
:rtype: bool
"""
raise NotImplementedError()
# serialization
add_string_reader(StorageNameStringReader)
| 24.135922
| 88
| 0.581121
|
from .serialization.vars import AbstractStringReader, add_string_reader
STORAGE_EVENT_ADDED = "added"
STORAGE_EVENT_UPDATED = "updated"
STORAGE_EVENT_DELETED = "deleted"
STORAGE_EVENT_CLEARED = "cleared"
STORAGE_EVENTS = [
STORAGE_EVENT_ADDED,
STORAGE_EVENT_UPDATED,
STORAGE_EVENT_DELETED,
STORAGE_EVENT_CLEARED,
]
VALID_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
def is_valid_name(s):
for i in range(len(s)):
if s[i] not in VALID_CHARS:
return False
return True
class StorageName(str):
def __new__(cls, s):
if not is_valid_name(s):
raise Exception("Invalid variable name: %s" % s)
return super().__new__(cls, s)
class StorageNameStringReader(AbstractStringReader):
def handles(self, cls):
return issubclass(cls, StorageName)
def convert(self, s, base_type=None):
return StorageName(s)
class StorageChangeEvent(object):
def __init__(self, storage, event_type, key=None):
if (event_type is not None) and (event_type not in STORAGE_EVENTS):
raise Exception("Invalid storage event type: %s" % event_type)
self.storage = storage
self.event_type = event_type
self.key = key
class StorageChangeListener(object):
def storage_changed(self, event):
raise NotImplementedError()
class Storage(object):
def __init__(self):
self._data = dict()
self._listeners = set()
def add_listener(self, l):
self._listeners.add(l)
return self
def remove_listener(self, l):
self._listeners.remove(l)
return self
def clear_listeners(self):
self._listeners.clear()
return self
def clear(self):
self._data.clear()
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_CLEARED))
return self
def has(self, key):
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
return key in self._data
def set(self, key, value):
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key not in self._data:
self._data[key] = value
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_ADDED, key))
else:
self._data[key] = value
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_UPDATED, key))
return self
def get(self, key):
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key in self._data:
return self._data[key]
else:
return None
def remove(self, key):
if not is_valid_name(key):
raise Exception("Invalid storage name: %s" + key)
if key in self._data:
del self._data[key]
self._notify_listeners(StorageChangeEvent(self, STORAGE_EVENT_DELETED, key))
return self
def keys(self):
return self._data.keys()
def merge(self, storage):
for key in storage.keys():
self.set(key, storage.get(key))
return self
def _notify_listeners(self, event):
for l in self._listeners:
l.storage_changed(event)
def __str__(self):
return str(self._data)
class StorageHandler(object):
@property
def storage(self):
raise NotImplementedError()
class StorageUser(object):
@property
def uses_storage(self):
raise NotImplementedError()
add_string_reader(StorageNameStringReader)
| true
| true
|
1c42b50300a237606c6e96e351bb8643bd3bedc4
| 315
|
py
|
Python
|
Learning/Test16_KeywordArguments.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | 1
|
2017-03-07T13:49:27.000Z
|
2017-03-07T13:49:27.000Z
|
Learning/Test16_KeywordArguments.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | null | null | null |
Learning/Test16_KeywordArguments.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | null | null | null |
'''
Keyword Arguments: parameters
'''
def dumb_sentence(name='Bucky', action='ate', item='tuna'):
print(name, action, item)
dumb_sentence()
dumb_sentence("Sally", "farts", "gently")
# Assign values to specific keyword arguments
dumb_sentence(item="awesome")
dumb_sentence(item="awesome", action="is")
dumb_sentence("哈哈")
dumb_sentence("", "", "哈哈")
| 17.5
| 59
| 0.68254
|
def dumb_sentence(name='Bucky', action='ate', item='tuna'):
print(name, action, item)
dumb_sentence()
dumb_sentence("Sally", "farts", "gently")
dumb_sentence(item="awesome")
dumb_sentence(item="awesome", action="is")
dumb_sentence("哈哈")
dumb_sentence("", "", "哈哈")
| true
| true
|
1c42b6cacbed951c3e9b79ad8a47fceae615b25a
| 1,252
|
py
|
Python
|
ict/Interface.py
|
sclel016/ict_py
|
a5333b4a2a882ea64ae88825118e0ab0cc734b67
|
[
"MIT"
] | null | null | null |
ict/Interface.py
|
sclel016/ict_py
|
a5333b4a2a882ea64ae88825118e0ab0cc734b67
|
[
"MIT"
] | null | null | null |
ict/Interface.py
|
sclel016/ict_py
|
a5333b4a2a882ea64ae88825118e0ab0cc734b67
|
[
"MIT"
] | null | null | null |
import pyvisa
import re
class Interface:
inst = ''
ip = ''
ident = ''
rm = pyvisa.ResourceManager()
def __init__(self,ip):
self.ip = ip
self.inst = self.rm.open_resource('TCPIP0::%s::INSTR' % self.ip)
self.ident = self.inst.query("*IDN?")
def query(self,cmd):
return self.inst.query(cmd)
def write(self,cmd):
print(cmd)
self.inst.write(cmd)
    def write_binary_values(self,*args,**kwargs):
        self.inst.write_binary_values(*args,**kwargs)
    def read(self,*args,**kwargs):
        return self.inst.read(*args,**kwargs)
def query_binary_values(self,*args,**kwargs):
return self.inst.query_binary_values(*args,**kwargs)
def query_ascii_values(self,*args,**kwargs):
return self.inst.query_ascii_values(*args,**kwargs)
def read_raw(self,*args,**kwargs):
return self.inst.read_raw(*args,**kwargs)
def read_bytes(self,*args,**kwargs):
return self.inst.read_bytes(*args,**kwargs)
def parse_sci(self,in_str):
expr = r"[+-]?\d+\.\d+([eE][+-]?\d+)?"
return float(re.search(expr, in_str).group())
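# Minimal usage sketch (added for illustration). The address below is a
# placeholder; it assumes a VISA-reachable instrument that answers *IDN?.
if __name__ == '__main__':
    scope = Interface('192.168.0.10')
    print(scope.ident)
    print(scope.parse_sci('VOLT +1.2345E-03'))  # -> 0.0012345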
| 23.185185
| 72
| 0.610224
|
import pyvisa
import re
class Interface:
inst = ''
ip = ''
ident = ''
rm = pyvisa.ResourceManager()
def __init__(self,ip):
self.ip = ip
self.inst = self.rm.open_resource('TCPIP0::%s::INSTR' % self.ip)
self.ident = self.inst.query("*IDN?")
def query(self,cmd):
return self.inst.query(cmd)
def write(self,cmd):
print(cmd)
self.inst.write(cmd)
    def write_binary_values(self,*args,**kwargs):
        self.inst.write_binary_values(*args,**kwargs)
    def read(self,*args,**kwargs):
        return self.inst.read(*args,**kwargs)
def query_binary_values(self,*args,**kwargs):
return self.inst.query_binary_values(*args,**kwargs)
def query_ascii_values(self,*args,**kwargs):
return self.inst.query_ascii_values(*args,**kwargs)
def read_raw(self,*args,**kwargs):
return self.inst.read_raw(*args,**kwargs)
def read_bytes(self,*args,**kwargs):
return self.inst.read_bytes(*args,**kwargs)
def parse_sci(self,in_str):
expr = r"[+-]?\d+\.\d+([eE][+-]?\d+)?"
return float(re.search(expr, in_str).group())
| true
| true
|
1c42b7c5508775b934a83eceaf7f1a2496db8071
| 1,379
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_axis05.py
|
eddiechapman/XlsxWriter
|
c636117ab30e64e4b7b824c9105595c42887c2c9
|
[
"BSD-2-Clause-FreeBSD"
] | 2,766
|
2015-01-02T17:36:42.000Z
|
2022-03-31T09:23:30.000Z
|
xlsxwriter/test/comparison/test_chart_axis05.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 683
|
2015-01-03T09:55:02.000Z
|
2022-03-31T07:18:15.000Z
|
xlsxwriter/test/comparison/test_chart_axis05.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 636
|
2015-01-05T01:57:08.000Z
|
2022-03-25T18:42:41.000Z
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis05.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [47076480, 47078016]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'name': 'XXX'})
chart.set_y_axis({'name': 'YYY'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 25.072727
| 79
| 0.5562
| true
| true
|
|
1c42b967ac3297973c93f17f3c6acb4ba1b51b04
| 3,975
|
py
|
Python
|
metricbeat/tests/system/test_base.py
|
kemokemo/beats
|
dda9f353f1203da243bd76baf53d2e83b6f26c1a
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-11-17T06:29:30.000Z
|
2021-08-08T11:56:01.000Z
|
metricbeat/tests/system/test_base.py
|
kemokemo/beats
|
dda9f353f1203da243bd76baf53d2e83b6f26c1a
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2021-02-02T14:18:40.000Z
|
2022-03-20T15:07:30.000Z
|
metricbeat/tests/system/test_base.py
|
kemokemo/beats
|
dda9f353f1203da243bd76baf53d2e83b6f26c1a
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2021-03-10T05:38:32.000Z
|
2021-08-16T13:11:19.000Z
|
import os
import pytest
import re
import shutil
import sys
import unittest
from metricbeat import BaseTest
from beat.beat import INTEGRATION_TESTS
from beat import common_tests
from elasticsearch import Elasticsearch
class Test(BaseTest, common_tests.TestExportsMixin):
COMPOSE_SERVICES = ['elasticsearch', 'kibana']
@unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os")
def test_start_stop(self):
"""
Metricbeat starts and stops without error.
"""
self.render_config_template(modules=[{
"name": "system",
"metricsets": ["cpu"],
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("start running"))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
# Ensure all Beater stages are used.
assert self.log_contains("Setup Beat: metricbeat")
assert self.log_contains("metricbeat start running")
assert self.log_contains("metricbeat stopped")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_template(self):
"""
Test that the template can be loaded with `setup --template`
"""
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--template", "-E", "setup.template.overwrite=true"])
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='metricbeat-*', h='name')) > 0
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
@pytest.mark.timeout(180, func_only=True)
def test_dashboards(self):
"""
Test that the dashboards can be loaded with `setup --dashboards`
"""
shutil.copytree(self.kibana_dir(), os.path.join(self.working_dir, "kibana"))
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
kibana={"host": self.get_kibana_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--dashboards"])
assert exit_code == 0, 'Error output: ' + self.get_log()
assert self.log_contains("Kibana dashboards successfully loaded.")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_migration(self):
"""
Test that the template loads when migration is enabled
"""
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--template",
"-E", "setup.template.overwrite=true", "-E", "migration.6_to_7.enabled=true"])
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='metricbeat-*', h='name')) > 0
def get_elasticsearch_url(self):
return "http://" + self.compose_host("elasticsearch")
def get_kibana_url(self):
"""
Returns kibana host URL
"""
return "http://" + self.compose_host("kibana")
def kibana_dir(self):
return os.path.join(self.beat_path, "build", "kibana")
| 34.565217
| 124
| 0.597484
|
import os
import pytest
import re
import shutil
import sys
import unittest
from metricbeat import BaseTest
from beat.beat import INTEGRATION_TESTS
from beat import common_tests
from elasticsearch import Elasticsearch
class Test(BaseTest, common_tests.TestExportsMixin):
COMPOSE_SERVICES = ['elasticsearch', 'kibana']
@unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os")
def test_start_stop(self):
self.render_config_template(modules=[{
"name": "system",
"metricsets": ["cpu"],
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("start running"))
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
assert self.log_contains("Setup Beat: metricbeat")
assert self.log_contains("metricbeat start running")
assert self.log_contains("metricbeat stopped")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_template(self):
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--template", "-E", "setup.template.overwrite=true"])
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='metricbeat-*', h='name')) > 0
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
@pytest.mark.timeout(180, func_only=True)
def test_dashboards(self):
shutil.copytree(self.kibana_dir(), os.path.join(self.working_dir, "kibana"))
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
kibana={"host": self.get_kibana_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--dashboards"])
assert exit_code == 0, 'Error output: ' + self.get_log()
assert self.log_contains("Kibana dashboards successfully loaded.")
@unittest.skipUnless(INTEGRATION_TESTS, "integration test")
def test_migration(self):
es = Elasticsearch([self.get_elasticsearch_url()])
self.render_config_template(
modules=[{
"name": "apache",
"metricsets": ["status"],
"hosts": ["localhost"],
}],
elasticsearch={"host": self.get_elasticsearch_url()},
)
exit_code = self.run_beat(extra_args=["setup", "--template",
"-E", "setup.template.overwrite=true", "-E", "migration.6_to_7.enabled=true"])
assert exit_code == 0
assert self.log_contains('Loaded index template')
assert len(es.cat.templates(name='metricbeat-*', h='name')) > 0
def get_elasticsearch_url(self):
return "http://" + self.compose_host("elasticsearch")
def get_kibana_url(self):
return "http://" + self.compose_host("kibana")
def kibana_dir(self):
return os.path.join(self.beat_path, "build", "kibana")
| true
| true
|
1c42b9f7576667b34b41fe63bc2f0dbe7a514dda
| 60,347
|
py
|
Python
|
vivarium/core/experiment.py
|
U8NWXD/vivarium
|
19c6a4096fe94e3342e40ce03e6708c24dd38fa3
|
[
"MIT"
] | null | null | null |
vivarium/core/experiment.py
|
U8NWXD/vivarium
|
19c6a4096fe94e3342e40ce03e6708c24dd38fa3
|
[
"MIT"
] | null | null | null |
vivarium/core/experiment.py
|
U8NWXD/vivarium
|
19c6a4096fe94e3342e40ce03e6708c24dd38fa3
|
[
"MIT"
] | null | null | null |
"""
==========================================
Experiment, Compartment, and Store Classes
==========================================
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import random
import datetime
import numpy as np
import logging as log
import pprint
pretty=pprint.PrettyPrinter(indent=2)
def pp(x):
pretty.pprint(x)
def pf(x):
return pretty.pformat(x)
from vivarium.library.units import Quantity
from vivarium.library.dict_utils import merge_dicts, deep_merge, deep_merge_check
from vivarium.core.emitter import get_emitter
from vivarium.core.process import Process
from vivarium.core.repository import (
divider_library,
updater_library,
deriver_library,
serializer_library,
)
INFINITY = float('inf')
VERBOSE = False
log.basicConfig(level=os.environ.get("LOGLEVEL", log.WARNING))
# Store
def key_for_value(d, looking):
found = None
for key, value in d.items():
if looking == value:
found = key
break
return found
def get_in(d, path):
if path:
head = path[0]
if head in d:
return get_in(d[head], path[1:])
else:
return d
def assoc_in(d, path, value):
if path:
return dict(d, **{path[0]: assoc_in(d.get(path[0], {}), path[1:], value)})
else:
return value
def assoc_path(d, path, value):
if path:
head = path[0]
if len(path) == 1:
d[head] = value
else:
if head not in d:
d[head] = {}
assoc_path(d[head], path[1:], value)
else:
value
def update_in(d, path, f):
if path:
head = path[0]
if len(path) == 1:
d[head] = f(d.get(head, None))
else:
            if head not in d:
d[head] = {}
update_in(d[head], path[1:], f)
def dissoc(d, removing):
return {
key: value
for key, value in d.items()
if key not in removing}
def without(d, removing):
return {
key: value
for key, value in d.items()
if key != removing}
def schema_for(port, keys, initial_state, default=0.0, updater='accumulate'):
return {
key: {
'_default': initial_state.get(
port, {}).get(key, default),
'_updater': updater}
for key in keys}
def always_true(x):
return True
def identity(y):
return y
class Store(object):
"""Holds a subset of the overall model state
The total state of the model can be broken down into :term:`stores`,
each of which is represented by an instance of this `Store` class.
The store's state is a set of :term:`variables`, each of which is
defined by a set of :term:`schema key-value pairs`. The valid schema
keys are listed in :py:attr:`schema_keys`, and they are:
* **_default** (Type should match the variable value): The default
value of the variable.
* **_updater** (:py:class:`str`): The name of the :term:`updater` to
use. By default this is ``accumulate``.
    * **_value** (Type should match the variable value): The current
      value of the variable. This is ``None`` by default.
    * **_properties** (:py:class:`dict`): Extra properties of the
      variable that don't have a specific schema key. This is an empty
      dictionary by default.
    * **_emit** (:py:class:`bool`): Whether to emit the variable to the
      :term:`emitter`. This is ``False`` by default.
    * **_serializer** (:py:class:`str` or serializer object): The
      serializer used when emitting the variable; a string is looked up
      in the serializer library.

    A ``_divider`` key (the :term:`divider` applied at division) is also
    accepted by ``apply_config``, but it is not part of
    :py:attr:`schema_keys`. A brief usage sketch follows this class
    definition.
    """
schema_keys = set([
'_default',
'_updater',
'_value',
'_properties',
'_emit',
'_serializer',
])
def __init__(self, config, outer=None, source=None):
self.outer = outer
self.inner = {}
self.subschema = {}
self.subtopology = {}
self.properties = {}
self.default = None
self.updater = None
self.value = None
self.units = None
self.divider = None
self.emit = False
self.sources = {}
self.deleted = False
self.leaf = False
self.serializer = None
self.apply_config(config, source)
def check_default(self, new_default):
if self.default is not None and new_default != self.default:
if new_default == 0 and self.default != 0:
log.info('_default schema conflict: {} and {}. selecting {}'.format(
self.default, new_default, self.default))
return self.default
else:
log.info('_default schema conflict: {} and {}. selecting {}'.format(
self.default, new_default, new_default))
return new_default
def check_value(self, new_value):
if self.value is not None and new_value != self.value:
raise Exception('_value schema conflict: {} and {}'.format(new_value, self.value))
return new_value
def merge_subtopology(self, subtopology):
self.subtopology = deep_merge(self.subtopology, subtopology)
def apply_subschema_config(self, subschema):
self.subschema = deep_merge(
self.subschema,
subschema)
def apply_config(self, config, source=None):
'''
Expand the tree by applying additional config.
Special keys for the config are:
* _default - Default value for this node.
* _properties - An arbitrary map of keys to values. This can be used
for any properties which exist outside of the operation of the
tree (like mass or energy).
* _updater - Which updater to use. Default is 'accumulate' which
adds the new value to the existing value, but 'set' is common
as well. You can also provide your own function here instead of
a string key into the updater library.
* _emit - whether or not to emit the values under this point in the tree.
* _divider - What to do with this node when division happens.
Default behavior is to leave it alone, but you can also pass
'split' here, or a function of your choosing. If you need other
values from the state you need to supply a dictionary here
containing the updater and the topology for where the other
state values are coming from. This has two keys:
* divider - a function that takes the existing value and any
values supplied from the adjoining topology.
* topology - a mapping of keys to paths where the value for
those keys will be found. This will be passed in as the second
argument to the divider function.
* _subschema/* - If this node was declared to house an unbounded set
of related states, the schema for these states is held in this
nodes subschema and applied whenever new subkeys are added here.
* _subtopology - The subschema is informed by the subtopology to
map the process perspective to the actual tree structure.
'''
if '*' in config:
self.apply_subschema_config(config['*'])
config = without(config, '*')
if '_subschema' in config:
if source:
self.sources[source] = config['_subschema']
self.apply_subschema_config(config['_subschema'])
config = without(config, '_subschema')
if '_subtopology' in config:
self.merge_subtopology(config['_subtopology'])
config = without(config, '_subtopology')
if '_divider' in config:
self.divider = config['_divider']
if isinstance(self.divider, str):
self.divider = divider_library[self.divider]
if isinstance(self.divider, dict) and isinstance(self.divider['divider'], str):
self.divider['divider'] = divider_library[self.divider['divider']]
config = without(config, '_divider')
if self.schema_keys & set(config.keys()):
if self.inner:
raise Exception('trying to assign leaf values to a branch at: {}'.format(self.path_for()))
self.leaf = True
# self.units = config.get('_units', self.units)
if '_serializer' in config:
self.serializer = config['_serializer']
if isinstance(self.serializer, str):
self.serializer = serializer_library[self.serializer]
if '_default' in config:
self.default = self.check_default(config.get('_default'))
if isinstance(self.default, Quantity):
self.units = self.default.units
if isinstance(self.default, np.ndarray):
self.serializer = self.serializer or serializer_library['numpy']
if '_value' in config:
self.value = self.check_value(config.get('_value'))
if isinstance(self.value, Quantity):
self.units = self.value.units
self.updater = config.get('_updater', self.updater or 'accumulate')
if isinstance(self.updater, str):
self.updater = updater_library[self.updater]
self.properties = deep_merge(
self.properties,
config.get('_properties', {}))
self.emit = config.get('_emit', self.emit)
if source:
self.sources[source] = config
else:
if self.leaf and config:
                raise Exception('trying to create inner nodes for a leaf node at: {}'.format(self.path_for()))
self.value = None
for key, child in config.items():
if key not in self.inner:
self.inner[key] = Store(child, outer=self, source=source)
else:
self.inner[key].apply_config(child, source=source)
def get_updater(self, update):
updater = self.updater
if '_updater' in update:
updater = update['_updater']
if isinstance(updater, str):
updater = updater_library[updater]
return updater
def get_config(self, sources=False):
'''
Assemble a dictionary representation of the config for this node.
A desired property is that the node can be exactly recreated by
applying the resulting config to an empty node again.
'''
config = {}
if self.properties:
config['_properties'] = self.properties
if self.subschema:
config['_subschema'] = self.subschema
if self.subtopology:
config['_subtopology'] = self.subtopology
if self.divider:
config['_divider'] = self.divider
if sources and self.sources:
config['_sources'] = self.sources
if self.inner:
child_config = {
key: child.get_config(sources)
for key, child in self.inner.items()}
config.update(child_config)
else:
config.update({
'_default': self.default,
'_value': self.value})
if self.updater:
config['_updater'] = self.updater
if self.units:
config['_units'] = self.units
if self.emit:
config['_emit'] = self.emit
return config
def top(self):
'''
Find the top of this tree.
'''
if self.outer:
return self.outer.top()
else:
return self
def path_for(self):
'''
Find the path to this node.
'''
if self.outer:
key = key_for_value(self.outer.inner, self)
above = self.outer.path_for()
return above + (key,)
else:
return tuple()
def get_value(self, condition=None, f=None):
'''
Pull the values out of the tree in a structure symmetrical to the tree.
'''
if self.inner:
if condition is None:
condition = always_true
if f is None:
f = identity
return {
key: f(child.get_value(condition, f))
for key, child in self.inner.items()
if condition(child)}
else:
if self.subschema:
return {}
else:
return self.value
def get_path(self, path):
'''
Get the node at the given path relative to this node.
'''
if path:
step = path[0]
if step == '..':
child = self.outer
else:
child = self.inner.get(step)
if child:
return child.get_path(path[1:])
else:
# TODO: more handling for bad paths?
return None
else:
return self
def get_paths(self, paths):
return {
key: self.get_path(path)
for key, path in paths.items()}
def get_values(self, paths):
return {
key: self.get_in(path)
for key, path in paths.items()}
def get_in(self, path):
return self.get_path(path).get_value()
def get_template(self, template):
"""
Pass in a template dict with None for each value you want to
retrieve from the tree!
"""
state = {}
for key, value in template.items():
child = self.inner[key]
if value is None:
state[key] = child.get_value()
else:
state[key] = child.get_template(value)
return state
def emit_data(self):
data = {}
if self.inner:
for key, child in self.inner.items():
child_data = child.emit_data()
if child_data is not None or child_data == 0:
data[key] = child_data
return data
else:
if self.emit:
if self.serializer:
return self.serializer.serialize(self.value)
elif isinstance(self.value, Process):
return self.value.pull_data()
else:
if self.units:
return self.value.to(self.units).magnitude
else:
return self.value
def mark_deleted(self):
'''
When nodes are removed from the tree, they are marked as deleted
in case something else has a reference to them.
'''
self.deleted = True
if self.inner:
for child in self.inner.values():
child.mark_deleted()
def delete_path(self, path):
'''
Delete the subtree at the given path.
'''
if not path:
self.inner = {}
self.value = None
return self
else:
target = self.get_path(path[:-1])
remove = path[-1]
if remove in target.inner:
lost = target.inner[remove]
del target.inner[remove]
lost.mark_deleted()
return lost
def divide_value(self):
'''
Apply the divider for each node to the value in that node to
assemble two parallel divided states of this subtree.
'''
if self.divider:
# divider is either a function or a dict with topology
if isinstance(self.divider, dict):
divider = self.divider['divider']
topology = self.divider['topology']
state = self.outer.get_values(topology)
return divider(self.get_value(), state)
else:
return self.divider(self.get_value())
elif self.inner:
daughters = [{}, {}]
for key, child in self.inner.items():
division = child.divide_value()
if division:
for daughter, divide in zip(daughters, division):
daughter[key] = divide
return daughters
def reduce(self, reducer, initial=None):
'''
Call the reducer on each node accumulating over the result.
'''
value = initial
for path, node in self.depth():
value = reducer(value, path, node)
return value
def reduce_to(self, path, reducer, initial=None):
value = self.reduce(reducer, initial)
        update = {}
        assoc_path(update, path, value)
        self.apply_update(update)
def set_value(self, value):
'''
Set the value for the given tree elements directly instead of using
the updaters from their nodes.
'''
if self.inner or self.subschema:
for child, inner_value in value.items():
if child not in self.inner:
if self.subschema:
self.inner[child] = Store(self.subschema, self)
else:
pass
# TODO: continue to ignore extra keys?
# print("setting value that doesn't exist in tree {} {}".format(
# child, inner_value))
if child in self.inner:
self.inner[child].set_value(inner_value)
else:
self.value = value
def apply_defaults(self):
'''
If value is None, set to default.
'''
if self.inner:
for child in self.inner.values():
child.apply_defaults()
else:
if self.value is None:
self.value = self.default
def apply_update(self, update):
'''
Given an arbitrary update, map all the values in that update
to their positions in the tree where they apply, and update
these values using each node's `_updater`.
There are five special update keys:
* `_updater` - Override the default updater with any updater you want.
* `_delete` - The value here is a list of paths to delete from
the tree.
* `_generate` - The value has four keys, which are essentially
the arguments to the `generate()` function:
* path - Path into the tree to generate this subtree.
* processes - Tree of processes to generate.
* topology - Connections of all the process's `ports_schema()`.
* initial_state - Initial state for this new subtree.
* `_divide` - Performs cell division by constructing two new
          daughter cells and removing the mother. Takes a dict with two keys:
* mother - The id of the mother (for removal)
* daughters - List of two new daughter generate directives, of the
same form as the `_generate` value above.
* `_reduce` - This allows a reduction over the entire subtree from some
point downward. Its three keys are:
* from - What point to start the reduction.
* initial - The initial value of the reduction.
* reducer - A function of three arguments, which is called
on every node from the `from` point in the tree down:
* value - The current accumulated value of the reduction.
* path - The path to this point in the tree
* node - The actual node being visited.
This function returns the next `value` for the reduction.
The result of the reduction will be assigned to this
point in the tree.
'''
if self.inner or self.subschema:
topology_updates = {}
if '_delete' in update:
# delete a list of paths
for path in update['_delete']:
self.delete_path(path)
update = dissoc(update, ['_delete'])
if '_add' in update:
# add a list of sub-compartments
for added in update['_add']:
path = added['path']
state = added['state']
target = self.establish_path(path, {})
target.set_value(state)
self.apply_subschemas()
self.apply_defaults()
update = dissoc(update, ['_add'])
if '_generate' in update:
# generate a list of new compartments
for generate in update['_generate']:
self.generate(
generate['path'],
generate['processes'],
generate['topology'],
generate['initial_state'])
assoc_path(
topology_updates,
generate['path'],
generate['topology'])
self.apply_subschemas()
self.apply_defaults()
update = dissoc(update, '_generate')
if '_divide' in update:
# use dividers to find initial states for daughters
divide = update['_divide']
mother = divide['mother']
daughters = divide['daughters']
initial_state = self.inner[mother].get_value(
condition=lambda child: not (isinstance(child.value, Process)),
f=lambda child: copy.deepcopy(child))
states = self.inner[mother].divide_value()
for daughter, state in zip(daughters, states):
daughter_id = daughter['daughter']
                # use initial state as default, merge in divided values
initial_state = deep_merge(
initial_state,
state)
self.generate(
daughter['path'],
daughter['processes'],
daughter['topology'],
daughter['initial_state'])
assoc_path(
topology_updates,
daughter['path'],
daughter['topology'])
self.apply_subschemas()
self.inner[daughter_id].set_value(initial_state)
self.apply_defaults()
self.delete_path((mother,))
update = dissoc(update, '_divide')
for key, value in update.items():
if key in self.inner:
child = self.inner[key]
inner_updates = child.apply_update(value)
if inner_updates:
topology_updates = deep_merge(
topology_updates,
{key: inner_updates})
elif self.subschema:
self.inner[key] = Store(self.subschema, self)
self.inner[key].set_value(value)
self.inner[key].apply_defaults()
return topology_updates
else:
if isinstance(update, dict) and '_reduce' in update:
reduction = update['_reduce']
top = self.get_path(reduction.get('from'))
update = top.reduce(
reduction['reducer'],
initial=reduction['initial'])
updater = self.updater
if (
isinstance(update, dict) and self.schema_keys & set(update.keys())
):
if '_updater' in update:
updater = self.get_updater(update)
update = update.get('_value', self.default)
self.value = updater(self.value, update)
def inner_value(self, key):
'''
Get the value of an inner state
'''
if key in self.inner:
return self.inner[key].get_value()
def topology_state(self, topology):
'''
Fill in the structure of the given topology with the values at all
the paths the topology points at. Essentially, anywhere in the topology
that has a tuple path will be filled in with the value at that path.
This is the inverse function of the standalone `inverse_topology`.
'''
state = {}
for key, path in topology.items():
if key == '*':
if isinstance(path, dict):
node, path = self.outer_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.topology_state(path)
else:
node = self.get_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.get_value()
elif isinstance(path, dict):
node, path = self.outer_path(path)
state[key] = node.topology_state(path)
else:
state[key] = self.get_path(path).get_value()
return state
def schema_topology(self, schema, topology):
'''
Fill in the structure of the given schema with the values located according
to the given topology.
'''
state = {}
if self.leaf:
state = self.get_value()
else:
for key, subschema in schema.items():
path = topology.get(key)
if key == '*':
if isinstance(path, dict):
node, path = self.outer_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.schema_topology(subschema, path)
else:
node = self.get_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.schema_topology(subschema, {})
elif key == '_divider':
pass
elif isinstance(path, dict):
node, path = self.outer_path(path)
state[key] = node.schema_topology(subschema, path)
else:
if path is None:
path = (key,)
node = self.get_path(path)
state[key] = node.schema_topology(subschema, {})
return state
def state_for(self, path, keys):
'''
Get the value of a state at a given path
'''
state = self.get_path(path)
if state is None:
return {}
elif keys and keys[0] == '*':
return state.get_value()
else:
return {
key: state.inner_value(key)
for key in keys}
def depth(self, path=()):
'''
Create a mapping of every path in the tree to the node living at
that path in the tree.
'''
base = [(path, self)]
for key, child in self.inner.items():
down = tuple(path + (key,))
base += child.depth(down)
return base
def processes(self, path=()):
return {
path: state
for path, state in self.depth()
if state.value and isinstance(state.value, Process)}
def apply_subschema(self, subschema=None, subtopology=None, source=None):
'''
Apply a subschema to all inner nodes (either provided or from this
node's personal subschema) as governed by the given/personal
subtopology.
'''
if subschema is None:
subschema = self.subschema
if subtopology is None:
subtopology = self.subtopology or {}
inner = list(self.inner.items())
for child_key, child in inner:
child.topology_ports(
subschema,
subtopology,
source=self.path_for() + ('*',))
def apply_subschemas(self):
'''
Apply all subschemas from all nodes at this point or lower in the tree.
'''
if self.subschema:
self.apply_subschema()
for child in self.inner.values():
child.apply_subschemas()
def update_subschema(self, path, subschema):
'''
Merge a new subschema into an existing subschema at the given path.
'''
target = self.get_path(path)
if target.subschema is None:
target.subschema = subschema
else:
target.subschema = deep_merge(
target.subschema,
subschema)
return target
def establish_path(self, path, config, source=None):
'''
Create a node at the given path if it does not exist, then
apply a config to it.
Paths can include '..' to go up a level (which raises an exception
if that level does not exist).
'''
if len(path) > 0:
path_step = path[0]
remaining = path[1:]
if path_step == '..':
if not self.outer:
raise Exception('outer does not exist for path: {}'.format(path))
return self.outer.establish_path(
remaining,
config,
source=source)
else:
if path_step not in self.inner:
self.inner[path_step] = Store({}, outer=self, source=source)
return self.inner[path_step].establish_path(
remaining,
config,
source=source)
else:
self.apply_config(config, source=source)
return self
def outer_path(self, path, source=None):
'''
Address a topology with the `_path` keyword if present,
establishing a path to this node and using it as the
starting point for future path operations.
'''
node = self
if '_path' in path:
node = self.establish_path(
path['_path'],
{},
source=source)
path = without(path, '_path')
return node, path
def topology_ports(self, schema, topology, source=None):
'''
Distribute a schema into the tree by mapping its ports
according to the given topology.
'''
source = source or self.path_for()
if set(schema.keys()) & self.schema_keys:
self.get_path(topology).apply_config(schema)
else:
mismatch_topology = (
set(topology.keys()) - set(schema.keys()))
mismatch_schema = (
set(schema.keys()) - set(topology.keys()))
if mismatch_topology:
raise Exception(
'topology at path {} and source {} has keys that are not in the schema: {}'.format(
self.path_for(), source, mismatch_topology))
if mismatch_schema:
log.info('{} schema has keys not in topology: {}'.format(
source, mismatch_schema))
for port, subschema in schema.items():
path = topology.get(port, (port,))
if port == '*':
subschema_config = {
'_subschema': subschema}
if isinstance(path, dict):
node, path = self.outer_path(
path, source=source)
node.merge_subtopology(path)
node.apply_config(subschema_config)
else:
node = self.establish_path(
path,
subschema_config,
source=source)
node.apply_subschema()
node.apply_defaults()
elif isinstance(path, dict):
node, path = self.outer_path(
path, source=source)
node.topology_ports(
subschema,
path,
source=source)
else:
self.establish_path(
path,
subschema,
source=source)
def generate_paths(self, processes, topology):
for key, subprocess in processes.items():
subtopology = topology[key]
if isinstance(subprocess, Process):
process_state = Store({
'_value': subprocess,
'_updater': 'set'}, outer=self)
self.inner[key] = process_state
subprocess.schema = subprocess.ports_schema()
self.topology_ports(
subprocess.schema,
subtopology,
source=self.path_for() + (key,))
else:
if key not in self.inner:
self.inner[key] = Store({}, outer=self)
self.inner[key].generate_paths(
subprocess,
subtopology)
def generate(self, path, processes, topology, initial_state):
'''
Generate a subtree of this store at the given path.
The processes will be mapped into locations in the tree by the
topology, and once everything is constructed the initial_state
will be applied.
'''
target = self.establish_path(path, {})
target.generate_paths(processes, topology)
target.set_value(initial_state)
target.apply_subschemas()
target.apply_defaults()
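
def example_store_update():
    # Illustrative sketch (not part of the original module): build a small
    # Store from the schema keys documented on the class above, apply an
    # update, then use the '_delete' key described in `apply_update`.
    schema = {
        'cytoplasm': {
            'glucose': {
                '_default': 1.0,
                '_updater': 'accumulate',
                '_emit': True},
            'label': {
                '_default': 'wild-type',
                '_updater': 'set'}}}
    store = Store(schema)
    store.apply_defaults()
    store.apply_update({'cytoplasm': {'glucose': 0.5, 'label': 'mutant'}})
    assert store.get_in(('cytoplasm', 'glucose')) == 1.5     # accumulated
    assert store.get_in(('cytoplasm', 'label')) == 'mutant'  # set
    store.apply_update({'_delete': [('cytoplasm', 'label')]})
    assert store.get_in(('cytoplasm',)) == {'glucose': 1.5}
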
def inverse_topology(outer, update, topology):
'''
Transform an update from the form its process produced into
one aligned to the given topology.
The inverse of this function (using a topology to construct a view for
the perspective of a Process ports_schema()) lives in `Store`, called
`topology_state`. This one stands alone as it does not require a store
to calculate.
'''
inverse = {}
for key, path in topology.items():
if key == '*':
if isinstance(path, dict):
node = inverse
if '_path' in path:
inner = normalize_path(outer + path['_path'])
node = get_in(inverse, inner)
if node is None:
node = {}
assoc_path(inverse, inner, node)
path = without(path, '_path')
for child, child_update in update.items():
node[child] = inverse_topology(
tuple(),
update[child],
path)
else:
for child, child_update in update.items():
inner = normalize_path(outer + path + (child,))
assoc_path(inverse, inner, child_update)
elif key in update:
value = update[key]
if isinstance(path, dict):
node = inverse
if '_path' in path:
inner = normalize_path(outer + path['_path'])
node = get_in(inverse, inner)
if node is None:
node = {}
assoc_path(inverse, inner, node)
path = without(path, '_path')
node.update(inverse_topology(
tuple(),
value,
path))
else:
inner = normalize_path(outer + path)
assoc_path(inverse, inner, value)
return inverse
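
def example_inverse_topology():
    # Illustrative sketch (not part of the original module): a process emits
    # an update keyed by its port names; inverse_topology re-nests it under
    # the tree paths named by the topology, relative to the `outer` path.
    update = {'internal': {'glucose': 1}}
    topology = {'internal': ('cell', 'internal')}
    inverted = inverse_topology(('agents', '1'), update, topology)
    assert inverted == {
        'agents': {'1': {'cell': {'internal': {'glucose': 1}}}}}
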
def generate_derivers(processes, topology):
deriver_processes = {}
deriver_topology = {}
for process_key, node in processes.items():
subtopology = topology[process_key]
if isinstance(node, Process):
for deriver_key, config in node.derivers().items():
if deriver_key not in deriver_processes:
# generate deriver process
deriver_config = config.get('config', {})
generate = config['deriver']
if isinstance(generate, str):
generate = deriver_library[generate]
deriver = generate(deriver_config)
deriver_processes[deriver_key] = deriver
# generate deriver topology
deriver_topology[deriver_key] = {}
for target, source in config.get('port_mapping', {}).items():
path = subtopology[source]
deriver_topology[deriver_key][target] = path
else:
subderivers = generate_derivers(node, subtopology)
deriver_processes[process_key] = subderivers['processes']
deriver_topology[process_key] = subderivers['topology']
return {
'processes': deriver_processes,
'topology': deriver_topology}
class Compartment(object):
"""Compartment parent class
All :term:`compartment` classes must inherit from this class.
"""
def __init__(self, config):
self.config = config
def generate_processes(self, config):
"""Generate processes dictionary
Every subclass must override this method.
Arguments:
config (dict): A dictionary of configuration options. All
subclass implementation must accept this parameter, but
some may ignore it.
Returns:
dict: Subclass implementations must return a dictionary
mapping process names to instantiated and configured process
objects.
"""
return {}
def generate_topology(self, config):
"""Generate topology dictionary
Every subclass must override this method.
Arguments:
config (dict): A dictionary of configuration options. All
subclass implementation must accept this parameter, but
some may ignore it.
Returns:
dict: Subclass implementations must return a :term:`topology`
dictionary.
"""
return {}
def generate(self, config=None, path=tuple()):
'''Generate processes and topology dictionaries for the compartment
Arguments:
config (dict): Updates values in the configuration declared
in the constructor
path (tuple): Tuple with ('path', 'to', 'level') associates
the processes and topology at this level
Returns:
dict: Dictionary with two keys: ``processes``, which has a
value of a processes dictionary, and ``topology``, which has
a value of a topology dictionary. Both are suitable to be
passed to the constructor for
:py:class:`vivarium.core.experiment.Experiment`.
'''
# merge config with self.config
if config is None:
config = self.config
else:
default = copy.deepcopy(self.config)
config = deep_merge(default, config)
processes = self.generate_processes(config)
topology = self.generate_topology(config)
# add derivers
derivers = generate_derivers(processes, topology)
processes = deep_merge(derivers['processes'], processes)
topology = deep_merge(derivers['topology'], topology)
return {
'processes': assoc_in({}, path, processes),
'topology': assoc_in({}, path, topology)}
def or_default(self, parameters, key):
return parameters.get(key, self.defaults[key])
def get_parameters(self):
network = self.generate({})
processes = network['processes']
return {
process_id: process.parameters
for process_id, process in processes.items()}
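
def example_compartment():
    # Illustrative sketch (not part of the original module) of the minimum a
    # Compartment subclass must provide. `MyProcess` is a hypothetical,
    # already-written Process subclass named only for illustration; what
    # matters here is the shape of the returned dictionaries.
    class MinimalCompartment(Compartment):
        defaults = {'growth_rate': 0.1}

        def generate_processes(self, config):
            # map process names to configured Process instances
            return {'growth': MyProcess({'rate': config['growth_rate']})}

        def generate_topology(self, config):
            # map each process port to a path in the store tree
            return {'growth': {'global': ('global',)}}

    # MinimalCompartment({'growth_rate': 0.2}).generate() returns
    # {'processes': {...}, 'topology': {...}}, ready for an Experiment.
    return MinimalCompartment
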
def generate_state(processes, topology, initial_state):
state = Store({})
state.generate_paths(processes, topology)
state.apply_subschemas()
state.set_value(initial_state)
state.apply_defaults()
return state
def normalize_path(path):
progress = []
for step in path:
if step == '..' and len(progress) > 0:
progress = progress[:-1]
else:
progress.append(step)
return progress
def timestamp(dt=None):
if not dt:
dt = datetime.datetime.now()
return "%04d%02d%02d.%02d%02d%02d" % (
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second)
class Experiment(object):
def __init__(self, config):
"""Defines simulations
Arguments:
config (dict): A dictionary of configuration options. The
required options are:
* **processes** (:py:class:`dict`): A dictionary that
maps :term:`process` names to process objects. You
will usually get this from the ``processes``
attribute of the dictionary from
:py:meth:`vivarium.core.experiment.Compartment.generate`.
* **topology** (:py:class:`dict`): A dictionary that
maps process names to sub-dictionaries. These
sub-dictionaries map the process's port names to
tuples that specify a path through the :term:`tree`
from the :term:`compartment` root to the
:term:`store` that will be passed to the process for
that port.
The following options are optional:
* **experiment_id** (:py:class:`uuid.UUID` or
:py:class:`str`): A unique identifier for the
experiment. A UUID will be generated if none is
provided.
* **description** (:py:class:`str`): A description of
the experiment. A blank string by default.
* **initial_state** (:py:class:`dict`): By default an
empty dictionary, this is the initial state of the
simulation.
* **emitter** (:py:class:`dict`): An emitter
configuration which must conform to the
specification in the documentation for
:py:func:`vivarium.core.emitter.get_emitter`. The
experiment ID will be added to the dictionary you
provide as the value for the key ``experiment_id``.
"""
self.config = config
self.experiment_id = config.get(
'experiment_id', timestamp(datetime.datetime.utcnow()))
self.description = config.get('description', '')
self.processes = config['processes']
self.topology = config['topology']
self.initial_state = config.get('initial_state', {})
self.emit_step = config.get('emit_step')
self.state = generate_state(
self.processes,
self.topology,
self.initial_state)
emitter_config = config.get('emitter', {})
emitter_config['experiment_id'] = self.experiment_id
self.emitter = get_emitter(emitter_config)
self.local_time = 0.0
# run the derivers
self.send_updates([])
# run the emitter
self.emit_configuration()
self.emit_data()
log.info('experiment {}'.format(self.experiment_id))
log.info('\nPROCESSES:')
log.info(pf(self.processes))
log.info('\nTOPOLOGY:')
log.info(pf(self.topology))
log.info('\nSTATE:')
log.info(pf(self.state.get_value()))
log.info('\nCONFIG:')
log.info(pf(self.state.get_config(True)))
def emit_configuration(self):
data = {
'time_created': timestamp(),
'experiment_id': self.experiment_id,
'description': self.description,
# TODO -- serialize processes, topology, state
# 'processes': self.processes,
# 'topology': self.topology,
# 'state': self.state.get_config()
}
emit_config = {
'table': 'configuration',
'data': data}
self.emitter.emit(emit_config)
def process_update(self, path, state, interval):
process = state.value
process_topology = get_in(self.topology, path)
# translate the values from the tree structure into the form
# that this process expects, based on its declared topology
ports = state.outer.schema_topology(process.schema, process_topology)
# perform the process update with the current states
update = process.next_update(interval, ports)
# translate the values from the process update back into the
# paths they have in the state tree
# inverse = inverse_topology(path[:-1], update, process_topology)
# absolute = assoc_in({}, path[:-1], inverse)
absolute = inverse_topology(path[:-1], update, process_topology)
return absolute
def apply_update(self, update):
topology_updates = self.state.apply_update(update)
if topology_updates:
self.topology = deep_merge(self.topology, topology_updates)
def run_derivers(self, derivers):
for path, deriver in derivers.items():
# timestep shouldn't influence derivers
if not deriver.deleted:
update = self.process_update(path, deriver, 0)
self.apply_update(update)
def emit_data(self):
data = self.state.emit_data()
data.update({
'time': self.local_time})
emit_config = {
'table': 'history',
'data': data}
self.emitter.emit(emit_config)
def send_updates(self, updates, derivers=None):
for update in updates:
self.apply_update(update)
if derivers is None:
derivers = {
path: state
for path, state in self.state.depth()
if state.value is not None and isinstance(state.value, Process) and state.value.is_deriver()}
self.run_derivers(derivers)
def update(self, interval):
""" Run each process for the given interval and update the related states. """
time = 0
emit_time = self.emit_step
def empty_front(t):
return {
'time': t,
'update': {}}
# keep track of which processes have simulated until when
front = {}
while time < interval:
full_step = INFINITY
            if VERBOSE:
                for node_path, node in self.state.depth():
                    print('{}: {} {}'.format(time, node_path, node.get_value()))
# find all existing processes and derivers in the tree
processes = {}
derivers = {}
for path, state in self.state.depth():
if state.value is not None and isinstance(state.value, Process):
if state.value.is_deriver():
derivers[path] = state
else:
processes[path] = state
# setup a way to track how far each process has simulated in time
front = {
path: process
for path, process in front.items()
if path in processes}
# go through each process and find those that are able to update
# based on their current time being less than the global time.
for path, state in processes.items():
if not path in front:
front[path] = empty_front(time)
process_time = front[path]['time']
if process_time <= time:
process = state.value
future = min(process_time + process.local_timestep(), interval)
timestep = future - process_time
# calculate the update for this process
update = self.process_update(path, state, timestep)
# store the update to apply at its projected time
if timestep < full_step:
full_step = timestep
front[path]['time'] = future
front[path]['update'] = update
if full_step == INFINITY:
# no processes ran, jump to next process
next_event = interval
for process_name in front.keys():
                    if front[process_name]['time'] < next_event:
                        next_event = front[process_name]['time']
time = next_event
else:
# at least one process ran, apply updates and continue
future = time + full_step
updates = []
paths = []
for path, advance in front.items():
if advance['time'] <= future:
new_update = advance['update']
new_update['_path'] = path
updates.append(new_update)
advance['update'] = {}
paths.append(path)
self.send_updates(updates, derivers)
time = future
self.local_time += full_step
if self.emit_step is None:
self.emit_data()
elif emit_time <= time:
while emit_time <= time:
self.emit_data()
emit_time += self.emit_step
for process_name, advance in front.items():
assert advance['time'] == time == interval
assert len(advance['update']) == 0
# def update_interval(self, time, interval):
# while self.local_time < time:
# self.update(interval)
# Tests
def test_recursive_store():
environment_config = {
'environment': {
'temperature': {
'_default': 0.0,
'_updater': 'accumulate'},
'fields': {
(0, 1): {
'enzymeX': {
'_default': 0.0,
'_updater': 'set'},
'enzymeY': {
'_default': 0.0,
'_updater': 'set'}},
(0, 2): {
'enzymeX': {
'_default': 0.0,
'_updater': 'set'},
'enzymeY': {
'_default': 0.0,
'_updater': 'set'}}},
'agents': {
'1': {
'location': {
'_default': (0, 0),
'_updater': 'set'},
'boundary': {
'external': {
'_default': 0.0,
'_updater': 'set'},
'internal': {
'_default': 0.0,
'_updater': 'set'}},
'transcripts': {
'flhDC': {
'_default': 0,
'_updater': 'accumulate'},
'fliA': {
'_default': 0,
'_updater': 'accumulate'}},
'proteins': {
'ribosome': {
'_default': 0,
'_updater': 'set'},
'flagella': {
'_default': 0,
'_updater': 'accumulate'}}},
'2': {
'location': {
'_default': (0, 0),
'_updater': 'set'},
'boundary': {
'external': {
'_default': 0.0,
'_updater': 'set'},
'internal': {
'_default': 0.0,
'_updater': 'set'}},
'transcripts': {
'flhDC': {
'_default': 0,
'_updater': 'accumulate'},
'fliA': {
'_default': 0,
'_updater': 'accumulate'}},
'proteins': {
'ribosome': {
'_default': 0,
'_updater': 'set'},
'flagella': {
'_default': 0,
'_updater': 'accumulate'}}}}}}
state = Store(environment_config)
state.apply_update({})
state.state_for(['environment'], ['temperature'])
def test_in():
blank = {}
path = ['where', 'are', 'we']
assoc_path(blank, path, 5)
print(blank)
print(get_in(blank, path))
update_in(blank, path, lambda x: x + 6)
print(blank)
def test_topology_ports():
quark_colors = ['green', 'red', 'blue']
quark_spins = ['up', 'down']
electron_spins = ['-1/2', '1/2']
electron_orbitals = [
str(orbit) + 's'
for orbit in range(1, 8)]
class Proton(Process):
defaults = {
'time_step': 1.0,
'radius': 0.0}
def __init__(self, parameters=None):
if not parameters:
parameters = {}
self.radius = self.or_default(parameters, 'radius')
self.parameters = parameters
self.time_step = self.or_default(parameters, 'time_step')
def ports_schema(self):
return {
'radius': {
'_updater': 'set',
'_default': self.radius},
'quarks': {
'_divider': 'split_dict',
'*': {
'color': {
'_updater': 'set',
'_default': quark_colors[0]},
'spin': {
'_updater': 'set',
'_default': quark_spins[0]}}},
'electrons': {
'*': {
'orbital': {
'_updater': 'set',
'_default': electron_orbitals[0]},
'spin': {
'_default': electron_spins[0]}}}}
def next_update(self, timestep, states):
update = {}
collapse = np.random.random()
if collapse < states['radius'] * timestep:
update['radius'] = collapse
update['quarks'] = {}
for name, quark in states['quarks'].items():
update['quarks'][name] = {
'color': np.random.choice(quark_colors),
'spin': np.random.choice(quark_spins)}
update['electrons'] = {}
orbitals = electron_orbitals.copy()
for name, electron in states['electrons'].items():
np.random.shuffle(orbitals)
update['electrons'][name] = {
'orbital': orbitals.pop()}
return update
class Electron(Process):
defaults = {
'time_step': 1.0,
'spin': electron_spins[0]}
def __init__(self, parameters=None):
self.parameters = parameters or {}
self.spin = self.or_default(self.parameters, 'spin')
self.time_step = self.or_default(self.parameters, 'time_step')
def ports_schema(self):
return {
'spin': {
'_updater': 'set',
'_default': self.spin},
'proton': {
'radius': {
'_default': 0.0}}}
def next_update(self, timestep, states):
update = {}
if np.random.random() < states['proton']['radius']:
update['spin'] = np.random.choice(electron_spins)
return update
processes = {
'proton': Proton(),
'electrons': {
'a': {
'electron': Electron()},
'b': {
'electron': Electron()}}}
spin_path = ('internal', 'spin')
radius_path = ('structure', 'radius')
topology = {
'proton': {
'radius': radius_path,
'quarks': ('internal', 'quarks'),
'electrons': {
'_path': ('electrons',),
'*': {
'orbital': ('shell', 'orbital'),
'spin': spin_path}}},
'electrons': {
'a': {
'electron': {
'spin': spin_path,
'proton': {
'_path': ('..', '..'),
'radius': radius_path}}},
'b': {
'electron': {
'spin': spin_path,
'proton': {
'_path': ('..', '..'),
'radius': radius_path}}}}}
initial_state = {
'structure': {
'radius': 0.7},
'internal': {
'quarks': {
'x': {
'color': 'green',
'spin': 'up'},
'y': {
'color': 'red',
'spin': 'up'},
'z': {
'color': 'blue',
'spin': 'down'}}}}
experiment = Experiment({
'processes': processes,
'topology': topology,
'initial_state': initial_state})
log.debug(pf(experiment.state.get_config(True)))
experiment.update(10.0)
log.debug(pf(experiment.state.get_config(True)))
log.debug(pf(experiment.state.divide_value()))
def test_timescales():
class Slow(Process):
def __init__(self):
self.timestep = 3.0
self.ports = {
'state': ['base']}
def ports_schema(self):
return {
'state': {
'base': {
'_default': 1.0}}}
def local_timestep(self):
return self.timestep
def next_update(self, timestep, states):
base = states['state']['base']
next_base = timestep * base * 0.1
return {
'state': {'base': next_base}}
class Fast(Process):
def __init__(self):
self.timestep = 0.1
self.ports = {
'state': ['base', 'motion']}
def ports_schema(self):
return {
'state': {
'base': {
'_default': 1.0},
'motion': {
'_default': 0.0}}}
def local_timestep(self):
return self.timestep
def next_update(self, timestep, states):
base = states['state']['base']
motion = timestep * base * 0.001
return {
'state': {'motion': motion}}
processes = {
'slow': Slow(),
'fast': Fast()}
states = {
'state': {
'base': 1.0,
'motion': 0.0}}
topology = {
'slow': {'state': ('state',)},
'fast': {'state': ('state',)}}
emitter = {'type': 'null'}
experiment = Experiment({
'processes': processes,
'topology': topology,
'emitter': emitter,
'initial_state': states})
experiment.update(10.0)
if __name__ == '__main__':
# test_recursive_store()
# test_in()
# test_timescales()
test_topology_ports()
| 34.132919
| 109
| 0.507962
|
from __future__ import absolute_import, division, print_function
import os
import copy
import random
import datetime
import numpy as np
import logging as log
import pprint
pretty=pprint.PrettyPrinter(indent=2)
def pp(x):
pretty.pprint(x)
def pf(x):
return pretty.pformat(x)
from vivarium.library.units import Quantity
from vivarium.library.dict_utils import merge_dicts, deep_merge, deep_merge_check
from vivarium.core.emitter import get_emitter
from vivarium.core.process import Process
from vivarium.core.repository import (
divider_library,
updater_library,
deriver_library,
serializer_library,
)
INFINITY = float('inf')
VERBOSE = False
log.basicConfig(level=os.environ.get("LOGLEVEL", log.WARNING))
def key_for_value(d, looking):
found = None
for key, value in d.items():
if looking == value:
found = key
break
return found
def get_in(d, path):
if path:
head = path[0]
if head in d:
return get_in(d[head], path[1:])
else:
return d
def assoc_in(d, path, value):
if path:
return dict(d, **{path[0]: assoc_in(d.get(path[0], {}), path[1:], value)})
else:
return value
def assoc_path(d, path, value):
if path:
head = path[0]
if len(path) == 1:
d[head] = value
else:
if head not in d:
d[head] = {}
assoc_path(d[head], path[1:], value)
else:
value
def update_in(d, path, f):
if path:
head = path[0]
if len(path) == 1:
d[head] = f(d.get(head, None))
else:
            if head not in d:
d[head] = {}
update_in(d[head], path[1:], f)
def dissoc(d, removing):
return {
key: value
for key, value in d.items()
if key not in removing}
def without(d, removing):
return {
key: value
for key, value in d.items()
if key != removing}
def schema_for(port, keys, initial_state, default=0.0, updater='accumulate'):
return {
key: {
'_default': initial_state.get(
port, {}).get(key, default),
'_updater': updater}
for key in keys}
def always_true(x):
return True
def identity(y):
return y
class Store(object):
schema_keys = set([
'_default',
'_updater',
'_value',
'_properties',
'_emit',
'_serializer',
])
def __init__(self, config, outer=None, source=None):
self.outer = outer
self.inner = {}
self.subschema = {}
self.subtopology = {}
self.properties = {}
self.default = None
self.updater = None
self.value = None
self.units = None
self.divider = None
self.emit = False
self.sources = {}
self.deleted = False
self.leaf = False
self.serializer = None
self.apply_config(config, source)
def check_default(self, new_default):
if self.default is not None and new_default != self.default:
if new_default == 0 and self.default != 0:
log.info('_default schema conflict: {} and {}. selecting {}'.format(
self.default, new_default, self.default))
return self.default
else:
log.info('_default schema conflict: {} and {}. selecting {}'.format(
self.default, new_default, new_default))
return new_default
def check_value(self, new_value):
if self.value is not None and new_value != self.value:
raise Exception('_value schema conflict: {} and {}'.format(new_value, self.value))
return new_value
def merge_subtopology(self, subtopology):
self.subtopology = deep_merge(self.subtopology, subtopology)
def apply_subschema_config(self, subschema):
self.subschema = deep_merge(
self.subschema,
subschema)
def apply_config(self, config, source=None):
if '*' in config:
self.apply_subschema_config(config['*'])
config = without(config, '*')
if '_subschema' in config:
if source:
self.sources[source] = config['_subschema']
self.apply_subschema_config(config['_subschema'])
config = without(config, '_subschema')
if '_subtopology' in config:
self.merge_subtopology(config['_subtopology'])
config = without(config, '_subtopology')
if '_divider' in config:
self.divider = config['_divider']
if isinstance(self.divider, str):
self.divider = divider_library[self.divider]
if isinstance(self.divider, dict) and isinstance(self.divider['divider'], str):
self.divider['divider'] = divider_library[self.divider['divider']]
config = without(config, '_divider')
if self.schema_keys & set(config.keys()):
if self.inner:
raise Exception('trying to assign leaf values to a branch at: {}'.format(self.path_for()))
self.leaf = True
if '_serializer' in config:
self.serializer = config['_serializer']
if isinstance(self.serializer, str):
self.serializer = serializer_library[self.serializer]
if '_default' in config:
self.default = self.check_default(config.get('_default'))
if isinstance(self.default, Quantity):
self.units = self.default.units
if isinstance(self.default, np.ndarray):
self.serializer = self.serializer or serializer_library['numpy']
if '_value' in config:
self.value = self.check_value(config.get('_value'))
if isinstance(self.value, Quantity):
self.units = self.value.units
self.updater = config.get('_updater', self.updater or 'accumulate')
if isinstance(self.updater, str):
self.updater = updater_library[self.updater]
self.properties = deep_merge(
self.properties,
config.get('_properties', {}))
self.emit = config.get('_emit', self.emit)
if source:
self.sources[source] = config
else:
if self.leaf and config:
                raise Exception('trying to create inner nodes for a leaf node: {}'.format(self.path_for()))
self.value = None
for key, child in config.items():
if key not in self.inner:
self.inner[key] = Store(child, outer=self, source=source)
else:
self.inner[key].apply_config(child, source=source)
def get_updater(self, update):
updater = self.updater
if '_updater' in update:
updater = update['_updater']
if isinstance(updater, str):
updater = updater_library[updater]
return updater
def get_config(self, sources=False):
config = {}
if self.properties:
config['_properties'] = self.properties
if self.subschema:
config['_subschema'] = self.subschema
if self.subtopology:
config['_subtopology'] = self.subtopology
if self.divider:
config['_divider'] = self.divider
if sources and self.sources:
config['_sources'] = self.sources
if self.inner:
child_config = {
key: child.get_config(sources)
for key, child in self.inner.items()}
config.update(child_config)
else:
config.update({
'_default': self.default,
'_value': self.value})
if self.updater:
config['_updater'] = self.updater
if self.units:
config['_units'] = self.units
if self.emit:
config['_emit'] = self.emit
return config
def top(self):
if self.outer:
return self.outer.top()
else:
return self
def path_for(self):
if self.outer:
key = key_for_value(self.outer.inner, self)
above = self.outer.path_for()
return above + (key,)
else:
return tuple()
def get_value(self, condition=None, f=None):
if self.inner:
if condition is None:
condition = always_true
if f is None:
f = identity
return {
key: f(child.get_value(condition, f))
for key, child in self.inner.items()
if condition(child)}
else:
if self.subschema:
return {}
else:
return self.value
def get_path(self, path):
if path:
step = path[0]
if step == '..':
child = self.outer
else:
child = self.inner.get(step)
if child:
return child.get_path(path[1:])
else:
return None
else:
return self
def get_paths(self, paths):
return {
key: self.get_path(path)
for key, path in paths.items()}
def get_values(self, paths):
return {
key: self.get_in(path)
for key, path in paths.items()}
def get_in(self, path):
return self.get_path(path).get_value()
def get_template(self, template):
state = {}
for key, value in template.items():
child = self.inner[key]
if value is None:
state[key] = child.get_value()
else:
state[key] = child.get_template(value)
return state
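    # emit_data serializes the emitting leaves of the tree: values flagged with
    # _emit are converted via their serializer, unit values are reduced to their
    # magnitudes, and Process values report their own data via pull_data().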
def emit_data(self):
data = {}
if self.inner:
for key, child in self.inner.items():
child_data = child.emit_data()
if child_data is not None or child_data == 0:
data[key] = child_data
return data
else:
if self.emit:
if self.serializer:
return self.serializer.serialize(self.value)
elif isinstance(self.value, Process):
return self.value.pull_data()
else:
if self.units:
return self.value.to(self.units).magnitude
else:
return self.value
def mark_deleted(self):
self.deleted = True
if self.inner:
for child in self.inner.values():
child.mark_deleted()
def delete_path(self, path):
if not path:
self.inner = {}
self.value = None
return self
else:
target = self.get_path(path[:-1])
remove = path[-1]
if remove in target.inner:
lost = target.inner[remove]
del target.inner[remove]
lost.mark_deleted()
return lost
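    # divide_value produces the state for two daughters: leaves with a divider
    # apply it (optionally with extra state pulled in through a topology), and
    # branches divide recursively, returning a pair of daughter values.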
def divide_value(self):
if self.divider:
if isinstance(self.divider, dict):
divider = self.divider['divider']
topology = self.divider['topology']
state = self.outer.get_values(topology)
return divider(self.get_value(), state)
else:
return self.divider(self.get_value())
elif self.inner:
daughters = [{}, {}]
for key, child in self.inner.items():
division = child.divide_value()
if division:
for daughter, divide in zip(daughters, division):
daughter[key] = divide
return daughters
def reduce(self, reducer, initial=None):
value = initial
for path, node in self.depth():
value = reducer(value, path, node)
return value
def reduce_to(self, path, reducer, initial=None):
value = self.reduce(reducer, initial)
        update = {}
        assoc_path(update, path, value)
        self.apply_update(update)
def set_value(self, value):
if self.inner or self.subschema:
for child, inner_value in value.items():
if child not in self.inner:
if self.subschema:
self.inner[child] = Store(self.subschema, self)
else:
                        # no subschema available to create this child; skip it
                        pass
if child in self.inner:
self.inner[child].set_value(inner_value)
else:
self.value = value
def apply_defaults(self):
if self.inner:
for child in self.inner.values():
child.apply_defaults()
else:
if self.value is None:
self.value = self.default
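    # apply_update interprets several reserved keys in an update dict before
    # recursing into child stores: '_delete' removes paths, '_add' inserts
    # sub-states, '_generate' builds new processes/topology, '_divide' replaces
    # a mother compartment with daughters, and '_reduce' folds over the tree.
    # Leaf values are combined with the incoming update via their updater.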
def apply_update(self, update):
if self.inner or self.subschema:
topology_updates = {}
if '_delete' in update:
# delete a list of paths
for path in update['_delete']:
self.delete_path(path)
update = dissoc(update, ['_delete'])
if '_add' in update:
# add a list of sub-compartments
for added in update['_add']:
path = added['path']
state = added['state']
target = self.establish_path(path, {})
target.set_value(state)
self.apply_subschemas()
self.apply_defaults()
update = dissoc(update, ['_add'])
if '_generate' in update:
# generate a list of new compartments
for generate in update['_generate']:
self.generate(
generate['path'],
generate['processes'],
generate['topology'],
generate['initial_state'])
assoc_path(
topology_updates,
generate['path'],
generate['topology'])
self.apply_subschemas()
self.apply_defaults()
update = dissoc(update, '_generate')
if '_divide' in update:
# use dividers to find initial states for daughters
divide = update['_divide']
mother = divide['mother']
daughters = divide['daughters']
initial_state = self.inner[mother].get_value(
condition=lambda child: not (isinstance(child.value, Process)),
f=lambda child: copy.deepcopy(child))
states = self.inner[mother].divide_value()
for daughter, state in zip(daughters, states):
daughter_id = daughter['daughter']
                    # use initial state as default, merge in divided values
initial_state = deep_merge(
initial_state,
state)
self.generate(
daughter['path'],
daughter['processes'],
daughter['topology'],
daughter['initial_state'])
assoc_path(
topology_updates,
daughter['path'],
daughter['topology'])
self.apply_subschemas()
self.inner[daughter_id].set_value(initial_state)
self.apply_defaults()
self.delete_path((mother,))
update = dissoc(update, '_divide')
for key, value in update.items():
if key in self.inner:
child = self.inner[key]
inner_updates = child.apply_update(value)
if inner_updates:
topology_updates = deep_merge(
topology_updates,
{key: inner_updates})
elif self.subschema:
self.inner[key] = Store(self.subschema, self)
self.inner[key].set_value(value)
self.inner[key].apply_defaults()
return topology_updates
else:
if isinstance(update, dict) and '_reduce' in update:
reduction = update['_reduce']
top = self.get_path(reduction.get('from'))
update = top.reduce(
reduction['reducer'],
initial=reduction['initial'])
updater = self.updater
if (
isinstance(update, dict) and self.schema_keys & set(update.keys())
):
if '_updater' in update:
updater = self.get_updater(update)
update = update.get('_value', self.default)
self.value = updater(self.value, update)
def inner_value(self, key):
if key in self.inner:
return self.inner[key].get_value()
def topology_state(self, topology):
state = {}
for key, path in topology.items():
if key == '*':
if isinstance(path, dict):
node, path = self.outer_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.topology_state(path)
else:
node = self.get_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.get_value()
elif isinstance(path, dict):
node, path = self.outer_path(path)
state[key] = node.topology_state(path)
else:
state[key] = self.get_path(path).get_value()
return state
def schema_topology(self, schema, topology):
state = {}
if self.leaf:
state = self.get_value()
else:
for key, subschema in schema.items():
path = topology.get(key)
if key == '*':
if isinstance(path, dict):
node, path = self.outer_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.schema_topology(subschema, path)
else:
node = self.get_path(path)
for child, child_node in node.inner.items():
state[child] = child_node.schema_topology(subschema, {})
elif key == '_divider':
pass
elif isinstance(path, dict):
node, path = self.outer_path(path)
state[key] = node.schema_topology(subschema, path)
else:
if path is None:
path = (key,)
node = self.get_path(path)
state[key] = node.schema_topology(subschema, {})
return state
def state_for(self, path, keys):
state = self.get_path(path)
if state is None:
return {}
elif keys and keys[0] == '*':
return state.get_value()
else:
return {
key: state.inner_value(key)
for key in keys}
def depth(self, path=()):
base = [(path, self)]
for key, child in self.inner.items():
down = tuple(path + (key,))
base += child.depth(down)
return base
def processes(self, path=()):
return {
path: state
for path, state in self.depth()
if state.value and isinstance(state.value, Process)}
def apply_subschema(self, subschema=None, subtopology=None, source=None):
if subschema is None:
subschema = self.subschema
if subtopology is None:
subtopology = self.subtopology or {}
inner = list(self.inner.items())
for child_key, child in inner:
child.topology_ports(
subschema,
subtopology,
source=self.path_for() + ('*',))
def apply_subschemas(self):
if self.subschema:
self.apply_subschema()
for child in self.inner.values():
child.apply_subschemas()
def update_subschema(self, path, subschema):
target = self.get_path(path)
if target.subschema is None:
target.subschema = subschema
else:
target.subschema = deep_merge(
target.subschema,
subschema)
return target
def establish_path(self, path, config, source=None):
if len(path) > 0:
path_step = path[0]
remaining = path[1:]
if path_step == '..':
if not self.outer:
raise Exception('outer does not exist for path: {}'.format(path))
return self.outer.establish_path(
remaining,
config,
source=source)
else:
if path_step not in self.inner:
self.inner[path_step] = Store({}, outer=self, source=source)
return self.inner[path_step].establish_path(
remaining,
config,
source=source)
else:
self.apply_config(config, source=source)
return self
def outer_path(self, path, source=None):
node = self
if '_path' in path:
node = self.establish_path(
path['_path'],
{},
source=source)
path = without(path, '_path')
return node, path
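    # topology_ports registers a process's port schema into the tree: each port
    # is resolved to a path through the topology (with '*' ports becoming
    # subschemas) and the schema is applied at that path.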
def topology_ports(self, schema, topology, source=None):
source = source or self.path_for()
if set(schema.keys()) & self.schema_keys:
self.get_path(topology).apply_config(schema)
else:
mismatch_topology = (
set(topology.keys()) - set(schema.keys()))
mismatch_schema = (
set(schema.keys()) - set(topology.keys()))
if mismatch_topology:
raise Exception(
'topology at path {} and source {} has keys that are not in the schema: {}'.format(
self.path_for(), source, mismatch_topology))
if mismatch_schema:
log.info('{} schema has keys not in topology: {}'.format(
source, mismatch_schema))
for port, subschema in schema.items():
path = topology.get(port, (port,))
if port == '*':
subschema_config = {
'_subschema': subschema}
if isinstance(path, dict):
node, path = self.outer_path(
path, source=source)
node.merge_subtopology(path)
node.apply_config(subschema_config)
else:
node = self.establish_path(
path,
subschema_config,
source=source)
node.apply_subschema()
node.apply_defaults()
elif isinstance(path, dict):
node, path = self.outer_path(
path, source=source)
node.topology_ports(
subschema,
path,
source=source)
else:
self.establish_path(
path,
subschema,
source=source)
def generate_paths(self, processes, topology):
for key, subprocess in processes.items():
subtopology = topology[key]
if isinstance(subprocess, Process):
process_state = Store({
'_value': subprocess,
'_updater': 'set'}, outer=self)
self.inner[key] = process_state
subprocess.schema = subprocess.ports_schema()
self.topology_ports(
subprocess.schema,
subtopology,
source=self.path_for() + (key,))
else:
if key not in self.inner:
self.inner[key] = Store({}, outer=self)
self.inner[key].generate_paths(
subprocess,
subtopology)
def generate(self, path, processes, topology, initial_state):
target = self.establish_path(path, {})
target.generate_paths(processes, topology)
target.set_value(initial_state)
target.apply_subschemas()
target.apply_defaults()
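# inverse_topology translates a process update, expressed relative to the
# process's ports, back into absolute paths in the state tree by walking the
# topology mapping (including '*' wildcards and '_path' redirects).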
def inverse_topology(outer, update, topology):
inverse = {}
for key, path in topology.items():
if key == '*':
if isinstance(path, dict):
node = inverse
if '_path' in path:
inner = normalize_path(outer + path['_path'])
node = get_in(inverse, inner)
if node is None:
node = {}
assoc_path(inverse, inner, node)
path = without(path, '_path')
for child, child_update in update.items():
node[child] = inverse_topology(
tuple(),
update[child],
path)
else:
for child, child_update in update.items():
inner = normalize_path(outer + path + (child,))
assoc_path(inverse, inner, child_update)
elif key in update:
value = update[key]
if isinstance(path, dict):
node = inverse
if '_path' in path:
inner = normalize_path(outer + path['_path'])
node = get_in(inverse, inner)
if node is None:
node = {}
assoc_path(inverse, inner, node)
path = without(path, '_path')
node.update(inverse_topology(
tuple(),
value,
path))
else:
inner = normalize_path(outer + path)
assoc_path(inverse, inner, value)
return inverse
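# generate_derivers collects the deriver processes declared by each process
# (via its derivers() configuration) and builds a matching topology for them,
# so they can be run after every update with a timestep of zero.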
def generate_derivers(processes, topology):
deriver_processes = {}
deriver_topology = {}
for process_key, node in processes.items():
subtopology = topology[process_key]
if isinstance(node, Process):
for deriver_key, config in node.derivers().items():
if deriver_key not in deriver_processes:
# generate deriver process
deriver_config = config.get('config', {})
generate = config['deriver']
if isinstance(generate, str):
generate = deriver_library[generate]
deriver = generate(deriver_config)
deriver_processes[deriver_key] = deriver
# generate deriver topology
deriver_topology[deriver_key] = {}
for target, source in config.get('port_mapping', {}).items():
path = subtopology[source]
deriver_topology[deriver_key][target] = path
else:
subderivers = generate_derivers(node, subtopology)
deriver_processes[process_key] = subderivers['processes']
deriver_topology[process_key] = subderivers['topology']
return {
'processes': deriver_processes,
'topology': deriver_topology}
class Compartment(object):
def __init__(self, config):
self.config = config
def generate_processes(self, config):
return {}
def generate_topology(self, config):
return {}
def generate(self, config=None, path=tuple()):
# merge config with self.config
if config is None:
config = self.config
else:
default = copy.deepcopy(self.config)
config = deep_merge(default, config)
processes = self.generate_processes(config)
topology = self.generate_topology(config)
# add derivers
derivers = generate_derivers(processes, topology)
processes = deep_merge(derivers['processes'], processes)
topology = deep_merge(derivers['topology'], topology)
return {
'processes': assoc_in({}, path, processes),
'topology': assoc_in({}, path, topology)}
def or_default(self, parameters, key):
return parameters.get(key, self.defaults[key])
def get_parameters(self):
network = self.generate({})
processes = network['processes']
return {
process_id: process.parameters
for process_id, process in processes.items()}
def generate_state(processes, topology, initial_state):
state = Store({})
state.generate_paths(processes, topology)
state.apply_subschemas()
state.set_value(initial_state)
state.apply_defaults()
return state
def normalize_path(path):
progress = []
for step in path:
if step == '..' and len(progress) > 0:
progress = progress[:-1]
else:
progress.append(step)
return progress
def timestamp(dt=None):
if not dt:
dt = datetime.datetime.now()
return "%04d%02d%02d.%02d%02d%02d" % (
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second)
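# Experiment wires processes, topology and an initial state into a Store tree,
# runs the update loop, and emits configuration and history data through the
# configured emitter.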
class Experiment(object):
def __init__(self, config):
self.config = config
self.experiment_id = config.get(
'experiment_id', timestamp(datetime.datetime.utcnow()))
self.description = config.get('description', '')
self.processes = config['processes']
self.topology = config['topology']
self.initial_state = config.get('initial_state', {})
self.emit_step = config.get('emit_step')
self.state = generate_state(
self.processes,
self.topology,
self.initial_state)
emitter_config = config.get('emitter', {})
emitter_config['experiment_id'] = self.experiment_id
self.emitter = get_emitter(emitter_config)
self.local_time = 0.0
# run the derivers
self.send_updates([])
# run the emitter
self.emit_configuration()
self.emit_data()
log.info('experiment {}'.format(self.experiment_id))
log.info('\nPROCESSES:')
log.info(pf(self.processes))
log.info('\nTOPOLOGY:')
log.info(pf(self.topology))
log.info('\nSTATE:')
log.info(pf(self.state.get_value()))
log.info('\nCONFIG:')
log.info(pf(self.state.get_config(True)))
def emit_configuration(self):
data = {
'time_created': timestamp(),
'experiment_id': self.experiment_id,
'description': self.description,
# TODO -- serialize processes, topology, state
# 'processes': self.processes,
# 'topology': self.topology,
# 'state': self.state.get_config()
}
emit_config = {
'table': 'configuration',
'data': data}
self.emitter.emit(emit_config)
def process_update(self, path, state, interval):
process = state.value
process_topology = get_in(self.topology, path)
# translate the values from the tree structure into the form
# that this process expects, based on its declared topology
ports = state.outer.schema_topology(process.schema, process_topology)
# perform the process update with the current states
update = process.next_update(interval, ports)
# translate the values from the process update back into the
# paths they have in the state tree
# inverse = inverse_topology(path[:-1], update, process_topology)
# absolute = assoc_in({}, path[:-1], inverse)
absolute = inverse_topology(path[:-1], update, process_topology)
return absolute
def apply_update(self, update):
topology_updates = self.state.apply_update(update)
if topology_updates:
self.topology = deep_merge(self.topology, topology_updates)
def run_derivers(self, derivers):
for path, deriver in derivers.items():
# timestep shouldn't influence derivers
if not deriver.deleted:
update = self.process_update(path, deriver, 0)
self.apply_update(update)
def emit_data(self):
data = self.state.emit_data()
data.update({
'time': self.local_time})
emit_config = {
'table': 'history',
'data': data}
self.emitter.emit(emit_config)
def send_updates(self, updates, derivers=None):
for update in updates:
self.apply_update(update)
if derivers is None:
derivers = {
path: state
for path, state in self.state.depth()
if state.value is not None and isinstance(state.value, Process) and state.value.is_deriver()}
self.run_derivers(derivers)
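    # update() advances the simulation with a per-process "front": each process
    # keeps its own clock and advances by its local timestep; the smallest step
    # taken in a pass (full_step) moves global time forward, and updates whose
    # time has arrived are applied together before data is emitted.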
def update(self, interval):
time = 0
emit_time = self.emit_step
def empty_front(t):
return {
'time': t,
'update': {}}
front = {}
while time < interval:
full_step = INFINITY
if VERBOSE:
                print('{}: {}'.format(time, pf(self.state.get_value())))
processes = {}
derivers = {}
for path, state in self.state.depth():
if state.value is not None and isinstance(state.value, Process):
if state.value.is_deriver():
derivers[path] = state
else:
processes[path] = state
front = {
path: process
for path, process in front.items()
if path in processes}
for path, state in processes.items():
if not path in front:
front[path] = empty_front(time)
process_time = front[path]['time']
if process_time <= time:
process = state.value
future = min(process_time + process.local_timestep(), interval)
timestep = future - process_time
update = self.process_update(path, state, timestep)
if timestep < full_step:
full_step = timestep
front[path]['time'] = future
front[path]['update'] = update
if full_step == INFINITY:
next_event = interval
for process_name in front.keys():
                    if front[process_name]['time'] < next_event:
                        next_event = front[process_name]['time']
time = next_event
else:
future = time + full_step
updates = []
paths = []
for path, advance in front.items():
if advance['time'] <= future:
new_update = advance['update']
new_update['_path'] = path
updates.append(new_update)
advance['update'] = {}
paths.append(path)
self.send_updates(updates, derivers)
time = future
self.local_time += full_step
if self.emit_step is None:
self.emit_data()
elif emit_time <= time:
while emit_time <= time:
self.emit_data()
emit_time += self.emit_step
for process_name, advance in front.items():
assert advance['time'] == time == interval
assert len(advance['update']) == 0
def test_recursive_store():
environment_config = {
'environment': {
'temperature': {
'_default': 0.0,
'_updater': 'accumulate'},
'fields': {
(0, 1): {
'enzymeX': {
'_default': 0.0,
'_updater': 'set'},
'enzymeY': {
'_default': 0.0,
'_updater': 'set'}},
(0, 2): {
'enzymeX': {
'_default': 0.0,
'_updater': 'set'},
'enzymeY': {
'_default': 0.0,
'_updater': 'set'}}},
'agents': {
'1': {
'location': {
'_default': (0, 0),
'_updater': 'set'},
'boundary': {
'external': {
'_default': 0.0,
'_updater': 'set'},
'internal': {
'_default': 0.0,
'_updater': 'set'}},
'transcripts': {
'flhDC': {
'_default': 0,
'_updater': 'accumulate'},
'fliA': {
'_default': 0,
'_updater': 'accumulate'}},
'proteins': {
'ribosome': {
'_default': 0,
'_updater': 'set'},
'flagella': {
'_default': 0,
'_updater': 'accumulate'}}},
'2': {
'location': {
'_default': (0, 0),
'_updater': 'set'},
'boundary': {
'external': {
'_default': 0.0,
'_updater': 'set'},
'internal': {
'_default': 0.0,
'_updater': 'set'}},
'transcripts': {
'flhDC': {
'_default': 0,
'_updater': 'accumulate'},
'fliA': {
'_default': 0,
'_updater': 'accumulate'}},
'proteins': {
'ribosome': {
'_default': 0,
'_updater': 'set'},
'flagella': {
'_default': 0,
'_updater': 'accumulate'}}}}}}
state = Store(environment_config)
state.apply_update({})
state.state_for(['environment'], ['temperature'])
def test_in():
blank = {}
path = ['where', 'are', 'we']
assoc_path(blank, path, 5)
print(blank)
print(get_in(blank, path))
update_in(blank, path, lambda x: x + 6)
print(blank)
def test_topology_ports():
quark_colors = ['green', 'red', 'blue']
quark_spins = ['up', 'down']
electron_spins = ['-1/2', '1/2']
electron_orbitals = [
str(orbit) + 's'
for orbit in range(1, 8)]
class Proton(Process):
defaults = {
'time_step': 1.0,
'radius': 0.0}
def __init__(self, parameters=None):
if not parameters:
parameters = {}
self.radius = self.or_default(parameters, 'radius')
self.parameters = parameters
self.time_step = self.or_default(parameters, 'time_step')
def ports_schema(self):
return {
'radius': {
'_updater': 'set',
'_default': self.radius},
'quarks': {
'_divider': 'split_dict',
'*': {
'color': {
'_updater': 'set',
'_default': quark_colors[0]},
'spin': {
'_updater': 'set',
'_default': quark_spins[0]}}},
'electrons': {
'*': {
'orbital': {
'_updater': 'set',
'_default': electron_orbitals[0]},
'spin': {
'_default': electron_spins[0]}}}}
def next_update(self, timestep, states):
update = {}
collapse = np.random.random()
if collapse < states['radius'] * timestep:
update['radius'] = collapse
update['quarks'] = {}
for name, quark in states['quarks'].items():
update['quarks'][name] = {
'color': np.random.choice(quark_colors),
'spin': np.random.choice(quark_spins)}
update['electrons'] = {}
orbitals = electron_orbitals.copy()
for name, electron in states['electrons'].items():
np.random.shuffle(orbitals)
update['electrons'][name] = {
'orbital': orbitals.pop()}
return update
class Electron(Process):
defaults = {
'time_step': 1.0,
'spin': electron_spins[0]}
def __init__(self, parameters=None):
self.parameters = parameters or {}
self.spin = self.or_default(self.parameters, 'spin')
self.time_step = self.or_default(self.parameters, 'time_step')
def ports_schema(self):
return {
'spin': {
'_updater': 'set',
'_default': self.spin},
'proton': {
'radius': {
'_default': 0.0}}}
def next_update(self, timestep, states):
update = {}
if np.random.random() < states['proton']['radius']:
update['spin'] = np.random.choice(electron_spins)
return update
processes = {
'proton': Proton(),
'electrons': {
'a': {
'electron': Electron()},
'b': {
'electron': Electron()}}}
spin_path = ('internal', 'spin')
radius_path = ('structure', 'radius')
topology = {
'proton': {
'radius': radius_path,
'quarks': ('internal', 'quarks'),
'electrons': {
'_path': ('electrons',),
'*': {
'orbital': ('shell', 'orbital'),
'spin': spin_path}}},
'electrons': {
'a': {
'electron': {
'spin': spin_path,
'proton': {
'_path': ('..', '..'),
'radius': radius_path}}},
'b': {
'electron': {
'spin': spin_path,
'proton': {
'_path': ('..', '..'),
'radius': radius_path}}}}}
initial_state = {
'structure': {
'radius': 0.7},
'internal': {
'quarks': {
'x': {
'color': 'green',
'spin': 'up'},
'y': {
'color': 'red',
'spin': 'up'},
'z': {
'color': 'blue',
'spin': 'down'}}}}
experiment = Experiment({
'processes': processes,
'topology': topology,
'initial_state': initial_state})
log.debug(pf(experiment.state.get_config(True)))
experiment.update(10.0)
log.debug(pf(experiment.state.get_config(True)))
log.debug(pf(experiment.state.divide_value()))
def test_timescales():
class Slow(Process):
def __init__(self):
self.timestep = 3.0
self.ports = {
'state': ['base']}
def ports_schema(self):
return {
'state': {
'base': {
'_default': 1.0}}}
def local_timestep(self):
return self.timestep
def next_update(self, timestep, states):
base = states['state']['base']
next_base = timestep * base * 0.1
return {
'state': {'base': next_base}}
class Fast(Process):
def __init__(self):
self.timestep = 0.1
self.ports = {
'state': ['base', 'motion']}
def ports_schema(self):
return {
'state': {
'base': {
'_default': 1.0},
'motion': {
'_default': 0.0}}}
def local_timestep(self):
return self.timestep
def next_update(self, timestep, states):
base = states['state']['base']
motion = timestep * base * 0.001
return {
'state': {'motion': motion}}
processes = {
'slow': Slow(),
'fast': Fast()}
states = {
'state': {
'base': 1.0,
'motion': 0.0}}
topology = {
'slow': {'state': ('state',)},
'fast': {'state': ('state',)}}
emitter = {'type': 'null'}
experiment = Experiment({
'processes': processes,
'topology': topology,
'emitter': emitter,
'initial_state': states})
experiment.update(10.0)
if __name__ == '__main__':
test_topology_ports()
| true
| true
|
1c42bdc5fc14b0c0b376f93d59c871b4d33bc477
| 7,831
|
py
|
Python
|
job_app/views.py
|
dtlisir/app0508
|
35cdddfd794996365ab16dc77ed601926fa2ec64
|
[
"Apache-2.0"
] | null | null | null |
job_app/views.py
|
dtlisir/app0508
|
35cdddfd794996365ab16dc77ed601926fa2ec64
|
[
"Apache-2.0"
] | null | null | null |
job_app/views.py
|
dtlisir/app0508
|
35cdddfd794996365ab16dc77ed601926fa2ec64
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import base64
import datetime
import json
import time
from common.mymako import render_mako_context, render_json, render_mako_tostring
from blueking.component.shortcuts import get_client_by_request
from job_app.models import Script, Operation
from celery.task import task
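# Flow: the views render pages and Ajax fragments; execute() dispatches the
# run_script celery task, which calls the BlueKing JOB fast_execute_script API,
# polls for completion, and stores the result and logs on an Operation record.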
def execute_job(request):
"""
    Home page: render the job execution form.
"""
result, biz_list, message = get_biz_list(request)
scripts = Script.objects.all()
return render_mako_context(request, '/job_app/execute.html', {'biz_list': biz_list, 'scripts': scripts})
def show_history(request):
"""
    Execution history page.
"""
result, biz_list, message = get_biz_list(request)
scripts = Script.objects.all()
operators = set(Operation.objects.values_list('user', flat=True))
return render_mako_context(request, '/job_app/history.html',
{'biz_list': biz_list, 'scripts': scripts, 'operators': operators})
def get_biz_list(request):
biz_list = []
client = get_client_by_request(request)
kwargs = {
'fields': ['bk_biz_id', 'bk_biz_name']
}
resp = client.cc.search_business(**kwargs)
if resp.get('result'):
data = resp.get('data', {}).get('info', {})
for _d in data:
biz_list.append({
'name': _d.get('bk_biz_name'),
'id': _d.get('bk_biz_id'),
})
return resp.get('result'), biz_list, resp.get('message')
def get_hosts(request):
biz_id = request.GET.get("biz_id", 0)
if biz_id:
biz_id = int(biz_id)
else:
return render_json({
'result': False,
'message': "must provide biz_id to get hosts"
})
client = get_client_by_request(request)
resp = client.cc.search_host({
"page": {"start": 0, "limit": 5, "sort": "bk_host_id"},
"ip": {
"flag": "bk_host_innerip|bk_host_outerip",
"exact": 1,
"data": []
},
"condition": [
{
"bk_obj_id": "host",
"fields": [
# "bk_cloud_id",
# "bk_host_id",
# "bk_host_name",
# "bk_os_name",
# "bk_os_type",
# "bk_host_innerip",
],
"condition": []
},
# {"bk_obj_id": "module", "fields": [], "condition": []},
# {"bk_obj_id": "set", "fields": [], "condition": []},
{
"bk_obj_id": "biz",
"fields": [
"default",
"bk_biz_id",
"bk_biz_name",
],
"condition": [
{
"field": "bk_biz_id",
"operator": "$eq",
"value": biz_id
}
]
}
]
})
hosts = [{
"ip": host['host']['bk_host_innerip'],
"os": host['host']['bk_os_name'],
"bk_cloud_id": host['host']['bk_cloud_id'][0]["id"],
} for host in resp['data']['info']]
table_data = render_mako_tostring('/job_app/execute_tbody.html', {
'hosts': hosts,
})
return render_json({
'result': True,
'data': table_data,
'message': "success"
})
def execute(request):
"""执行任务"""
biz_id = request.POST.get("biz_id")
script_type = request.POST.get("script_type")
script_param = request.POST.get("script_param", "")
ips = request.POST.getlist("ips[]")
if biz_id:
biz_id = int(biz_id)
if script_type:
script_type = int(script_type)
try:
script_content = Script.objects.get(id=script_type).script
except Script.DoesNotExist:
return render_json({"result": False, "message": "script not exist!"})
client = get_client_by_request(request)
execute_task = run_script.delay(client, biz_id, script_content, script_param, ips)
opt = Operation.objects.create(
biz=biz_id,
script=Script.objects.get(id=script_type),
machine_numbers=len(ips),
celery_id=execute_task.id,
argument=script_param,
user=request.user.username
)
return render_json({"result": True, "data": opt.celery_id, "message": "success"})
@task
def run_script(client, biz_id, script_content, script_param, ips):
"""快速执行脚本"""
    # mark the operation as running
Operation.objects.filter(celery_id=run_script.request.id).update(
status="running"
)
resp = client.job.fast_execute_script(
bk_biz_id=biz_id,
account="root",
script_param=base64.b64encode(script_param),
script_content=base64.b64encode(script_content),
ip_list=[{"bk_cloud_id": 0, "ip": ip} for ip in ips]
)
    # the job failed to start
if not resp.get('result', False):
Operation.objects.filter(celery_id=run_script.request.id).update(
log=json.dumps([resp.get("message")]),
end_time=datetime.datetime.now(),
result=False,
status="start_failed"
        )
        return
task_id = resp.get('data').get('job_instance_id')
poll_job_task(client, biz_id, task_id)
    # query the execution log
resp = client.job.get_job_instance_log(job_instance_id=task_id, bk_biz_id=biz_id)
ip_logs = resp['data'][0]['step_results'][0]['ip_logs']
status = resp['data'][0]['status']
    result = (status == 3)
Operation.objects.filter(celery_id=run_script.request.id).update(
log=json.dumps(ip_logs),
end_time=datetime.datetime.now(),
result=result,
status="successed" if result else "failed"
)
def poll_job_task(client, biz_id, job_instance_id):
"""true/false/timeout"""
count = 0
res = client.job.get_job_instance_status(job_instance_id=job_instance_id, bk_biz_id=biz_id)
while res.get('data', {}).get('is_finished') is False and count < 30:
res = client.job.get_job_instance_status(job_instance_id=job_instance_id, bk_biz_id=biz_id)
count += 1
time.sleep(3)
return res
def get_operations(request):
"""
    Load operation records via Ajax.
"""
biz = request.GET.get('biz')
script = request.GET.get('script')
operator = request.GET.get('operator')
time_range = request.GET.get('timerange')
operations = Operation.objects.all()
if biz and biz != 'all':
operations = operations.filter(biz=int(biz))
if script and script != 'all':
operations = operations.filter(script_id=int(script))
if operator and operator != 'all':
operations = operations.filter(user=operator)
if time_range:
start_time, end_time = time_range.split('~')
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
operations = operations.filter(start_time__range=(start_time, end_time))
data = [opt.to_dict() for opt in operations]
return render_json({
'result': True,
'data': data,
'message': "success"
})
def get_log(request, operation_id):
"""查询日志"""
operation = Operation.objects.get(id=operation_id)
try:
logs = json.loads(operation.log)
except TypeError as e:
logs = []
log_content = '<div class="log-content">'
for log_item in logs:
job_log_content = log_item.get('log_content')
log_content += '<div class="ip-start"><prev>IP: {}</prev></div>'.format(log_item.get('ip', ''))
log_content += ''.join(
map(lambda x: '<prev>{}</prev><br>'.format(x), job_log_content.split('\n'))
)
log_content += '<div class="ip-end"></div>'
log_content += '</div>'
return render_json({
'result': True,
'data': log_content
})
| 29.662879
| 108
| 0.573107
|
import base64
import datetime
import json
import time
from common.mymako import render_mako_context, render_json, render_mako_tostring
from blueking.component.shortcuts import get_client_by_request
from job_app.models import Script, Operation
from celery.task import task
def execute_job(request):
result, biz_list, message = get_biz_list(request)
scripts = Script.objects.all()
return render_mako_context(request, '/job_app/execute.html', {'biz_list': biz_list, 'scripts': scripts})
def show_history(request):
result, biz_list, message = get_biz_list(request)
scripts = Script.objects.all()
operators = set(Operation.objects.values_list('user', flat=True))
return render_mako_context(request, '/job_app/history.html',
{'biz_list': biz_list, 'scripts': scripts, 'operators': operators})
def get_biz_list(request):
biz_list = []
client = get_client_by_request(request)
kwargs = {
'fields': ['bk_biz_id', 'bk_biz_name']
}
resp = client.cc.search_business(**kwargs)
if resp.get('result'):
data = resp.get('data', {}).get('info', {})
for _d in data:
biz_list.append({
'name': _d.get('bk_biz_name'),
'id': _d.get('bk_biz_id'),
})
return resp.get('result'), biz_list, resp.get('message')
def get_hosts(request):
biz_id = request.GET.get("biz_id", 0)
if biz_id:
biz_id = int(biz_id)
else:
return render_json({
'result': False,
'message': "must provide biz_id to get hosts"
})
client = get_client_by_request(request)
resp = client.cc.search_host({
"page": {"start": 0, "limit": 5, "sort": "bk_host_id"},
"ip": {
"flag": "bk_host_innerip|bk_host_outerip",
"exact": 1,
"data": []
},
"condition": [
{
"bk_obj_id": "host",
"fields": [
],
"condition": []
},
{
"bk_obj_id": "biz",
"fields": [
"default",
"bk_biz_id",
"bk_biz_name",
],
"condition": [
{
"field": "bk_biz_id",
"operator": "$eq",
"value": biz_id
}
]
}
]
})
hosts = [{
"ip": host['host']['bk_host_innerip'],
"os": host['host']['bk_os_name'],
"bk_cloud_id": host['host']['bk_cloud_id'][0]["id"],
} for host in resp['data']['info']]
table_data = render_mako_tostring('/job_app/execute_tbody.html', {
'hosts': hosts,
})
return render_json({
'result': True,
'data': table_data,
'message': "success"
})
def execute(request):
biz_id = request.POST.get("biz_id")
script_type = request.POST.get("script_type")
script_param = request.POST.get("script_param", "")
ips = request.POST.getlist("ips[]")
if biz_id:
biz_id = int(biz_id)
if script_type:
script_type = int(script_type)
try:
script_content = Script.objects.get(id=script_type).script
except Script.DoesNotExist:
return render_json({"result": False, "message": "script not exist!"})
client = get_client_by_request(request)
execute_task = run_script.delay(client, biz_id, script_content, script_param, ips)
opt = Operation.objects.create(
biz=biz_id,
script=Script.objects.get(id=script_type),
machine_numbers=len(ips),
celery_id=execute_task.id,
argument=script_param,
user=request.user.username
)
return render_json({"result": True, "data": opt.celery_id, "message": "success"})
@task
def run_script(client, biz_id, script_content, script_param, ips):
Operation.objects.filter(celery_id=run_script.request.id).update(
status="running"
)
resp = client.job.fast_execute_script(
bk_biz_id=biz_id,
account="root",
script_param=base64.b64encode(script_param),
script_content=base64.b64encode(script_content),
ip_list=[{"bk_cloud_id": 0, "ip": ip} for ip in ips]
)
if not resp.get('result', False):
Operation.objects.filter(celery_id=run_script.request.id).update(
log=json.dumps([resp.get("message")]),
end_time=datetime.datetime.now(),
result=False,
status="start_failed"
        )
        return
task_id = resp.get('data').get('job_instance_id')
poll_job_task(client, biz_id, task_id)
resp = client.job.get_job_instance_log(job_instance_id=task_id, bk_biz_id=biz_id)
ip_logs = resp['data'][0]['step_results'][0]['ip_logs']
status = resp['data'][0]['status']
    result = (status == 3)
Operation.objects.filter(celery_id=run_script.request.id).update(
log=json.dumps(ip_logs),
end_time=datetime.datetime.now(),
result=result,
status="successed" if result else "failed"
)
def poll_job_task(client, biz_id, job_instance_id):
count = 0
res = client.job.get_job_instance_status(job_instance_id=job_instance_id, bk_biz_id=biz_id)
while res.get('data', {}).get('is_finished') is False and count < 30:
res = client.job.get_job_instance_status(job_instance_id=job_instance_id, bk_biz_id=biz_id)
count += 1
time.sleep(3)
return res
def get_operations(request):
biz = request.GET.get('biz')
script = request.GET.get('script')
operator = request.GET.get('operator')
time_range = request.GET.get('timerange')
operations = Operation.objects.all()
if biz and biz != 'all':
operations = operations.filter(biz=int(biz))
if script and script != 'all':
operations = operations.filter(script_id=int(script))
if operator and operator != 'all':
operations = operations.filter(user=operator)
if time_range:
start_time, end_time = time_range.split('~')
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
operations = operations.filter(start_time__range=(start_time, end_time))
data = [opt.to_dict() for opt in operations]
return render_json({
'result': True,
'data': data,
'message': "success"
})
def get_log(request, operation_id):
operation = Operation.objects.get(id=operation_id)
try:
logs = json.loads(operation.log)
except TypeError as e:
logs = []
log_content = '<div class="log-content">'
for log_item in logs:
job_log_content = log_item.get('log_content')
log_content += '<div class="ip-start"><prev>IP: {}</prev></div>'.format(log_item.get('ip', ''))
log_content += ''.join(
map(lambda x: '<prev>{}</prev><br>'.format(x), job_log_content.split('\n'))
)
log_content += '<div class="ip-end"></div>'
log_content += '</div>'
return render_json({
'result': True,
'data': log_content
})
| true
| true
|
1c42be6a0446f6886d76b5202b31d923edd72148
| 420
|
py
|
Python
|
app/api/__init__.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
danofsatx/resources_api
|
664532ee3ab4cb7c5000166d84ba371cf8b7713a
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from flask_cors import CORS
bp = Blueprint('api', __name__)
ALLOWED_ORIGINS = [
'http://localhost:3000',
r"https:\/\/(www\.)?operationcode\.org",
r"https:\/\/(.*\.)?operation-code(-.*)?\.now\.sh"
]
CORS(bp, origins=ALLOWED_ORIGINS)
# We need to import the routes here so they will
# bind to the blueprint before the blueprint is registered.
from app.api import routes # noqa
| 24.705882
| 59
| 0.690476
|
from flask import Blueprint
from flask_cors import CORS
bp = Blueprint('api', __name__)
ALLOWED_ORIGINS = [
'http://localhost:3000',
r"https:\/\/(www\.)?operationcode\.org",
r"https:\/\/(.*\.)?operation-code(-.*)?\.now\.sh"
]
CORS(bp, origins=ALLOWED_ORIGINS)
from app.api import routes
| true
| true
|
1c42be90e6576d03ab878dec3b493cd6cd0318d8
| 1,115
|
py
|
Python
|
homework04-python-scientific-ecosystem/exercise 4 code.py
|
Bokubst/homework-scientific-computing
|
4a7e1f896ad19ed55260a584bee2d6d6521de78b
|
[
"MIT"
] | 1
|
2020-03-26T11:53:59.000Z
|
2020-03-26T11:53:59.000Z
|
homework04-python-scientific-ecosystem/exercise 4 code.py
|
Bokubst/homework-scientific-computing
|
4a7e1f896ad19ed55260a584bee2d6d6521de78b
|
[
"MIT"
] | null | null | null |
homework04-python-scientific-ecosystem/exercise 4 code.py
|
Bokubst/homework-scientific-computing
|
4a7e1f896ad19ed55260a584bee2d6d6521de78b
|
[
"MIT"
] | null | null | null |
import numpy as np
random1 = np.random.uniform(size=100)
print("random1 values")
print(random1 , "\n")
random2 = np.random.uniform(size=100)
print("random2 values")
print(random2 , "\n")
from matplotlib import pyplot as plt
plt.plot(random1, label = 'random number 1')
plt.title('random number 1')
ran_mean1 = [np.mean(random1) for i in random1]
plt.plot(ran_mean1, label = 'mean value')
plt.show()
plt.plot(random2, label = "random number 2")
plt.title("random number 2")
ran_mean2 = [np.mean(random2) for i in random2]
plt.plot(ran_mean2, label = 'mean value')
plt.show()
random3 = (np.add(random1, random2))/2
print("random3 values")
print(random3)
random3means = np.mean(random3)
plt.plot(random3, label = "random number 3")
plt.title("random number 3")
ran_mean3 = [np.mean(random3) for i in random3]
plt.plot(ran_mean3, label = 'mean value')
plt.show()
random1means = np.mean(random1)
print("mean of random1")
print(random1means , "\n")
random2means = np.mean(random2)
print("mean of random2")
print(random2means , "\n")
random3means = np.mean(random3)
print("mean of random3")
print(random3means)
| 24.23913
| 47
| 0.721973
|
import numpy as np
random1 = np.random.uniform(size=100)
print("random1 values")
print(random1 , "\n")
random2 = np.random.uniform(size=100)
print("random2 values")
print(random2 , "\n")
from matplotlib import pyplot as plt
plt.plot(random1, label = 'random number 1')
plt.title('random number 1')
ran_mean1 = [np.mean(random1) for i in random1]
plt.plot(ran_mean1, label = 'mean value')
plt.show()
plt.plot(random2, label = "random number 2")
plt.title("random number 2")
ran_mean2 = [np.mean(random2) for i in random2]
plt.plot(ran_mean2, label = 'mean value')
plt.show()
random3 = (np.add(random1, random2))/2
print("random3 values")
print(random3)
random3means = np.mean(random3)
plt.plot(random3, label = "random number 3")
plt.title("random number 3")
ran_mean3 = [np.mean(random3) for i in random3]
plt.plot(ran_mean3, label = 'mean value')
plt.show()
random1means = np.mean(random1)
print("mean of random1")
print(random1means , "\n")
random2means = np.mean(random2)
print("mean of random2")
print(random2means , "\n")
random3means = np.mean(random3)
print("mean of random3")
print(random3means)
| true
| true
|
1c42bf6645c9e6385037478dd4d3fa6a87397b59
| 2,888
|
py
|
Python
|
tests/test_simple.py
|
resendislab/corda
|
15f4a8e1a046c6191f22e46099dad10aafb1fdce
|
[
"MIT"
] | 9
|
2017-08-21T09:44:19.000Z
|
2021-09-22T12:18:06.000Z
|
tests/test_simple.py
|
resendislab/corda
|
15f4a8e1a046c6191f22e46099dad10aafb1fdce
|
[
"MIT"
] | 9
|
2017-08-23T15:50:39.000Z
|
2021-08-10T17:10:51.000Z
|
tests/test_simple.py
|
resendislab/corda
|
15f4a8e1a046c6191f22e46099dad10aafb1fdce
|
[
"MIT"
] | 7
|
2017-09-12T12:50:10.000Z
|
2021-02-22T18:42:15.000Z
|
# tests.py
#
# Copyright 2016 Christian Diener <mail[at]cdiener.com>
#
# MIT license. See LICENSE for more information.
import pytest
from corda import CORDA
from cobra import Model, Reaction, Metabolite
@pytest.fixture
def model():
A = Metabolite("A")
B = Metabolite("B")
C = Metabolite("C")
r1 = Reaction("r1")
r1.add_metabolites({A: -1, C: 1})
r2 = Reaction("r2")
r2.add_metabolites({B: -1, C: 1})
r3 = Reaction("EX_A")
r3.add_metabolites({A: 1})
r4 = Reaction("EX_B")
r4.add_metabolites({B: 1})
r5 = Reaction("EX_C")
r5.add_metabolites({C: -1})
mod = Model("test model")
mod.add_reactions([r1, r2, r3, r4, r5])
conf = {"r1": 1, "r2": -1, "EX_A": 1, "EX_B": 1, "EX_C": 1}
return (mod, conf)
class TestCORDAsimple:
def test_mock_add(self, model):
mod, conf = model
opt = CORDA(mod, conf, met_prod={"C": -1})
r = opt.model.reactions.get_by_id("EX_CORDA_0")
assert "mock" in r.notes
opt = CORDA(mod, conf, met_prod="C ->")
r = opt.model.reactions.get_by_id("EX_CORDA_0")
assert "mock" in r.notes
with pytest.raises(TypeError):
CORDA(mod, conf, met_prod=[["C"]])
opt.build()
mod = opt.cobra_model()
assert all(mr not in mod.reactions for mr in opt.mocks)
def test_conf_check(self, model):
mod, conf = model
co = conf.copy()
del co["EX_A"]
with pytest.raises(ValueError):
CORDA(mod, co)
def test_valid_conf(self, model):
mod, conf = model
co = conf.copy()
co["EX_A"] = 4
with pytest.raises(ValueError):
CORDA(mod, co)
def test_performance_metrics(self, model):
opt = CORDA(model[0], model[1])
assert "not built" in str(opt)
def test_impossible_req(self, model):
mod, conf = model
D = Metabolite("D")
mod.add_metabolites([D])
opt = CORDA(mod, conf, met_prod=["D"])
need = opt.associated(["EX_CORDA_0"])
assert len(need) == 0
assert "EX_CORDA_0" in opt.impossible
def test_association_works(self, model):
mod, conf = model
opt = CORDA(mod, conf, met_prod="C ->")
need = opt.associated(["EX_CORDA_0"])
solutions = (["EX_A", "r1"], ["EX_B", "r2"])
assert list(need) in solutions
def test_redundancy_works(self, model):
mod, conf = model
conf["r2"] = 2
opt = CORDA(mod, conf, met_prod="C ->")
need = opt.associated(["EX_CORDA_0"], conf)
assert len(need) == 4
assert opt.redundancies["EX_CORDA_0"] == 2
opt = CORDA(mod, conf, met_prod="C ->", n=1)
need = opt.associated(["EX_CORDA_0"], conf)
assert len(need) == 2
assert opt.redundancies["EX_CORDA_0"] == 1
if __name__ == '__main__':
pytest.main()
| 29.171717
| 63
| 0.572368
|
import pytest
from corda import CORDA
from cobra import Model, Reaction, Metabolite
@pytest.fixture
def model():
A = Metabolite("A")
B = Metabolite("B")
C = Metabolite("C")
r1 = Reaction("r1")
r1.add_metabolites({A: -1, C: 1})
r2 = Reaction("r2")
r2.add_metabolites({B: -1, C: 1})
r3 = Reaction("EX_A")
r3.add_metabolites({A: 1})
r4 = Reaction("EX_B")
r4.add_metabolites({B: 1})
r5 = Reaction("EX_C")
r5.add_metabolites({C: -1})
mod = Model("test model")
mod.add_reactions([r1, r2, r3, r4, r5])
conf = {"r1": 1, "r2": -1, "EX_A": 1, "EX_B": 1, "EX_C": 1}
return (mod, conf)
class TestCORDAsimple:
def test_mock_add(self, model):
mod, conf = model
opt = CORDA(mod, conf, met_prod={"C": -1})
r = opt.model.reactions.get_by_id("EX_CORDA_0")
assert "mock" in r.notes
opt = CORDA(mod, conf, met_prod="C ->")
r = opt.model.reactions.get_by_id("EX_CORDA_0")
assert "mock" in r.notes
with pytest.raises(TypeError):
CORDA(mod, conf, met_prod=[["C"]])
opt.build()
mod = opt.cobra_model()
assert all(mr not in mod.reactions for mr in opt.mocks)
def test_conf_check(self, model):
mod, conf = model
co = conf.copy()
del co["EX_A"]
with pytest.raises(ValueError):
CORDA(mod, co)
def test_valid_conf(self, model):
mod, conf = model
co = conf.copy()
co["EX_A"] = 4
with pytest.raises(ValueError):
CORDA(mod, co)
def test_performance_metrics(self, model):
opt = CORDA(model[0], model[1])
assert "not built" in str(opt)
def test_impossible_req(self, model):
mod, conf = model
D = Metabolite("D")
mod.add_metabolites([D])
opt = CORDA(mod, conf, met_prod=["D"])
need = opt.associated(["EX_CORDA_0"])
assert len(need) == 0
assert "EX_CORDA_0" in opt.impossible
def test_association_works(self, model):
mod, conf = model
opt = CORDA(mod, conf, met_prod="C ->")
need = opt.associated(["EX_CORDA_0"])
solutions = (["EX_A", "r1"], ["EX_B", "r2"])
assert list(need) in solutions
def test_redundancy_works(self, model):
mod, conf = model
conf["r2"] = 2
opt = CORDA(mod, conf, met_prod="C ->")
need = opt.associated(["EX_CORDA_0"], conf)
assert len(need) == 4
assert opt.redundancies["EX_CORDA_0"] == 2
opt = CORDA(mod, conf, met_prod="C ->", n=1)
need = opt.associated(["EX_CORDA_0"], conf)
assert len(need) == 2
assert opt.redundancies["EX_CORDA_0"] == 1
if __name__ == '__main__':
pytest.main()
| true
| true
|
1c42c00bdb42ef4e18df5e1c266ee6dd86e9088b
| 58,194
|
py
|
Python
|
tests/test_redshift/test_redshift.py
|
irahulranjan/moto
|
e7fdb633adc75b0e0dec9e5bc04daed697582802
|
[
"Apache-2.0"
] | null | null | null |
tests/test_redshift/test_redshift.py
|
irahulranjan/moto
|
e7fdb633adc75b0e0dec9e5bc04daed697582802
|
[
"Apache-2.0"
] | null | null | null |
tests/test_redshift/test_redshift.py
|
irahulranjan/moto
|
e7fdb633adc75b0e0dec9e5bc04daed697582802
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import time
import datetime
import boto
import boto3
from boto.redshift.exceptions import (
ClusterNotFound,
ClusterParameterGroupNotFound,
ClusterSecurityGroupNotFound,
ClusterSubnetGroupNotFound,
InvalidSubnet,
)
from botocore.exceptions import ClientError
import pytest
import sure # noqa
from moto import mock_ec2
from moto import mock_ec2_deprecated
from moto import mock_redshift
from moto import mock_redshift_deprecated
from moto.core import ACCOUNT_ID
@mock_redshift
def test_create_cluster_boto3():
client = boto3.client("redshift", region_name="us-east-1")
response = client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
response["Cluster"]["NodeType"].should.equal("ds2.xlarge")
create_time = response["Cluster"]["ClusterCreateTime"]
create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo))
create_time.should.be.greater_than(
datetime.datetime.now(create_time.tzinfo) - datetime.timedelta(minutes=1)
)
response["Cluster"]["EnhancedVpcRouting"].should.equal(False)
@mock_redshift
def test_create_cluster_with_enhanced_vpc_routing_enabled():
client = boto3.client("redshift", region_name="us-east-1")
response = client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
response["Cluster"]["NodeType"].should.equal("ds2.xlarge")
create_time = response["Cluster"]["ClusterCreateTime"]
create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo))
create_time.should.be.greater_than(
datetime.datetime.now(create_time.tzinfo) - datetime.timedelta(minutes=1)
)
response["Cluster"]["EnhancedVpcRouting"].should.equal(True)
@mock_redshift
def test_create_snapshot_copy_grant():
client = boto3.client("redshift", region_name="us-east-1")
grants = client.create_snapshot_copy_grant(
SnapshotCopyGrantName="test-us-east-1", KmsKeyId="fake"
)
grants["SnapshotCopyGrant"]["SnapshotCopyGrantName"].should.equal("test-us-east-1")
grants["SnapshotCopyGrant"]["KmsKeyId"].should.equal("fake")
client.delete_snapshot_copy_grant(SnapshotCopyGrantName="test-us-east-1")
client.describe_snapshot_copy_grants.when.called_with(
SnapshotCopyGrantName="test-us-east-1"
).should.throw(ClientError)
@mock_redshift
def test_create_many_snapshot_copy_grants():
client = boto3.client("redshift", region_name="us-east-1")
for i in range(10):
client.create_snapshot_copy_grant(
SnapshotCopyGrantName="test-us-east-1-{0}".format(i), KmsKeyId="fake"
)
response = client.describe_snapshot_copy_grants()
len(response["SnapshotCopyGrants"]).should.equal(10)
@mock_redshift
def test_no_snapshot_copy_grants():
client = boto3.client("redshift", region_name="us-east-1")
response = client.describe_snapshot_copy_grants()
len(response["SnapshotCopyGrants"]).should.equal(0)
@mock_redshift_deprecated
def test_create_cluster():
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = "my_cluster"
cluster_response = conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
db_name="my_db",
cluster_type="multi-node",
availability_zone="us-east-1d",
preferred_maintenance_window="Mon:03:00-Mon:11:00",
automated_snapshot_retention_period=10,
port=1234,
cluster_version="1.0",
allow_version_upgrade=True,
number_of_nodes=3,
)
cluster_response["CreateClusterResponse"]["CreateClusterResult"]["Cluster"][
"ClusterStatus"
].should.equal("creating")
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["ClusterIdentifier"].should.equal(cluster_identifier)
cluster["NodeType"].should.equal("dw.hs1.xlarge")
cluster["MasterUsername"].should.equal("username")
cluster["DBName"].should.equal("my_db")
cluster["ClusterSecurityGroups"][0]["ClusterSecurityGroupName"].should.equal(
"Default"
)
cluster["VpcSecurityGroups"].should.equal([])
cluster["ClusterSubnetGroupName"].should.equal(None)
cluster["AvailabilityZone"].should.equal("us-east-1d")
cluster["PreferredMaintenanceWindow"].should.equal("Mon:03:00-Mon:11:00")
cluster["ClusterParameterGroups"][0]["ParameterGroupName"].should.equal(
"default.redshift-1.0"
)
cluster["AutomatedSnapshotRetentionPeriod"].should.equal(10)
cluster["Port"].should.equal(1234)
cluster["ClusterVersion"].should.equal("1.0")
cluster["AllowVersionUpgrade"].should.equal(True)
cluster["NumberOfNodes"].should.equal(3)
@mock_redshift_deprecated
def test_create_single_node_cluster():
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = "my_cluster"
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
db_name="my_db",
cluster_type="single-node",
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["ClusterIdentifier"].should.equal(cluster_identifier)
cluster["NodeType"].should.equal("dw.hs1.xlarge")
cluster["MasterUsername"].should.equal("username")
cluster["DBName"].should.equal("my_db")
cluster["NumberOfNodes"].should.equal(1)
@mock_redshift_deprecated
def test_default_cluster_attributes():
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = "my_cluster"
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["DBName"].should.equal("dev")
cluster["ClusterSubnetGroupName"].should.equal(None)
assert "us-east-" in cluster["AvailabilityZone"]
cluster["PreferredMaintenanceWindow"].should.equal("Mon:03:00-Mon:03:30")
cluster["ClusterParameterGroups"][0]["ParameterGroupName"].should.equal(
"default.redshift-1.0"
)
cluster["AutomatedSnapshotRetentionPeriod"].should.equal(1)
cluster["Port"].should.equal(5439)
cluster["ClusterVersion"].should.equal("1.0")
cluster["AllowVersionUpgrade"].should.equal(True)
cluster["NumberOfNodes"].should.equal(1)
@mock_redshift
@mock_ec2
def test_create_cluster_in_subnet_group():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
)
client.create_cluster(
ClusterIdentifier="my_cluster",
NodeType="dw.hs1.xlarge",
MasterUsername="username",
MasterUserPassword="password",
ClusterSubnetGroupName="my_subnet_group",
)
cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster")
cluster = cluster_response["Clusters"][0]
cluster["ClusterSubnetGroupName"].should.equal("my_subnet_group")
@mock_redshift
@mock_ec2
def test_create_cluster_in_subnet_group_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
)
client.create_cluster(
ClusterIdentifier="my_cluster",
NodeType="dw.hs1.xlarge",
MasterUsername="username",
MasterUserPassword="password",
ClusterSubnetGroupName="my_subnet_group",
)
cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster")
cluster = cluster_response["Clusters"][0]
cluster["ClusterSubnetGroupName"].should.equal("my_subnet_group")
@mock_redshift_deprecated
def test_create_cluster_with_security_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.create_cluster_security_group("security_group1", "This is my security group")
conn.create_cluster_security_group("security_group2", "This is my security group")
cluster_identifier = "my_cluster"
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
cluster_security_groups=["security_group1", "security_group2"],
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
group_names = [
group["ClusterSecurityGroupName"] for group in cluster["ClusterSecurityGroups"]
]
set(group_names).should.equal(set(["security_group1", "security_group2"]))
@mock_redshift
def test_create_cluster_with_security_group_boto3():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_security_group(
ClusterSecurityGroupName="security_group1",
Description="This is my security group",
)
client.create_cluster_security_group(
ClusterSecurityGroupName="security_group2",
Description="This is my security group",
)
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
NodeType="dw.hs1.xlarge",
MasterUsername="username",
MasterUserPassword="password",
ClusterSecurityGroups=["security_group1", "security_group2"],
)
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = response["Clusters"][0]
group_names = [
group["ClusterSecurityGroupName"] for group in cluster["ClusterSecurityGroups"]
]
set(group_names).should.equal({"security_group1", "security_group2"})
@mock_redshift_deprecated
@mock_ec2_deprecated
def test_create_cluster_with_vpc_security_groups():
vpc_conn = boto.connect_vpc()
ec2_conn = boto.connect_ec2()
redshift_conn = boto.connect_redshift()
vpc = vpc_conn.create_vpc("10.0.0.0/16")
security_group = ec2_conn.create_security_group(
"vpc_security_group", "a group", vpc_id=vpc.id
)
redshift_conn.create_cluster(
"my_cluster",
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
vpc_security_group_ids=[security_group.id],
)
cluster_response = redshift_conn.describe_clusters("my_cluster")
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
group_ids = [group["VpcSecurityGroupId"] for group in cluster["VpcSecurityGroups"]]
list(group_ids).should.equal([security_group.id])
@mock_redshift
@mock_ec2
def test_create_cluster_with_vpc_security_groups_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
client = boto3.client("redshift", region_name="us-east-1")
cluster_id = "my_cluster"
security_group = ec2.create_security_group(
Description="vpc_security_group", GroupName="a group", VpcId=vpc.id
)
client.create_cluster(
ClusterIdentifier=cluster_id,
NodeType="dw.hs1.xlarge",
MasterUsername="username",
MasterUserPassword="password",
VpcSecurityGroupIds=[security_group.id],
)
response = client.describe_clusters(ClusterIdentifier=cluster_id)
cluster = response["Clusters"][0]
group_ids = [group["VpcSecurityGroupId"] for group in cluster["VpcSecurityGroups"]]
list(group_ids).should.equal([security_group.id])
@mock_redshift
def test_create_cluster_with_iam_roles():
iam_roles_arn = ["arn:aws:iam:::role/my-iam-role"]
client = boto3.client("redshift", region_name="us-east-1")
cluster_id = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_id,
NodeType="dw.hs1.xlarge",
MasterUsername="username",
MasterUserPassword="password",
IamRoles=iam_roles_arn,
)
response = client.describe_clusters(ClusterIdentifier=cluster_id)
cluster = response["Clusters"][0]
iam_roles = [role["IamRoleArn"] for role in cluster["IamRoles"]]
iam_roles_arn.should.equal(iam_roles)
@mock_redshift_deprecated
def test_create_cluster_with_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
conn.create_cluster(
"my_cluster",
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
cluster_parameter_group_name="my_parameter_group",
)
cluster_response = conn.describe_clusters("my_cluster")
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["ClusterParameterGroups"][0]["ParameterGroupName"].should.equal(
"my_parameter_group"
)
@mock_redshift_deprecated
def test_describe_non_existent_cluster():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_clusters.when.called_with("not-a-cluster").should.throw(
ClusterNotFound
)
@mock_redshift_deprecated
def test_delete_cluster():
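    # Deleting without skipping the final snapshot requires a snapshot identifier;
    # once one is supplied the cluster disappears and a snapshot is left behind.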
conn = boto.connect_redshift()
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
conn.create_cluster(
cluster_identifier,
node_type="single-node",
master_username="username",
master_user_password="password",
)
conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw(
boto.exception.JSONResponseError
)
clusters = conn.describe_clusters()["DescribeClustersResponse"][
"DescribeClustersResult"
]["Clusters"]
list(clusters).should.have.length_of(1)
conn.delete_cluster(
cluster_identifier=cluster_identifier,
skip_final_cluster_snapshot=False,
final_cluster_snapshot_identifier=snapshot_identifier,
)
clusters = conn.describe_clusters()["DescribeClustersResponse"][
"DescribeClustersResult"
]["Clusters"]
list(clusters).should.have.length_of(0)
snapshots = conn.describe_cluster_snapshots()["DescribeClusterSnapshotsResponse"][
"DescribeClusterSnapshotsResult"
]["Snapshots"]
list(snapshots).should.have.length_of(1)
assert snapshot_identifier in snapshots[0]["SnapshotIdentifier"]
# Delete invalid id
conn.delete_cluster.when.called_with("not-a-cluster").should.throw(ClusterNotFound)
@mock_redshift
def test_modify_cluster_vpc_routing():
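    # ModifyCluster should apply the new node type, maintenance window, retention
    # period and EnhancedVpcRouting flag while keeping the same cluster identifier.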
iam_roles_arn = ["arn:aws:iam:::role/my-iam-role"]
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
NodeType="single-node",
MasterUsername="username",
MasterUserPassword="password",
IamRoles=iam_roles_arn,
)
cluster_response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = cluster_response["Clusters"][0]
cluster["EnhancedVpcRouting"].should.equal(False)
client.create_cluster_security_group(
ClusterSecurityGroupName="security_group", Description="security_group"
)
client.create_cluster_parameter_group(
ParameterGroupName="my_parameter_group",
ParameterGroupFamily="redshift-1.0",
Description="my_parameter_group",
)
client.modify_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="multi-node",
NodeType="ds2.8xlarge",
NumberOfNodes=3,
ClusterSecurityGroups=["security_group"],
MasterUserPassword="new_password",
ClusterParameterGroupName="my_parameter_group",
AutomatedSnapshotRetentionPeriod=7,
PreferredMaintenanceWindow="Tue:03:00-Tue:11:00",
AllowVersionUpgrade=False,
NewClusterIdentifier=cluster_identifier,
EnhancedVpcRouting=True,
)
cluster_response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = cluster_response["Clusters"][0]
cluster["ClusterIdentifier"].should.equal(cluster_identifier)
cluster["NodeType"].should.equal("ds2.8xlarge")
cluster["PreferredMaintenanceWindow"].should.equal("Tue:03:00-Tue:11:00")
cluster["AutomatedSnapshotRetentionPeriod"].should.equal(7)
cluster["AllowVersionUpgrade"].should.equal(False)
    # NumberOfNodes should now reflect the value passed to modify_cluster.
cluster["NumberOfNodes"].should.equal(3)
cluster["EnhancedVpcRouting"].should.equal(True)
@mock_redshift_deprecated
def test_modify_cluster():
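    # Same modification flow as the test above, driven through the deprecated
    # boto2 client.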
conn = boto.connect_redshift()
cluster_identifier = "my_cluster"
conn.create_cluster_security_group("security_group", "This is my security group")
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
conn.create_cluster(
cluster_identifier,
node_type="single-node",
master_username="username",
master_user_password="password",
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["EnhancedVpcRouting"].should.equal(False)
conn.modify_cluster(
cluster_identifier,
cluster_type="multi-node",
number_of_nodes=4,
node_type="dw.hs1.xlarge",
cluster_security_groups="security_group",
master_user_password="new_password",
cluster_parameter_group_name="my_parameter_group",
automated_snapshot_retention_period=7,
preferred_maintenance_window="Tue:03:00-Tue:11:00",
allow_version_upgrade=False,
new_cluster_identifier=cluster_identifier,
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["ClusterIdentifier"].should.equal(cluster_identifier)
cluster["NodeType"].should.equal("dw.hs1.xlarge")
cluster["ClusterSecurityGroups"][0]["ClusterSecurityGroupName"].should.equal(
"security_group"
)
cluster["PreferredMaintenanceWindow"].should.equal("Tue:03:00-Tue:11:00")
cluster["ClusterParameterGroups"][0]["ParameterGroupName"].should.equal(
"my_parameter_group"
)
cluster["AutomatedSnapshotRetentionPeriod"].should.equal(7)
cluster["AllowVersionUpgrade"].should.equal(False)
cluster["NumberOfNodes"].should.equal(4)
@mock_redshift
@mock_ec2
def test_create_cluster_subnet_group():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet1.id, subnet2.id],
)
subnets_response = client.describe_cluster_subnet_groups(
ClusterSubnetGroupName="my_subnet_group"
)
my_subnet = subnets_response["ClusterSubnetGroups"][0]
my_subnet["ClusterSubnetGroupName"].should.equal("my_subnet_group")
my_subnet["Description"].should.equal("This is my subnet group")
subnet_ids = [subnet["SubnetIdentifier"] for subnet in my_subnet["Subnets"]]
set(subnet_ids).should.equal(set([subnet1.id, subnet2.id]))
@mock_redshift_deprecated
@mock_ec2_deprecated
def test_create_invalid_cluster_subnet_group():
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group.when.called_with(
"my_subnet", "This is my subnet group", subnet_ids=["subnet-1234"]
).should.throw(InvalidSubnet)
@mock_redshift_deprecated
def test_describe_non_existent_subnet_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_subnet_groups.when.called_with(
"not-a-subnet-group"
).should.throw(ClusterSubnetGroupNotFound)
@mock_redshift
@mock_ec2
def test_delete_cluster_subnet_group():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
)
subnets_response = client.describe_cluster_subnet_groups()
subnets = subnets_response["ClusterSubnetGroups"]
subnets.should.have.length_of(1)
client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group")
subnets_response = client.describe_cluster_subnet_groups()
subnets = subnets_response["ClusterSubnetGroups"]
subnets.should.have.length_of(0)
# Delete invalid id
client.delete_cluster_subnet_group.when.called_with(
ClusterSubnetGroupName="not-a-subnet-group"
).should.throw(ClientError)
@mock_redshift_deprecated
def test_create_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group("my_security_group", "This is my security group")
groups_response = conn.describe_cluster_security_groups("my_security_group")
my_group = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"][0]
my_group["ClusterSecurityGroupName"].should.equal("my_security_group")
my_group["Description"].should.equal("This is my security group")
list(my_group["IPRanges"]).should.equal([])
@mock_redshift_deprecated
def test_describe_non_existent_security_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_security_groups.when.called_with(
"not-a-security-group"
).should.throw(ClusterSecurityGroupNotFound)
@mock_redshift_deprecated
def test_delete_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group("my_security_group", "This is my security group")
groups_response = conn.describe_cluster_security_groups()
groups = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"]
groups.should.have.length_of(2) # The default group already exists
conn.delete_cluster_security_group("my_security_group")
groups_response = conn.describe_cluster_security_groups()
groups = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"]
groups.should.have.length_of(1)
# Delete invalid id
conn.delete_cluster_security_group.when.called_with(
"not-a-security-group"
).should.throw(ClusterSecurityGroupNotFound)
@mock_redshift_deprecated
def test_create_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
groups_response = conn.describe_cluster_parameter_groups("my_parameter_group")
my_group = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"][0]
my_group["ParameterGroupName"].should.equal("my_parameter_group")
my_group["ParameterGroupFamily"].should.equal("redshift-1.0")
my_group["Description"].should.equal("This is my parameter group")
@mock_redshift_deprecated
def test_describe_non_existent_parameter_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_parameter_groups.when.called_with(
"not-a-parameter-group"
).should.throw(ClusterParameterGroupNotFound)
@mock_redshift_deprecated
def test_delete_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"]
groups.should.have.length_of(2) # The default group already exists
conn.delete_cluster_parameter_group("my_parameter_group")
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"]
groups.should.have.length_of(1)
# Delete invalid id
conn.delete_cluster_parameter_group.when.called_with(
"not-a-parameter-group"
).should.throw(ClusterParameterGroupNotFound)
@mock_redshift
def test_create_cluster_snapshot_of_non_existent_cluster():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "non-existent-cluster-id"
client.create_cluster_snapshot.when.called_with(
SnapshotIdentifier="snapshot-id", ClusterIdentifier=cluster_identifier
).should.throw(ClientError, "Cluster {} not found.".format(cluster_identifier))
@mock_redshift
def test_create_cluster_snapshot():
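    # A manual snapshot should record the source cluster's node type, node count
    # and master username.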
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
cluster_response = client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
cluster_response["Cluster"]["NodeType"].should.equal("ds2.xlarge")
snapshot_response = client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": "test-tag-key", "Value": "test-tag-value"}],
)
snapshot = snapshot_response["Snapshot"]
snapshot["SnapshotIdentifier"].should.equal(snapshot_identifier)
snapshot["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot["NumberOfNodes"].should.equal(1)
snapshot["NodeType"].should.equal("ds2.xlarge")
snapshot["MasterUsername"].should.equal("username")
@mock_redshift
def test_describe_cluster_snapshots():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier_1 = "my_snapshot_1"
snapshot_identifier_2 = "my_snapshot_2"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier_1, ClusterIdentifier=cluster_identifier
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier_2, ClusterIdentifier=cluster_identifier
)
resp_snap_1 = client.describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier_1
)
snapshot_1 = resp_snap_1["Snapshots"][0]
snapshot_1["SnapshotIdentifier"].should.equal(snapshot_identifier_1)
snapshot_1["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot_1["NumberOfNodes"].should.equal(1)
snapshot_1["NodeType"].should.equal("ds2.xlarge")
snapshot_1["MasterUsername"].should.equal("username")
resp_snap_2 = client.describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier_2
)
snapshot_2 = resp_snap_2["Snapshots"][0]
snapshot_2["SnapshotIdentifier"].should.equal(snapshot_identifier_2)
snapshot_2["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot_2["NumberOfNodes"].should.equal(1)
snapshot_2["NodeType"].should.equal("ds2.xlarge")
snapshot_2["MasterUsername"].should.equal("username")
resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp_clust["Snapshots"][0].should.equal(resp_snap_1["Snapshots"][0])
resp_clust["Snapshots"][1].should.equal(resp_snap_2["Snapshots"][0])
@mock_redshift
def test_describe_cluster_snapshots_not_found_error():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "non-existent-cluster-id"
snapshot_identifier = "non-existent-snapshot-id"
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(0)
client.describe_cluster_snapshots.when.called_with(
SnapshotIdentifier=snapshot_identifier
).should.throw(ClientError, "Snapshot {} not found.".format(snapshot_identifier))
@mock_redshift
def test_delete_cluster_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
)
snapshots = client.describe_cluster_snapshots()["Snapshots"]
list(snapshots).should.have.length_of(1)
client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)["Snapshot"][
"Status"
].should.equal("deleted")
snapshots = client.describe_cluster_snapshots()["Snapshots"]
list(snapshots).should.have.length_of(0)
# Delete invalid id
client.delete_cluster_snapshot.when.called_with(
SnapshotIdentifier="non-existent"
).should.throw(ClientError, "Snapshot non-existent not found.")
@mock_redshift
def test_cluster_snapshot_already_exists():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
)
client.create_cluster_snapshot.when.called_with(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
).should.throw(ClientError, "{} already exists".format(snapshot_identifier))
@mock_redshift
def test_create_cluster_from_snapshot():
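    # Restoring onto an existing cluster identifier must fail; restoring to a new
    # identifier should carry over node type, master username and EnhancedVpcRouting.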
client = boto3.client("redshift", region_name="us-east-1")
original_cluster_identifier = "original-cluster"
original_snapshot_identifier = "original-snapshot"
new_cluster_identifier = "new-cluster"
client.create_cluster(
ClusterIdentifier=original_cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
client.create_cluster_snapshot(
SnapshotIdentifier=original_snapshot_identifier,
ClusterIdentifier=original_cluster_identifier,
)
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier=original_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
).should.throw(ClientError, "ClusterAlreadyExists")
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
Port=1234,
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
response = client.describe_clusters(ClusterIdentifier=new_cluster_identifier)
new_cluster = response["Clusters"][0]
new_cluster["NodeType"].should.equal("ds2.xlarge")
new_cluster["MasterUsername"].should.equal("username")
new_cluster["Endpoint"]["Port"].should.equal(1234)
new_cluster["EnhancedVpcRouting"].should.equal(True)
@mock_redshift
def test_create_cluster_from_snapshot_with_waiter():
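    # Same restore flow as above, but wait on the cluster_restored waiter before
    # asserting on the new cluster.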
client = boto3.client("redshift", region_name="us-east-1")
original_cluster_identifier = "original-cluster"
original_snapshot_identifier = "original-snapshot"
new_cluster_identifier = "new-cluster"
client.create_cluster(
ClusterIdentifier=original_cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
client.create_cluster_snapshot(
SnapshotIdentifier=original_snapshot_identifier,
ClusterIdentifier=original_cluster_identifier,
)
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
Port=1234,
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
client.get_waiter("cluster_restored").wait(
ClusterIdentifier=new_cluster_identifier,
WaiterConfig={"Delay": 1, "MaxAttempts": 2},
)
response = client.describe_clusters(ClusterIdentifier=new_cluster_identifier)
new_cluster = response["Clusters"][0]
new_cluster["NodeType"].should.equal("ds2.xlarge")
new_cluster["MasterUsername"].should.equal("username")
new_cluster["EnhancedVpcRouting"].should.equal(True)
new_cluster["Endpoint"]["Port"].should.equal(1234)
@mock_redshift
def test_create_cluster_from_non_existent_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier="cluster-id", SnapshotIdentifier="non-existent-snapshot"
).should.throw(ClientError, "Snapshot non-existent-snapshot not found.")
@mock_redshift
def test_create_cluster_status_update():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "test-cluster"
response = client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
response["Clusters"][0]["ClusterStatus"].should.equal("available")
@mock_redshift
def test_describe_tags_with_resource_type():
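    # Tags attached to a cluster and a snapshot should be retrievable by filtering
    # DescribeTags on the matching ResourceType.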
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
snapshot_identifier = "my_snapshot"
snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format(
ACCOUNT_ID, cluster_identifier, snapshot_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceType="cluster")
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("cluster")
tagged_resources[0]["ResourceName"].should.equal(cluster_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceType="snapshot")
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("snapshot")
tagged_resources[0]["ResourceName"].should.equal(snapshot_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
@mock_redshift
def test_describe_tags_cannot_specify_resource_type_and_resource_name():
client = boto3.client("redshift", region_name="us-east-1")
resource_name = "arn:aws:redshift:us-east-1:{}:cluster:cluster-id".format(
ACCOUNT_ID
)
resource_type = "cluster"
client.describe_tags.when.called_with(
ResourceName=resource_name, ResourceType=resource_type
).should.throw(ClientError, "using either an ARN or a resource type")
@mock_redshift
def test_describe_tags_with_resource_name():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
snapshot_identifier = "snapshot-id"
snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format(
ACCOUNT_ID, cluster_identifier, snapshot_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceName=cluster_arn)
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("cluster")
tagged_resources[0]["ResourceName"].should.equal(cluster_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceName=snapshot_arn)
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("snapshot")
tagged_resources[0]["ResourceName"].should.equal(snapshot_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
@mock_redshift
def test_create_tags():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
num_tags = 5
tags = []
for i in range(0, num_tags):
tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)}
tags.append(tag)
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_tags(ResourceName=cluster_arn, Tags=tags)
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = response["Clusters"][0]
list(cluster["Tags"]).should.have.length_of(num_tags)
response = client.describe_tags(ResourceName=cluster_arn)
list(response["TaggedResources"]).should.have.length_of(num_tags)
@mock_redshift
def test_delete_tags():
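    # Tag keys that are excluded from the DeleteTags request must survive on the
    # cluster and still show up in DescribeTags.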
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
tags = []
for i in range(1, 2):
tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)}
tags.append(tag)
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=tags,
)
client.delete_tags(
ResourceName=cluster_arn,
TagKeys=[tag["Key"] for tag in tags if tag["Key"] != "{}-1".format(tag_key)],
)
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = response["Clusters"][0]
list(cluster["Tags"]).should.have.length_of(1)
response = client.describe_tags(ResourceName=cluster_arn)
list(response["TaggedResources"]).should.have.length_of(1)
@mock_ec2
@mock_redshift
def test_describe_tags_all_resource_types():
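    # With no filters, DescribeTags should return one entry per tagged resource:
    # cluster, parametergroup, securitygroup, snapshot and subnetgroup.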
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
response = client.describe_tags()
list(response["TaggedResources"]).should.have.length_of(0)
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_security_group(
ClusterSecurityGroupName="security_group1",
Description="This is my security group",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster(
DBName="test",
ClusterIdentifier="my_cluster",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_snapshot(
SnapshotIdentifier="my_snapshot",
ClusterIdentifier="my_cluster",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_parameter_group(
ParameterGroupName="my_parameter_group",
ParameterGroupFamily="redshift-1.0",
Description="This is my parameter group",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
response = client.describe_tags()
expected_types = [
"cluster",
"parametergroup",
"securitygroup",
"snapshot",
"subnetgroup",
]
tagged_resources = response["TaggedResources"]
returned_types = [resource["ResourceType"] for resource in tagged_resources]
list(tagged_resources).should.have.length_of(len(expected_types))
set(returned_types).should.equal(set(expected_types))
@mock_redshift
def test_tagged_resource_not_found_error():
client = boto3.client("redshift", region_name="us-east-1")
cluster_arn = "arn:aws:redshift:us-east-1::cluster:fake"
client.describe_tags.when.called_with(ResourceName=cluster_arn).should.throw(
ClientError, "cluster (fake) not found."
)
snapshot_arn = "arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id"
client.delete_tags.when.called_with(
ResourceName=snapshot_arn, TagKeys=["test"]
).should.throw(ClientError, "snapshot (snap-id) not found.")
client.describe_tags.when.called_with(ResourceType="cluster").should.throw(
ClientError, "resource of type 'cluster' not found."
)
client.describe_tags.when.called_with(ResourceName="bad:arn").should.throw(
ClientError, "Tagging is not supported for this type of resource"
)
@mock_redshift
def test_enable_snapshot_copy():
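    # KMS-encrypted clusters need a SnapshotCopyGrantName, and the destination
    # region must differ from the cluster's own region.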
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
ClusterIdentifier="test",
ClusterType="single-node",
DBName="test",
Encrypted=True,
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
with pytest.raises(ClientError) as ex:
client.enable_snapshot_copy(
ClusterIdentifier="test", DestinationRegion="us-west-2", RetentionPeriod=3,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters."
)
with pytest.raises(ClientError) as ex:
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-east-1",
RetentionPeriod=3,
SnapshotCopyGrantName="invalid-us-east-1-to-us-east-1",
)
ex.value.response["Error"]["Code"].should.equal("UnknownSnapshotCopyRegionFault")
ex.value.response["Error"]["Message"].should.contain("Invalid region us-east-1")
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(3)
cluster_snapshot_copy_status["DestinationRegion"].should.equal("us-west-2")
cluster_snapshot_copy_status["SnapshotCopyGrantName"].should.equal(
"copy-us-east-1-to-us-west-2"
)
@mock_redshift
def test_enable_snapshot_copy_unencrypted():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
ClusterIdentifier="test",
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
client.enable_snapshot_copy(ClusterIdentifier="test", DestinationRegion="us-west-2")
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(7)
cluster_snapshot_copy_status["DestinationRegion"].should.equal("us-west-2")
@mock_redshift
def test_disable_snapshot_copy():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
client.disable_snapshot_copy(ClusterIdentifier="test")
response = client.describe_clusters(ClusterIdentifier="test")
response["Clusters"][0].shouldnt.contain("ClusterSnapshotCopyStatus")
@mock_redshift
def test_modify_snapshot_copy_retention_period():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
client.modify_snapshot_copy_retention_period(
ClusterIdentifier="test", RetentionPeriod=5
)
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5)
@mock_redshift
def test_create_duplicate_cluster_fails():
kwargs = {
"ClusterIdentifier": "test",
"ClusterType": "single-node",
"DBName": "test",
"MasterUsername": "user",
"MasterUserPassword": "password",
"NodeType": "ds2.xlarge",
}
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(**kwargs)
client.create_cluster.when.called_with(**kwargs).should.throw(
ClientError, "ClusterAlreadyExists"
)
@mock_redshift
def test_delete_cluster_with_final_snapshot():
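    # SkipFinalClusterSnapshot=False without FinalClusterSnapshotIdentifier is an
    # invalid combination; supplying the identifier leaves a manual snapshot behind.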
client = boto3.client("redshift", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.delete_cluster(ClusterIdentifier="non-existent")
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
with pytest.raises(ClientError) as ex:
client.delete_cluster(
ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=False
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
ex.value.response["Error"]["Message"].should.contain(
"FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified."
)
snapshot_identifier = "my_snapshot"
client.delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=False,
FinalClusterSnapshotIdentifier=snapshot_identifier,
)
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(1)
resp["Snapshots"][0]["SnapshotIdentifier"].should.equal(snapshot_identifier)
resp["Snapshots"][0]["SnapshotType"].should.equal("manual")
with pytest.raises(ClientError) as ex:
client.describe_clusters(ClusterIdentifier=cluster_identifier)
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_delete_cluster_without_final_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
client.delete_cluster(
ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True
)
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(0)
with pytest.raises(ClientError) as ex:
client.describe_clusters(ClusterIdentifier=cluster_identifier)
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_resize_cluster():
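    # Switching between single-node and multi-node adjusts NumberOfNodes; a
    # multi-node cluster with fewer than two nodes is rejected.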
client = boto3.client("redshift", region_name="us-east-1")
resp = client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
resp["Cluster"]["NumberOfNodes"].should.equal(1)
client.modify_cluster(
ClusterIdentifier="test", ClusterType="multi-node", NumberOfNodes=2,
)
resp = client.describe_clusters(ClusterIdentifier="test")
resp["Clusters"][0]["NumberOfNodes"].should.equal(2)
client.modify_cluster(
ClusterIdentifier="test", ClusterType="single-node",
)
resp = client.describe_clusters(ClusterIdentifier="test")
resp["Clusters"][0]["NumberOfNodes"].should.equal(1)
with pytest.raises(ClientError) as ex:
client.modify_cluster(
ClusterIdentifier="test", ClusterType="multi-node", NumberOfNodes=1,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
ex.value.response["Error"]["Message"].should.contain(
"Number of nodes for cluster type multi-node must be greater than or equal to 2"
)
with pytest.raises(ClientError) as ex:
client.modify_cluster(
ClusterIdentifier="test",
ClusterType="invalid-cluster-type",
NumberOfNodes=1,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain("Invalid cluster type")
@mock_redshift
def test_get_cluster_credentials_non_existent_cluster():
    # GetClusterCredentials requires DbUser, so the missing-cluster lookup is
    # exercised with a user supplied; the call should fail with ClusterNotFound.
    client = boto3.client("redshift", region_name="us-east-1")
    with pytest.raises(ClientError) as ex:
        client.get_cluster_credentials(
            ClusterIdentifier="non-existent", DbUser="some_user"
        )
    ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
    ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_get_cluster_credentials_invalid_duration():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
db_user = "some_user"
with pytest.raises(ClientError) as ex:
client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=899
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"Token duration must be between 900 and 3600 seconds"
)
with pytest.raises(ClientError) as ex:
client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=3601
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"Token duration must be between 900 and 3600 seconds"
)
@mock_redshift
def test_get_cluster_credentials():
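    # Temporary credentials prefix the user with "IAM:" ("IAMA:" when AutoCreate
    # is set) and expire after the requested duration, 900 seconds by default.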
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
expected_expiration = time.mktime(
(datetime.datetime.now() + datetime.timedelta(0, 900)).timetuple()
)
db_user = "some_user"
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user,
)
response["DbUser"].should.equal("IAM:%s" % db_user)
assert time.mktime((response["Expiration"]).timetuple()) == pytest.approx(
expected_expiration
)
response["DbPassword"].should.have.length_of(32)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, AutoCreate=True
)
response["DbUser"].should.equal("IAMA:%s" % db_user)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser="some_other_user", AutoCreate=False
)
response["DbUser"].should.equal("IAM:%s" % "some_other_user")
expected_expiration = time.mktime(
(datetime.datetime.now() + datetime.timedelta(0, 3000)).timetuple()
)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=3000,
)
assert time.mktime(response["Expiration"].timetuple()) == pytest.approx(
expected_expiration
)
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response["DescribeClustersResponse"]["DescribeClustersResult"][
"Clusters"
][0]
cluster["ClusterIdentifier"].should.equal(cluster_identifier)
cluster["NodeType"].should.equal("dw.hs1.xlarge")
cluster["ClusterSecurityGroups"][0]["ClusterSecurityGroupName"].should.equal(
"security_group"
)
cluster["PreferredMaintenanceWindow"].should.equal("Tue:03:00-Tue:11:00")
cluster["ClusterParameterGroups"][0]["ParameterGroupName"].should.equal(
"my_parameter_group"
)
cluster["AutomatedSnapshotRetentionPeriod"].should.equal(7)
cluster["AllowVersionUpgrade"].should.equal(False)
cluster["NumberOfNodes"].should.equal(4)
@mock_redshift
@mock_ec2
def test_create_cluster_subnet_group():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet1.id, subnet2.id],
)
subnets_response = client.describe_cluster_subnet_groups(
ClusterSubnetGroupName="my_subnet_group"
)
my_subnet = subnets_response["ClusterSubnetGroups"][0]
my_subnet["ClusterSubnetGroupName"].should.equal("my_subnet_group")
my_subnet["Description"].should.equal("This is my subnet group")
subnet_ids = [subnet["SubnetIdentifier"] for subnet in my_subnet["Subnets"]]
set(subnet_ids).should.equal(set([subnet1.id, subnet2.id]))
@mock_redshift_deprecated
@mock_ec2_deprecated
def test_create_invalid_cluster_subnet_group():
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group.when.called_with(
"my_subnet", "This is my subnet group", subnet_ids=["subnet-1234"]
).should.throw(InvalidSubnet)
@mock_redshift_deprecated
def test_describe_non_existent_subnet_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_subnet_groups.when.called_with(
"not-a-subnet-group"
).should.throw(ClusterSubnetGroupNotFound)
@mock_redshift
@mock_ec2
def test_delete_cluster_subnet_group():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
)
subnets_response = client.describe_cluster_subnet_groups()
subnets = subnets_response["ClusterSubnetGroups"]
subnets.should.have.length_of(1)
client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group")
subnets_response = client.describe_cluster_subnet_groups()
subnets = subnets_response["ClusterSubnetGroups"]
subnets.should.have.length_of(0)
client.delete_cluster_subnet_group.when.called_with(
ClusterSubnetGroupName="not-a-subnet-group"
).should.throw(ClientError)
@mock_redshift_deprecated
def test_create_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group("my_security_group", "This is my security group")
groups_response = conn.describe_cluster_security_groups("my_security_group")
my_group = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"][0]
my_group["ClusterSecurityGroupName"].should.equal("my_security_group")
my_group["Description"].should.equal("This is my security group")
list(my_group["IPRanges"]).should.equal([])
@mock_redshift_deprecated
def test_describe_non_existent_security_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_security_groups.when.called_with(
"not-a-security-group"
).should.throw(ClusterSecurityGroupNotFound)
@mock_redshift_deprecated
def test_delete_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group("my_security_group", "This is my security group")
groups_response = conn.describe_cluster_security_groups()
groups = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"]
groups.should.have.length_of(2)
conn.delete_cluster_security_group("my_security_group")
groups_response = conn.describe_cluster_security_groups()
groups = groups_response["DescribeClusterSecurityGroupsResponse"][
"DescribeClusterSecurityGroupsResult"
]["ClusterSecurityGroups"]
groups.should.have.length_of(1)
conn.delete_cluster_security_group.when.called_with(
"not-a-security-group"
).should.throw(ClusterSecurityGroupNotFound)
@mock_redshift_deprecated
def test_create_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
groups_response = conn.describe_cluster_parameter_groups("my_parameter_group")
my_group = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"][0]
my_group["ParameterGroupName"].should.equal("my_parameter_group")
my_group["ParameterGroupFamily"].should.equal("redshift-1.0")
my_group["Description"].should.equal("This is my parameter group")
@mock_redshift_deprecated
def test_describe_non_existent_parameter_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_parameter_groups.when.called_with(
"not-a-parameter-group"
).should.throw(ClusterParameterGroupNotFound)
@mock_redshift_deprecated
def test_delete_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group", "redshift-1.0", "This is my parameter group"
)
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"]
groups.should.have.length_of(2)
conn.delete_cluster_parameter_group("my_parameter_group")
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response["DescribeClusterParameterGroupsResponse"][
"DescribeClusterParameterGroupsResult"
]["ParameterGroups"]
groups.should.have.length_of(1)
conn.delete_cluster_parameter_group.when.called_with(
"not-a-parameter-group"
).should.throw(ClusterParameterGroupNotFound)
@mock_redshift
def test_create_cluster_snapshot_of_non_existent_cluster():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "non-existent-cluster-id"
client.create_cluster_snapshot.when.called_with(
SnapshotIdentifier="snapshot-id", ClusterIdentifier=cluster_identifier
).should.throw(ClientError, "Cluster {} not found.".format(cluster_identifier))
@mock_redshift
def test_create_cluster_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
cluster_response = client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
cluster_response["Cluster"]["NodeType"].should.equal("ds2.xlarge")
snapshot_response = client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": "test-tag-key", "Value": "test-tag-value"}],
)
snapshot = snapshot_response["Snapshot"]
snapshot["SnapshotIdentifier"].should.equal(snapshot_identifier)
snapshot["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot["NumberOfNodes"].should.equal(1)
snapshot["NodeType"].should.equal("ds2.xlarge")
snapshot["MasterUsername"].should.equal("username")
@mock_redshift
def test_describe_cluster_snapshots():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier_1 = "my_snapshot_1"
snapshot_identifier_2 = "my_snapshot_2"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier_1, ClusterIdentifier=cluster_identifier
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier_2, ClusterIdentifier=cluster_identifier
)
resp_snap_1 = client.describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier_1
)
snapshot_1 = resp_snap_1["Snapshots"][0]
snapshot_1["SnapshotIdentifier"].should.equal(snapshot_identifier_1)
snapshot_1["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot_1["NumberOfNodes"].should.equal(1)
snapshot_1["NodeType"].should.equal("ds2.xlarge")
snapshot_1["MasterUsername"].should.equal("username")
resp_snap_2 = client.describe_cluster_snapshots(
SnapshotIdentifier=snapshot_identifier_2
)
snapshot_2 = resp_snap_2["Snapshots"][0]
snapshot_2["SnapshotIdentifier"].should.equal(snapshot_identifier_2)
snapshot_2["ClusterIdentifier"].should.equal(cluster_identifier)
snapshot_2["NumberOfNodes"].should.equal(1)
snapshot_2["NodeType"].should.equal("ds2.xlarge")
snapshot_2["MasterUsername"].should.equal("username")
resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp_clust["Snapshots"][0].should.equal(resp_snap_1["Snapshots"][0])
resp_clust["Snapshots"][1].should.equal(resp_snap_2["Snapshots"][0])
@mock_redshift
def test_describe_cluster_snapshots_not_found_error():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "non-existent-cluster-id"
snapshot_identifier = "non-existent-snapshot-id"
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(0)
client.describe_cluster_snapshots.when.called_with(
SnapshotIdentifier=snapshot_identifier
).should.throw(ClientError, "Snapshot {} not found.".format(snapshot_identifier))
@mock_redshift
def test_delete_cluster_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
)
snapshots = client.describe_cluster_snapshots()["Snapshots"]
list(snapshots).should.have.length_of(1)
client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)["Snapshot"][
"Status"
].should.equal("deleted")
snapshots = client.describe_cluster_snapshots()["Snapshots"]
list(snapshots).should.have.length_of(0)
client.delete_cluster_snapshot.when.called_with(
SnapshotIdentifier="non-existent"
).should.throw(ClientError, "Snapshot non-existent not found.")
@mock_redshift
def test_cluster_snapshot_already_exists():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
snapshot_identifier = "my_snapshot"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
)
client.create_cluster_snapshot.when.called_with(
SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier
).should.throw(ClientError, "{} already exists".format(snapshot_identifier))
@mock_redshift
def test_create_cluster_from_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
original_cluster_identifier = "original-cluster"
original_snapshot_identifier = "original-snapshot"
new_cluster_identifier = "new-cluster"
client.create_cluster(
ClusterIdentifier=original_cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
client.create_cluster_snapshot(
SnapshotIdentifier=original_snapshot_identifier,
ClusterIdentifier=original_cluster_identifier,
)
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier=original_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
).should.throw(ClientError, "ClusterAlreadyExists")
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
Port=1234,
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
response = client.describe_clusters(ClusterIdentifier=new_cluster_identifier)
new_cluster = response["Clusters"][0]
new_cluster["NodeType"].should.equal("ds2.xlarge")
new_cluster["MasterUsername"].should.equal("username")
new_cluster["Endpoint"]["Port"].should.equal(1234)
new_cluster["EnhancedVpcRouting"].should.equal(True)
@mock_redshift
def test_create_cluster_from_snapshot_with_waiter():
client = boto3.client("redshift", region_name="us-east-1")
original_cluster_identifier = "original-cluster"
original_snapshot_identifier = "original-snapshot"
new_cluster_identifier = "new-cluster"
client.create_cluster(
ClusterIdentifier=original_cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
EnhancedVpcRouting=True,
)
client.create_cluster_snapshot(
SnapshotIdentifier=original_snapshot_identifier,
ClusterIdentifier=original_cluster_identifier,
)
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
Port=1234,
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
client.get_waiter("cluster_restored").wait(
ClusterIdentifier=new_cluster_identifier,
WaiterConfig={"Delay": 1, "MaxAttempts": 2},
)
response = client.describe_clusters(ClusterIdentifier=new_cluster_identifier)
new_cluster = response["Clusters"][0]
new_cluster["NodeType"].should.equal("ds2.xlarge")
new_cluster["MasterUsername"].should.equal("username")
new_cluster["EnhancedVpcRouting"].should.equal(True)
new_cluster["Endpoint"]["Port"].should.equal(1234)
@mock_redshift
def test_create_cluster_from_non_existent_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier="cluster-id", SnapshotIdentifier="non-existent-snapshot"
).should.throw(ClientError, "Snapshot non-existent-snapshot not found.")
@mock_redshift
def test_create_cluster_status_update():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "test-cluster"
response = client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
response["Cluster"]["ClusterStatus"].should.equal("creating")
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
response["Clusters"][0]["ClusterStatus"].should.equal("available")
@mock_redshift
def test_describe_tags_with_resource_type():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
snapshot_identifier = "my_snapshot"
snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format(
ACCOUNT_ID, cluster_identifier, snapshot_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceType="cluster")
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("cluster")
tagged_resources[0]["ResourceName"].should.equal(cluster_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceType="snapshot")
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("snapshot")
tagged_resources[0]["ResourceName"].should.equal(snapshot_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
@mock_redshift
def test_describe_tags_cannot_specify_resource_type_and_resource_name():
client = boto3.client("redshift", region_name="us-east-1")
resource_name = "arn:aws:redshift:us-east-1:{}:cluster:cluster-id".format(
ACCOUNT_ID
)
resource_type = "cluster"
client.describe_tags.when.called_with(
ResourceName=resource_name, ResourceType=resource_type
).should.throw(ClientError, "using either an ARN or a resource type")
@mock_redshift
def test_describe_tags_with_resource_name():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
snapshot_identifier = "snapshot-id"
snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format(
ACCOUNT_ID, cluster_identifier, snapshot_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceName=cluster_arn)
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("cluster")
tagged_resources[0]["ResourceName"].should.equal(cluster_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
Tags=[{"Key": tag_key, "Value": tag_value}],
)
tags_response = client.describe_tags(ResourceName=snapshot_arn)
tagged_resources = tags_response["TaggedResources"]
list(tagged_resources).should.have.length_of(1)
tagged_resources[0]["ResourceType"].should.equal("snapshot")
tagged_resources[0]["ResourceName"].should.equal(snapshot_arn)
tag = tagged_resources[0]["Tag"]
tag["Key"].should.equal(tag_key)
tag["Value"].should.equal(tag_value)
@mock_redshift
def test_create_tags():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
num_tags = 5
tags = []
for i in range(0, num_tags):
tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)}
tags.append(tag)
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
)
client.create_tags(ResourceName=cluster_arn, Tags=tags)
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = response["Clusters"][0]
list(cluster["Tags"]).should.have.length_of(num_tags)
response = client.describe_tags(ResourceName=cluster_arn)
list(response["TaggedResources"]).should.have.length_of(num_tags)
@mock_redshift
def test_delete_tags():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "cluster-id"
cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format(
ACCOUNT_ID, cluster_identifier
)
tag_key = "test-tag-key"
tag_value = "test-tag-value"
tags = []
for i in range(1, 2):
tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)}
tags.append(tag)
client.create_cluster(
DBName="test-db",
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="username",
MasterUserPassword="password",
Tags=tags,
)
client.delete_tags(
ResourceName=cluster_arn,
TagKeys=[tag["Key"] for tag in tags if tag["Key"] != "{}-1".format(tag_key)],
)
response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
cluster = response["Clusters"][0]
list(cluster["Tags"]).should.have.length_of(1)
response = client.describe_tags(ResourceName=cluster_arn)
list(response["TaggedResources"]).should.have.length_of(1)
@mock_ec2
@mock_redshift
def test_describe_tags_all_resource_types():
ec2 = boto3.resource("ec2", region_name="us-east-1")
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24")
client = boto3.client("redshift", region_name="us-east-1")
response = client.describe_tags()
list(response["TaggedResources"]).should.have.length_of(0)
client.create_cluster_subnet_group(
ClusterSubnetGroupName="my_subnet_group",
Description="This is my subnet group",
SubnetIds=[subnet.id],
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_security_group(
ClusterSecurityGroupName="security_group1",
Description="This is my security group",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster(
DBName="test",
ClusterIdentifier="my_cluster",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_snapshot(
SnapshotIdentifier="my_snapshot",
ClusterIdentifier="my_cluster",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
client.create_cluster_parameter_group(
ParameterGroupName="my_parameter_group",
ParameterGroupFamily="redshift-1.0",
Description="This is my parameter group",
Tags=[{"Key": "tag_key", "Value": "tag_value"}],
)
response = client.describe_tags()
expected_types = [
"cluster",
"parametergroup",
"securitygroup",
"snapshot",
"subnetgroup",
]
tagged_resources = response["TaggedResources"]
returned_types = [resource["ResourceType"] for resource in tagged_resources]
list(tagged_resources).should.have.length_of(len(expected_types))
set(returned_types).should.equal(set(expected_types))
@mock_redshift
def test_tagged_resource_not_found_error():
client = boto3.client("redshift", region_name="us-east-1")
cluster_arn = "arn:aws:redshift:us-east-1::cluster:fake"
client.describe_tags.when.called_with(ResourceName=cluster_arn).should.throw(
ClientError, "cluster (fake) not found."
)
snapshot_arn = "arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id"
client.delete_tags.when.called_with(
ResourceName=snapshot_arn, TagKeys=["test"]
).should.throw(ClientError, "snapshot (snap-id) not found.")
client.describe_tags.when.called_with(ResourceType="cluster").should.throw(
ClientError, "resource of type 'cluster' not found."
)
client.describe_tags.when.called_with(ResourceName="bad:arn").should.throw(
ClientError, "Tagging is not supported for this type of resource"
)
@mock_redshift
def test_enable_snapshot_copy():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
ClusterIdentifier="test",
ClusterType="single-node",
DBName="test",
Encrypted=True,
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
with pytest.raises(ClientError) as ex:
client.enable_snapshot_copy(
ClusterIdentifier="test", DestinationRegion="us-west-2", RetentionPeriod=3,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters."
)
with pytest.raises(ClientError) as ex:
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-east-1",
RetentionPeriod=3,
SnapshotCopyGrantName="invalid-us-east-1-to-us-east-1",
)
ex.value.response["Error"]["Code"].should.equal("UnknownSnapshotCopyRegionFault")
ex.value.response["Error"]["Message"].should.contain("Invalid region us-east-1")
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(3)
cluster_snapshot_copy_status["DestinationRegion"].should.equal("us-west-2")
cluster_snapshot_copy_status["SnapshotCopyGrantName"].should.equal(
"copy-us-east-1-to-us-west-2"
)
@mock_redshift
def test_enable_snapshot_copy_unencrypted():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
ClusterIdentifier="test",
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
client.enable_snapshot_copy(ClusterIdentifier="test", DestinationRegion="us-west-2")
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(7)
cluster_snapshot_copy_status["DestinationRegion"].should.equal("us-west-2")
@mock_redshift
def test_disable_snapshot_copy():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
client.disable_snapshot_copy(ClusterIdentifier="test")
response = client.describe_clusters(ClusterIdentifier="test")
response["Clusters"][0].shouldnt.contain("ClusterSnapshotCopyStatus")
@mock_redshift
def test_modify_snapshot_copy_retention_period():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
client.enable_snapshot_copy(
ClusterIdentifier="test",
DestinationRegion="us-west-2",
RetentionPeriod=3,
SnapshotCopyGrantName="copy-us-east-1-to-us-west-2",
)
client.modify_snapshot_copy_retention_period(
ClusterIdentifier="test", RetentionPeriod=5
)
response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5)
@mock_redshift
def test_create_duplicate_cluster_fails():
kwargs = {
"ClusterIdentifier": "test",
"ClusterType": "single-node",
"DBName": "test",
"MasterUsername": "user",
"MasterUserPassword": "password",
"NodeType": "ds2.xlarge",
}
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(**kwargs)
client.create_cluster.when.called_with(**kwargs).should.throw(
ClientError, "ClusterAlreadyExists"
)
@mock_redshift
def test_delete_cluster_with_final_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
with pytest.raises(ClientError) as ex:
client.delete_cluster(ClusterIdentifier="non-existent")
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
with pytest.raises(ClientError) as ex:
client.delete_cluster(
ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=False
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
ex.value.response["Error"]["Message"].should.contain(
"FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified."
)
snapshot_identifier = "my_snapshot"
client.delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=False,
FinalClusterSnapshotIdentifier=snapshot_identifier,
)
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(1)
resp["Snapshots"][0]["SnapshotIdentifier"].should.equal(snapshot_identifier)
resp["Snapshots"][0]["SnapshotType"].should.equal("manual")
with pytest.raises(ClientError) as ex:
client.describe_clusters(ClusterIdentifier=cluster_identifier)
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_delete_cluster_without_final_snapshot():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
client.delete_cluster(
ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True
)
resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
resp["Snapshots"].should.have.length_of(0)
with pytest.raises(ClientError) as ex:
client.describe_clusters(ClusterIdentifier=cluster_identifier)
ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_resize_cluster():
client = boto3.client("redshift", region_name="us-east-1")
resp = client.create_cluster(
DBName="test",
ClusterIdentifier="test",
ClusterType="single-node",
NodeType="ds2.xlarge",
MasterUsername="user",
MasterUserPassword="password",
)
resp["Cluster"]["NumberOfNodes"].should.equal(1)
client.modify_cluster(
ClusterIdentifier="test", ClusterType="multi-node", NumberOfNodes=2,
)
resp = client.describe_clusters(ClusterIdentifier="test")
resp["Clusters"][0]["NumberOfNodes"].should.equal(2)
client.modify_cluster(
ClusterIdentifier="test", ClusterType="single-node",
)
resp = client.describe_clusters(ClusterIdentifier="test")
resp["Clusters"][0]["NumberOfNodes"].should.equal(1)
with pytest.raises(ClientError) as ex:
client.modify_cluster(
ClusterIdentifier="test", ClusterType="multi-node", NumberOfNodes=1,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination")
ex.value.response["Error"]["Message"].should.contain(
"Number of nodes for cluster type multi-node must be greater than or equal to 2"
)
with pytest.raises(ClientError) as ex:
client.modify_cluster(
ClusterIdentifier="test",
ClusterType="invalid-cluster-type",
NumberOfNodes=1,
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain("Invalid cluster type")
@mock_redshift
def test_get_cluster_credentials_non_existent_cluster():
    client = boto3.client("redshift", region_name="us-east-1")
    with pytest.raises(ClientError) as ex:
        client.get_cluster_credentials(
            ClusterIdentifier="non-existent", DbUser="some_user"
        )
    ex.value.response["Error"]["Code"].should.equal("ClusterNotFound")
    ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.")
@mock_redshift
def test_get_cluster_credentials_invalid_duration():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
db_user = "some_user"
with pytest.raises(ClientError) as ex:
client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=899
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"Token duration must be between 900 and 3600 seconds"
)
with pytest.raises(ClientError) as ex:
client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=3601
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["Error"]["Message"].should.contain(
"Token duration must be between 900 and 3600 seconds"
)
@mock_redshift
def test_get_cluster_credentials():
client = boto3.client("redshift", region_name="us-east-1")
cluster_identifier = "my_cluster"
client.create_cluster(
ClusterIdentifier=cluster_identifier,
ClusterType="single-node",
DBName="test",
MasterUsername="user",
MasterUserPassword="password",
NodeType="ds2.xlarge",
)
expected_expiration = time.mktime(
(datetime.datetime.now() + datetime.timedelta(0, 900)).timetuple()
)
db_user = "some_user"
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user,
)
response["DbUser"].should.equal("IAM:%s" % db_user)
assert time.mktime((response["Expiration"]).timetuple()) == pytest.approx(
expected_expiration
)
response["DbPassword"].should.have.length_of(32)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, AutoCreate=True
)
response["DbUser"].should.equal("IAMA:%s" % db_user)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser="some_other_user", AutoCreate=False
)
response["DbUser"].should.equal("IAM:%s" % "some_other_user")
expected_expiration = time.mktime(
(datetime.datetime.now() + datetime.timedelta(0, 3000)).timetuple()
)
response = client.get_cluster_credentials(
ClusterIdentifier=cluster_identifier, DbUser=db_user, DurationSeconds=3000,
)
assert time.mktime(response["Expiration"].timetuple()) == pytest.approx(
expected_expiration
)
| true
| true
|
1c42c06511b97c7aad9b785dc67f6719e18c7817
| 2,070
|
py
|
Python
|
webcrawling/crawl.py
|
py-paulo/EstanteVirtual-WebCrawling
|
8888857c3d97c6127a34ae8d83a1828eb9d9805d
|
[
"MIT"
] | null | null | null |
webcrawling/crawl.py
|
py-paulo/EstanteVirtual-WebCrawling
|
8888857c3d97c6127a34ae8d83a1828eb9d9805d
|
[
"MIT"
] | null | null | null |
webcrawling/crawl.py
|
py-paulo/EstanteVirtual-WebCrawling
|
8888857c3d97c6127a34ae8d83a1828eb9d9805d
|
[
"MIT"
] | null | null | null |
import urllib.request
from bs4 import BeautifulSoup
def _crawl(urlBase, query, headers, waitRequests, attrs, allBooks: list = None):
    # Use None as the default: a mutable default list would be shared across
    # separate top-level calls and keep accumulating results.
    if allBooks is None:
        allBooks = []
req = urllib.request.Request(urlBase+query, headers=headers)
with urllib.request.urlopen(req) as response:
html = response.read().decode('utf-8', errors='ignore')
soup = BeautifulSoup(html, 'html.parser')
for div in soup.find_all("div", {"class": "info-exemplar"}):
h2_book_name = div.find("h2", {"itemprop": "name"})
book_name = h2_book_name.attrs['data-enhanced-ecommerce-impression-name']
span_author = div.find("span", {"itemprop": "author"})
author = span_author.attrs['data-enhanced-ecommerce-impression-brand']
div_sub_info = div.find("div", {"class": "sub-info"})
div_year_editor = div_sub_info.find("div", {"class": "ano-editora"})
span_year_editor = div_year_editor.findChildren("span", recursive=False)[0]
year_editor = span_year_editor.text.split(':')[-1].strip()
div_publishing_company = div_sub_info.find("div", {"class": "nome-editora"})
span_publishing_company = div_publishing_company.findChildren("span", recursive=False)[0]
publishing_company = span_publishing_company.text.split(':')[-1].strip()
div_type = div_sub_info.find("span", {"class": "info-exemplar-tipo_peso"})
type_book, weight_book = [text.strip().split(':')[-1].strip() for text in div_type.text.split('\n')]
book = {
'name': book_name,
'author': author,
'release_year': year_editor,
'publishing_company': publishing_company,
'type': type_book,
'weight': weight_book
}
allBooks.append(book)
try:
nextQuery = soup.find_all("a", {"class": "next"})[0].attrs['href']
except (IndexError, KeyError):
nextQuery = None
if (query == nextQuery) or (nextQuery is None):
return allBooks
else:
print(nextQuery)
        # Return the recursive call so the accumulated list reaches the caller.
        return _crawl(urlBase, nextQuery, headers, waitRequests, attrs, allBooks)
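# Illustrative usage sketch (not part of the original script): the base URL,
# query string, and headers below are assumed values chosen only to show how
# _crawl would be invoked; no real caller appears in this file.
#
#   books = _crawl(
#       urlBase="https://www.estantevirtual.com.br",
#       query="/busca?q=machado+de+assis",
#       headers={"User-Agent": "Mozilla/5.0"},
#       waitRequests=1,
#       attrs={},
#   )
#   print(len(books), "books collected")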
| 40.588235
| 108
| 0.636715
|
import urllib.request
from bs4 import BeautifulSoup
def _crawl(urlBase, query, headers, waitRequests, attrs, allBooks: list = None):
    if allBooks is None:
        allBooks = []
req = urllib.request.Request(urlBase+query, headers=headers)
with urllib.request.urlopen(req) as response:
html = response.read().decode('utf-8', errors='ignore')
soup = BeautifulSoup(html, 'html.parser')
for div in soup.find_all("div", {"class": "info-exemplar"}):
h2_book_name = div.find("h2", {"itemprop": "name"})
book_name = h2_book_name.attrs['data-enhanced-ecommerce-impression-name']
span_author = div.find("span", {"itemprop": "author"})
author = span_author.attrs['data-enhanced-ecommerce-impression-brand']
div_sub_info = div.find("div", {"class": "sub-info"})
div_year_editor = div_sub_info.find("div", {"class": "ano-editora"})
span_year_editor = div_year_editor.findChildren("span", recursive=False)[0]
year_editor = span_year_editor.text.split(':')[-1].strip()
div_publishing_company = div_sub_info.find("div", {"class": "nome-editora"})
span_publishing_company = div_publishing_company.findChildren("span", recursive=False)[0]
publishing_company = span_publishing_company.text.split(':')[-1].strip()
div_type = div_sub_info.find("span", {"class": "info-exemplar-tipo_peso"})
type_book, weight_book = [text.strip().split(':')[-1].strip() for text in div_type.text.split('\n')]
book = {
'name': book_name,
'author': author,
'release_year': year_editor,
'publishing_company': publishing_company,
'type': type_book,
'weight': weight_book
}
allBooks.append(book)
try:
nextQuery = soup.find_all("a", {"class": "next"})[0].attrs['href']
except (IndexError, KeyError):
nextQuery = None
if (query == nextQuery) or (nextQuery is None):
return allBooks
else:
print(nextQuery)
        return _crawl(urlBase, nextQuery, headers, waitRequests, attrs, allBooks)
| true
| true
|
1c42c15ca0d1d2c95102c4142fea54eca013eabc
| 4,186
|
py
|
Python
|
classy_vision/dataset/__init__.py
|
jerryzh168/ClassyVision-1
|
6acfb00a77487a9015803fbaad805330081293a9
|
[
"MIT"
] | 1
|
2021-09-29T06:24:42.000Z
|
2021-09-29T06:24:42.000Z
|
classy_vision/dataset/__init__.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | null | null | null |
classy_vision/dataset/__init__.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_dataset import ClassyDataset
FILE_ROOT = Path(__file__).parent
DATASET_REGISTRY = {}
DATASET_REGISTRY_TB = {}
DATASET_CLASS_NAMES = set()
DATASET_CLASS_NAMES_TB = {}
def build_dataset(config, *args, **kwargs):
"""Builds a :class:`ClassyDataset` from a config.
This assumes a 'name' key in the config which is used to determine what
dataset class to instantiate. For instance, a config `{"name": "my_dataset",
"folder": "/data"}` will find a class that was registered as "my_dataset"
(see :func:`register_dataset`) and call .from_config on it."""
dataset = DATASET_REGISTRY[config["name"]].from_config(config, *args, **kwargs)
num_workers = config.get("num_workers")
if num_workers is not None:
dataset.set_num_workers(num_workers)
return dataset
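# Illustrative sketch (not part of the original module): the dataset name and
# config keys below are hypothetical, shown only to make the registry flow
# concrete: register a ClassyDataset subclass, then build it from a config.
#
#   @register_dataset("my_dataset")
#   class MyDataset(ClassyDataset):
#       @classmethod
#       def from_config(cls, config):
#           ...
#
#   dataset = build_dataset({"name": "my_dataset", "num_workers": 4})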
def register_dataset(name):
"""Registers a :class:`ClassyDataset` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyDataset from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyDataset subclass like this:
.. code-block:: python
@register_dataset("my_dataset")
class MyDataset(ClassyDataset):
...
To instantiate a dataset from a configuration file, see
:func:`build_dataset`."""
def register_dataset_cls(cls):
if name in DATASET_REGISTRY:
msg = "Cannot register duplicate dataset ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, DATASET_REGISTRY_TB[name]))
if not issubclass(cls, ClassyDataset):
raise ValueError(
"Dataset ({}: {}) must extend ClassyDataset".format(name, cls.__name__)
)
if cls.__name__ in DATASET_CLASS_NAMES:
msg = (
"Cannot register dataset with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, DATASET_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
DATASET_REGISTRY[name] = cls
DATASET_CLASS_NAMES.add(cls.__name__)
DATASET_REGISTRY_TB[name] = tb
DATASET_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_dataset_cls
# automatically import any Python files in the dataset/ directory
import_all_modules(FILE_ROOT, "classy_vision.dataset")
from .classy_cifar import CIFARDataset # isort:skip
from .classy_hmdb51 import HMDB51Dataset # isort:skip
from .classy_kinetics400 import Kinetics400Dataset # isort:skip
from .classy_synthetic_image import SyntheticImageDataset # isort:skip
from .classy_synthetic_image_streaming import ( # isort:skip
SyntheticImageStreamingDataset, # isort:skip
) # isort:skip
from .classy_synthetic_video import SyntheticVideoDataset # isort:skip
from .classy_ucf101 import UCF101Dataset # isort:skip
from .classy_video_dataset import ClassyVideoDataset # isort:skip
from .dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper # isort:skip
from .dataloader_limit_wrapper import DataloaderLimitWrapper # isort:skip
from .dataloader_skip_none_wrapper import DataloaderSkipNoneWrapper # isort:skip
from .dataloader_wrapper import DataloaderWrapper # isort:skip
from .image_path_dataset import ImagePathDataset # isort:skip
__all__ = [
"CIFARDataset",
"ClassyDataset",
"ClassyVideoDataset",
"DataloaderLimitWrapper",
"DataloaderSkipNoneWrapper",
"DataloaderWrapper",
"DataloaderAsyncGPUWrapper",
"HMDB51Dataset",
"ImagePathDataset",
"Kinetics400Dataset",
"SyntheticImageDataset",
"SyntheticImageStreamingDataset",
"SyntheticVideoDataset",
"UCF101Dataset",
"build_dataset",
"register_dataset",
]
| 35.777778
| 88
| 0.718586
|
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_dataset import ClassyDataset
FILE_ROOT = Path(__file__).parent
DATASET_REGISTRY = {}
DATASET_REGISTRY_TB = {}
DATASET_CLASS_NAMES = set()
DATASET_CLASS_NAMES_TB = {}
def build_dataset(config, *args, **kwargs):
dataset = DATASET_REGISTRY[config["name"]].from_config(config, *args, **kwargs)
num_workers = config.get("num_workers")
if num_workers is not None:
dataset.set_num_workers(num_workers)
return dataset
def register_dataset(name):
def register_dataset_cls(cls):
if name in DATASET_REGISTRY:
msg = "Cannot register duplicate dataset ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, DATASET_REGISTRY_TB[name]))
if not issubclass(cls, ClassyDataset):
raise ValueError(
"Dataset ({}: {}) must extend ClassyDataset".format(name, cls.__name__)
)
if cls.__name__ in DATASET_CLASS_NAMES:
msg = (
"Cannot register dataset with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, DATASET_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
DATASET_REGISTRY[name] = cls
DATASET_CLASS_NAMES.add(cls.__name__)
DATASET_REGISTRY_TB[name] = tb
DATASET_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_dataset_cls
import_all_modules(FILE_ROOT, "classy_vision.dataset")
from .classy_cifar import CIFARDataset
from .classy_hmdb51 import HMDB51Dataset
from .classy_kinetics400 import Kinetics400Dataset
from .classy_synthetic_image import SyntheticImageDataset
from .classy_synthetic_image_streaming import (
SyntheticImageStreamingDataset,
)
from .classy_synthetic_video import SyntheticVideoDataset
from .classy_ucf101 import UCF101Dataset
from .classy_video_dataset import ClassyVideoDataset
from .dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper
from .dataloader_limit_wrapper import DataloaderLimitWrapper
from .dataloader_skip_none_wrapper import DataloaderSkipNoneWrapper
from .dataloader_wrapper import DataloaderWrapper
from .image_path_dataset import ImagePathDataset
__all__ = [
"CIFARDataset",
"ClassyDataset",
"ClassyVideoDataset",
"DataloaderLimitWrapper",
"DataloaderSkipNoneWrapper",
"DataloaderWrapper",
"DataloaderAsyncGPUWrapper",
"HMDB51Dataset",
"ImagePathDataset",
"Kinetics400Dataset",
"SyntheticImageDataset",
"SyntheticImageStreamingDataset",
"SyntheticVideoDataset",
"UCF101Dataset",
"build_dataset",
"register_dataset",
]
| true
| true
|
1c42c1d0a350755214fa2e9158a141b2ae61aa55
| 16,452
|
py
|
Python
|
django/contrib/messages/tests/base.py
|
laurilarjo/halvinbensa-appengine
|
82602835914e70b6c71993d4b570e1df32a0e71e
|
[
"BSD-3-Clause"
] | 2
|
2015-11-05T06:07:13.000Z
|
2019-01-04T07:35:59.000Z
|
django/contrib/messages/tests/base.py
|
alex/django-old
|
6f964c8f03e5d25c9e36898a001c8463f82fbb81
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/messages/tests/base.py
|
alex/django-old
|
6f964c8f03e5d25c9e36898a001c8463f82fbb81
|
[
"BSD-3-Clause"
] | null | null | null |
import warnings
from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
# Backup these manually because we do not want them deleted.
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth.models')
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
# Restore these manually (see above).
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
warnings.resetwarnings()
warnings.simplefilter('ignore', PendingDeprecationWarning)
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
        This method keeps the storage ``_get`` method from being called, so
        that other parts of the storage backend can be tested independently of
        the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assert_(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_middleware_disabled_auth_user(self):
"""
Tests that the messages API successfully falls back to using
user.message_set to store messages directly when the middleware is
disabled.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is raised when one attempts to store a message.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is not raised if 'fail_silently' = True
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assert_(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assert_(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
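# Editorial sketch (not part of the original test file): the counts asserted in
# the level tests above follow from Django's default level constants
# (DEBUG=10, INFO=20, SUCCESS=25, WARNING=30, ERROR=40) and the six messages
# added by add_level_messages(); only messages at or above the active level
# are recorded:
#   level INFO (20) -> INFO, 29, SUCCESS, WARNING, ERROR   (5 of 6)
#   level 5         -> all six messages                    (6 of 6)
#   level 30        -> WARNING, ERROR                      (2 of 6)
#   level 29        -> 29, WARNING, ERROR                  (3 of 6)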
| 40.224939
| 79
| 0.630075
|
import warnings
from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def add_level_messages(storage):
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth.models')
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
warnings.resetwarnings()
warnings.simplefilter('ignore', PendingDeprecationWarning)
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assert_(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage)
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage)
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_multiple_posts(self):
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_middleware_disabled_auth_user(self):
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
storage = self.get_existing_storage()
self.assertFalse(storage.used)
data = list(storage)
self.assert_(storage.used)
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assert_(storage.added_new)
def test_default_level(self):
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
| true
| true
|
1c42c274cc43a339dc57293f185ce46a81f01cbc
| 745
|
py
|
Python
|
parquet/parquet/generate_make.py
|
ZornitsaD/IMCtermite
|
c9f8097c9b40e3fca58e89ecbf62579cd2904d6c
|
[
"MIT"
] | 1
|
2021-08-06T12:09:07.000Z
|
2021-08-06T12:09:07.000Z
|
parquet/parquet/generate_make.py
|
ZornitsaD/IMCtermite
|
c9f8097c9b40e3fca58e89ecbf62579cd2904d6c
|
[
"MIT"
] | null | null | null |
parquet/parquet/generate_make.py
|
ZornitsaD/IMCtermite
|
c9f8097c9b40e3fca58e89ecbf62579cd2904d6c
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------------------------------#
import glob
from pathlib import Path
# find source files
srcpaths = Path("src/").rglob('*.cc')
deps =[ str(path) for path in srcpaths ]
print(deps)
with open('makefileobj','w') as fout :
for el in deps :
basnam = el.split('/')[-1]
print(str(el) + " : " + str(basnam) + " : " + str(basnam.split('.')[1]))
if basnam.split('.')[1] == 'cc' :
objfile = 'bin/' + basnam.replace('.cc','.o')
fout.write(objfile + " : " + el + "\n")
fout.write("\t" + "$(CPP) $(CPPFLAGS) -c $< $(LIBS) -o $@\n")
fout.write("\n")
#-----------------------------------------------------------------------------#
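# Editorial sketch (not part of the original file): for an illustrative source
# file src/foo/bar.cc, the loop above appends a rule of the form
#
#   bin/bar.o : src/foo/bar.cc
#   \t$(CPP) $(CPPFLAGS) -c $< $(LIBS) -o $@
#
# to 'makefileobj' (the recipe line is prefixed with a literal tab, written as
# "\t" in the code).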
| 31.041667
| 80
| 0.387919
|
import glob
from pathlib import Path
srcpaths = Path("src/").rglob('*.cc')
deps =[ str(path) for path in srcpaths ]
print(deps)
with open('makefileobj','w') as fout :
for el in deps :
basnam = el.split('/')[-1]
print(str(el) + " : " + str(basnam) + " : " + str(basnam.split('.')[1]))
if basnam.split('.')[1] == 'cc' :
objfile = 'bin/' + basnam.replace('.cc','.o')
fout.write(objfile + " : " + el + "\n")
fout.write("\t" + "$(CPP) $(CPPFLAGS) -c $< $(LIBS) -o $@\n")
fout.write("\n")
| true
| true
|
1c42c337adbeca6fac36132c61930e57b93567cb
| 3,675
|
py
|
Python
|
base.py
|
HongtaoYang/mean-average-precision
|
84a4b72f07e9143948319b75a2b50f1d371a0b11
|
[
"MIT"
] | null | null | null |
base.py
|
HongtaoYang/mean-average-precision
|
84a4b72f07e9143948319b75a2b50f1d371a0b11
|
[
"MIT"
] | null | null | null |
base.py
|
HongtaoYang/mean-average-precision
|
84a4b72f07e9143948319b75a2b50f1d371a0b11
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from typing import List, Any, Set
import numpy as np
class GroundTruthItem:
def __init__(self, *, clazz: str, location: Any = None) -> None:
"""
Args:
clazz: the class of the item.
location: the location of the item.
                In the case of detection, this is the image id where the box comes from.
"""
self.clazz = clazz
self.location = location
class PredictedItem:
def __init__(self, *, clazz: str, score: float, location: Any = None) -> None:
"""
Args:
clazz: the class of the item.
score: the score of the item for the class.
location: the location of the item.
                In the case of detection, this is the image id where the box comes from.
"""
self.clazz = clazz
self.score = score
self.location = location
class MeanAveragePrecision:
def __init__(self, gt_items: Set[GroundTruthItem], predicted_items: Set[PredictedItem]):
self.gt_items = gt_items
self.predicted_items = predicted_items
def mAP(self, **kwargs) -> float:
"""
Code modified from https://github.com/rafaelpadilla/Object-Detection-Metrics/blob/master/lib/Evaluator.py
"""
all_average_precisions = []
all_classes = {b.clazz for b in self.gt_items.union(self.predicted_items)}
for c in all_classes:
pred_items_single_class = [d for d in self.predicted_items if d.clazz == c]
ground_truths_single_class = [g for g in self.gt_items if g.clazz == c]
if not ground_truths_single_class and pred_items_single_class:
average_precision = 0.0
elif ground_truths_single_class and not pred_items_single_class:
average_precision = 0.0
elif not ground_truths_single_class and not pred_items_single_class:
average_precision = 1.0
else:
pred_items_single_class = sorted(pred_items_single_class, key=lambda d: d.score, reverse=True)
prediction_is_correct = self.assign(pred_items_single_class, ground_truths_single_class, **kwargs)
acc_TP = np.cumsum(prediction_is_correct)
acc_FP = np.cumsum(1 - prediction_is_correct)
rec = list(acc_TP / len(ground_truths_single_class))
prec = list(acc_TP / (acc_FP + acc_TP))
average_precision = self._average_precision(rec, prec)
all_average_precisions.append(average_precision)
return np.mean(all_average_precisions)
@abstractmethod
def assign(self, predicted_items_single_class, gt_items_single_class, **kwargs) -> np.ndarray:
"""
Args:
predicted_items_single_class: sorted list of PredictedItem of a single class.
            gt_items_single_class: list of GroundTruthItem of a single class.
Return:
A 1-d np.ndarray of the same length as predicted_items_single_class, with each value
being either 0 or 1, indicating whether the corresponding predicted item is correct or not.
"""
pass
@staticmethod
def _average_precision(rec: List[float], prec: List[float]) -> float:
recall_intervals = [r for r in [0] + rec]
precision_intervals = [p for p in [0] + prec]
average_precision = 0
for i in range(len(recall_intervals) - 1):
average_precision += (recall_intervals[i + 1] - recall_intervals[i]) * max(
precision_intervals[i + 1 :]
)
return average_precision
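# Editorial sketch (not part of the original file): a minimal concrete subclass
# and a toy usage, assuming a prediction counts as correct when its location
# exactly matches a not-yet-matched ground truth of the same class. The name
# ExactMatchMAP and the example items below are hypothetical.
class ExactMatchMAP(MeanAveragePrecision):
    def assign(self, predicted_items_single_class, gt_items_single_class, **kwargs) -> np.ndarray:
        matched = set()
        correct = np.zeros(len(predicted_items_single_class))
        for i, pred in enumerate(predicted_items_single_class):
            for j, gt in enumerate(gt_items_single_class):
                if j not in matched and pred.location == gt.location:
                    matched.add(j)
                    correct[i] = 1.0
                    break
        return correct


# Example (left as a comment to keep the module import-safe):
#   gts = {GroundTruthItem(clazz="cat", location=0), GroundTruthItem(clazz="dog", location=1)}
#   preds = {PredictedItem(clazz="cat", score=0.9, location=0),
#            PredictedItem(clazz="dog", score=0.8, location=2)}
#   ExactMatchMAP(gts, preds).mAP()  # -> 0.5 (cat AP = 1.0, dog AP = 0.0)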
| 39.516129
| 114
| 0.631293
|
from abc import abstractmethod
from typing import List, Any, Set
import numpy as np
class GroundTruthItem:
def __init__(self, *, clazz: str, location: Any = None) -> None:
self.clazz = clazz
self.location = location
class PredictedItem:
def __init__(self, *, clazz: str, score: float, location: Any = None) -> None:
self.clazz = clazz
self.score = score
self.location = location
class MeanAveragePrecision:
def __init__(self, gt_items: Set[GroundTruthItem], predicted_items: Set[PredictedItem]):
self.gt_items = gt_items
self.predicted_items = predicted_items
def mAP(self, **kwargs) -> float:
all_average_precisions = []
all_classes = {b.clazz for b in self.gt_items.union(self.predicted_items)}
for c in all_classes:
pred_items_single_class = [d for d in self.predicted_items if d.clazz == c]
ground_truths_single_class = [g for g in self.gt_items if g.clazz == c]
if not ground_truths_single_class and pred_items_single_class:
average_precision = 0.0
elif ground_truths_single_class and not pred_items_single_class:
average_precision = 0.0
elif not ground_truths_single_class and not pred_items_single_class:
average_precision = 1.0
else:
pred_items_single_class = sorted(pred_items_single_class, key=lambda d: d.score, reverse=True)
prediction_is_correct = self.assign(pred_items_single_class, ground_truths_single_class, **kwargs)
acc_TP = np.cumsum(prediction_is_correct)
acc_FP = np.cumsum(1 - prediction_is_correct)
rec = list(acc_TP / len(ground_truths_single_class))
prec = list(acc_TP / (acc_FP + acc_TP))
average_precision = self._average_precision(rec, prec)
all_average_precisions.append(average_precision)
return np.mean(all_average_precisions)
@abstractmethod
def assign(self, predicted_items_single_class, gt_items_single_class, **kwargs) -> np.ndarray:
pass
@staticmethod
def _average_precision(rec: List[float], prec: List[float]) -> float:
recall_intervals = [r for r in [0] + rec]
precision_intervals = [p for p in [0] + prec]
average_precision = 0
for i in range(len(recall_intervals) - 1):
average_precision += (recall_intervals[i + 1] - recall_intervals[i]) * max(
precision_intervals[i + 1 :]
)
return average_precision
| true
| true
|
1c42c3380e433597471687a6ff54a59d9b36fc13
| 9,559
|
py
|
Python
|
sdk/python/pulumi_azure_native/apimanagement/v20170301/policy.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20170301/policy.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/v20170301/policy.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['PolicyArgs', 'Policy']
@pulumi.input_type
class PolicyArgs:
def __init__(__self__, *,
policy_content: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
policy_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Policy resource.
:param pulumi.Input[str] policy_content: Json escaped Xml Encoded contents of the Policy.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] policy_id: The identifier of the Policy.
"""
pulumi.set(__self__, "policy_content", policy_content)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> pulumi.Input[str]:
"""
Json escaped Xml Encoded contents of the Policy.
"""
return pulumi.get(self, "policy_content")
@policy_content.setter
def policy_content(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_content", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the Policy.
"""
return pulumi.get(self, "policy_id")
@policy_id.setter
def policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_id", value)
class Policy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_content: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Policy Contract details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] policy_content: Json escaped Xml Encoded contents of the Policy.
:param pulumi.Input[str] policy_id: The identifier of the Policy.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Policy Contract details.
:param str resource_name: The name of the resource.
:param PolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_content: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyArgs.__new__(PolicyArgs)
if policy_content is None and not opts.urn:
raise TypeError("Missing required property 'policy_content'")
__props__.__dict__["policy_content"] = policy_content
__props__.__dict__["policy_id"] = policy_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Policy"), pulumi.Alias(type_="azure-native:apimanagement:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:Policy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Policy, __self__).__init__(
'azure-native:apimanagement/v20170301:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PolicyArgs.__new__(PolicyArgs)
__props__.__dict__["name"] = None
__props__.__dict__["policy_content"] = None
__props__.__dict__["type"] = None
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> pulumi.Output[str]:
"""
Json escaped Xml Encoded contents of the Policy.
"""
return pulumi.get(self, "policy_content")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
| 46.178744
| 1,372
| 0.660529
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['PolicyArgs', 'Policy']
@pulumi.input_type
class PolicyArgs:
def __init__(__self__, *,
policy_content: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
policy_id: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "policy_content", policy_content)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> pulumi.Input[str]:
return pulumi.get(self, "policy_content")
@policy_content.setter
def policy_content(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_content", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "policy_id")
@policy_id.setter
def policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_id", value)
class Policy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_content: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_content: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicyArgs.__new__(PolicyArgs)
if policy_content is None and not opts.urn:
raise TypeError("Missing required property 'policy_content'")
__props__.__dict__["policy_content"] = policy_content
__props__.__dict__["policy_id"] = policy_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:Policy"), pulumi.Alias(type_="azure-native:apimanagement:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:Policy"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:Policy"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:Policy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Policy, __self__).__init__(
'azure-native:apimanagement/v20170301:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Policy':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PolicyArgs.__new__(PolicyArgs)
__props__.__dict__["name"] = None
__props__.__dict__["policy_content"] = None
__props__.__dict__["type"] = None
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyContent")
def policy_content(self) -> pulumi.Output[str]:
return pulumi.get(self, "policy_content")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true
| true
|
1c42c346b7d8ebd384f360f78261259aca83cf1f
| 4,471
|
py
|
Python
|
splitter/tracking/contour.py
|
splitter-research/splitter
|
94e5e3073b4f383ba50397168ddb8bcd5fc48da4
|
[
"MIT"
] | null | null | null |
splitter/tracking/contour.py
|
splitter-research/splitter
|
94e5e3073b4f383ba50397168ddb8bcd5fc48da4
|
[
"MIT"
] | null | null | null |
splitter/tracking/contour.py
|
splitter-research/splitter
|
94e5e3073b4f383ba50397168ddb8bcd5fc48da4
|
[
"MIT"
] | null | null | null |
"""This file is part of Splitter which is released under MIT License.
contour.py defines geometric vision primitives.
"""
from splitter.dataflow.map import Map
from timeit import default_timer as timer
import cv2
import numpy as np
class KeyPoints(Map):
"""KeyPoints uses a canny edge detector for identifying any object
(but not particular classes). You can tag these detections with a
generic label "unknown" or "object" or whatever you want.
"""
def __init__(self, \
blur=5, \
edge_low=225, \
edge_high=250, \
area_thresh=10,
label="object"):
"""The constructor takes in some parameters for the detector.
"""
self.blur = blur
self.edge_low = edge_low
self.edge_high = edge_high
self.area_thresh = area_thresh
self.label = label
self.scale = 1.0
"""the map function creates bounding boxes of the form x,y,x,y to identify detection points
"""
def map(self, data):
ff = data
#print(ff['data'].shape)
#now = timer()
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
#print('Copy Elapsed: ', timer() - now)
blurred = cv2.GaussianBlur(gray, (self.blur, self.blur), 0)
tight = cv2.Canny(blurred, self.edge_low, self.edge_high)
contours, _ = cv2.findContours(tight.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rtn = []
for cnt in contours:
if cv2.contourArea(cnt) > self.area_thresh:
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
rtn.append((self.label,(cx,cy,cx,cy)))
#print(len(rtn))
ff['bounding_boxes'] = rtn
return ff
def _serialize(self):
return {'blur': self.blur,
'edge_low': self.edge_low,
'edge_high': self.edge_high,
'area_thresh': self.area_thresh,
'label': self.label}
class SizeMovementDetector(KeyPoints):
"""Categorizes Moving Objects By Size
"""
def __init__(self, \
blur=5, \
bilat=150,
edge_low=40, \
edge_high=50, \
area_thresh=500): #min size
"""The constructor takes in some parameters for the detector.
"""
self.blur = blur
self.edge_low = edge_low
self.edge_high = edge_high
self.area_thresh = area_thresh
self.bilat = bilat
        self.label = "object"  # referenced by _serialize below
        self.scale = 1.0
def __iter__(self):
self.prev = None
return super().__iter__()
"""the map function creates bounding boxes of the form x,y,x,y to identify detection points
"""
def map(self, data):
ff = data
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
blurred = cv2.GaussianBlur(gray, (self.blur, self.blur), 0)
tight = cv2.Canny(blurred, self.edge_low, self.edge_high)
if not (self.prev is None):
mask = np.abs((self.prev - tight) > 0).astype(np.uint8)
self.prev = tight
img = tight*mask
tight = cv2.bilateralFilter(img,7,self.bilat,self.bilat)
else:
self.prev = tight
contours, _ = cv2.findContours(tight.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rtn = []
for cnt in contours:
if cv2.contourArea(cnt) > self.area_thresh:
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
rtn.append(('object',(cx,cy,cx,cy)))
#print(len(rtn))
ff['bounding_boxes'] = rtn
return ff
def _serialize(self):
return {'blur': self.blur,
'edge_low': self.edge_low,
'edge_high': self.edge_high,
'area_thresh': self.area_thresh,
'label': self.label}
class GoodKeyPoints(KeyPoints):
def __init__(self, \
maxCorners = 1500,\
qualityLevel = 0.2,\
minDistance = 25,\
blockSize = 9,\
blur=1):
self.maxCorners = maxCorners
self.qualityLevel = qualityLevel
self.minDistance = minDistance
self.blockSize = blockSize
self.blur = blur
self.area_thresh = 1
def map(self, data):
ff = data
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
feature_params = dict( maxCorners = self.maxCorners,
qualityLevel = self.qualityLevel,
minDistance = self.minDistance,
blockSize = self.blockSize )
gray = cv2.cvtColor(ff['data'], cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(gray, mask = None, **feature_params)
bounding_boxes = []
if p0 is not None:
for i in p0:
bounding_boxes.append(('object',(i[0,0], i[0,1], i[0,0],i[0,1])))
ff['bounding_boxes'] = bounding_boxes
return ff
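# Editorial sketch (not part of the original file): exercising KeyPoints.map()
# directly on a synthetic frame, assuming a frame is simply a dict carrying a
# 'data' image array (the Map base class and its iteration protocol are not
# used here).
#   frame = np.zeros((240, 320), dtype=np.uint8)
#   frame[60:180, 80:240] = 255  # one bright rectangle -> one external contour
#   boxes = KeyPoints(label="blob").map({'data': frame})['bounding_boxes']
#   # each entry is (label, (cx, cy, cx, cy)), i.e. the contour centroid twice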
| 23.286458
| 92
| 0.634086
|
from splitter.dataflow.map import Map
from timeit import default_timer as timer
import cv2
import numpy as np
class KeyPoints(Map):
def __init__(self, \
blur=5, \
edge_low=225, \
edge_high=250, \
area_thresh=10,
label="object"):
self.blur = blur
self.edge_low = edge_low
self.edge_high = edge_high
self.area_thresh = area_thresh
self.label = label
self.scale = 1.0
def map(self, data):
ff = data
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
blurred = cv2.GaussianBlur(gray, (self.blur, self.blur), 0)
tight = cv2.Canny(blurred, self.edge_low, self.edge_high)
contours, _ = cv2.findContours(tight.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rtn = []
for cnt in contours:
if cv2.contourArea(cnt) > self.area_thresh:
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
rtn.append((self.label,(cx,cy,cx,cy)))
ff['bounding_boxes'] = rtn
return ff
def _serialize(self):
return {'blur': self.blur,
'edge_low': self.edge_low,
'edge_high': self.edge_high,
'area_thresh': self.area_thresh,
'label': self.label}
class SizeMovementDetector(KeyPoints):
def __init__(self, \
blur=5, \
bilat=150,
edge_low=40, \
edge_high=50, \
area_thresh=500):
self.blur = blur
self.edge_low = edge_low
self.edge_high = edge_high
self.area_thresh = area_thresh
self.bilat = bilat
        self.label = "object"
        self.scale = 1.0
def __iter__(self):
self.prev = None
return super().__iter__()
def map(self, data):
ff = data
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
blurred = cv2.GaussianBlur(gray, (self.blur, self.blur), 0)
tight = cv2.Canny(blurred, self.edge_low, self.edge_high)
if not (self.prev is None):
mask = np.abs((self.prev - tight) > 0).astype(np.uint8)
self.prev = tight
img = tight*mask
tight = cv2.bilateralFilter(img,7,self.bilat,self.bilat)
else:
self.prev = tight
contours, _ = cv2.findContours(tight.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rtn = []
for cnt in contours:
if cv2.contourArea(cnt) > self.area_thresh:
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
rtn.append(('object',(cx,cy,cx,cy)))
ff['bounding_boxes'] = rtn
return ff
def _serialize(self):
return {'blur': self.blur,
'edge_low': self.edge_low,
'edge_high': self.edge_high,
'area_thresh': self.area_thresh,
'label': self.label}
class GoodKeyPoints(KeyPoints):
def __init__(self, \
maxCorners = 1500,\
qualityLevel = 0.2,\
minDistance = 25,\
blockSize = 9,\
blur=1):
self.maxCorners = maxCorners
self.qualityLevel = qualityLevel
self.minDistance = minDistance
self.blockSize = blockSize
self.blur = blur
self.area_thresh = 1
def map(self, data):
ff = data
if len(ff['data'].shape) < 3:
gray = ff['data']
else:
gray = ff['data'][:,:,0]
feature_params = dict( maxCorners = self.maxCorners,
qualityLevel = self.qualityLevel,
minDistance = self.minDistance,
blockSize = self.blockSize )
gray = cv2.cvtColor(ff['data'], cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(gray, mask = None, **feature_params)
bounding_boxes = []
if p0 is not None:
for i in p0:
bounding_boxes.append(('object',(i[0,0], i[0,1], i[0,0],i[0,1])))
ff['bounding_boxes'] = bounding_boxes
return ff
| true
| true
|
1c42c40cb76b0a6c0c2b120e061b0d8dbe4119a3
| 400
|
py
|
Python
|
sentiment/sentiment/urls.py
|
Ernesttt/sentiment-analysis
|
60b8924457d35228d5a752d99dd6e786fb49ce55
|
[
"BSD-3-Clause"
] | 1
|
2017-07-19T09:19:56.000Z
|
2017-07-19T09:19:56.000Z
|
sentiment/sentiment/urls.py
|
Ernesttt/sentiment-analysis
|
60b8924457d35228d5a752d99dd6e786fb49ce55
|
[
"BSD-3-Clause"
] | null | null | null |
sentiment/sentiment/urls.py
|
Ernesttt/sentiment-analysis
|
60b8924457d35228d5a752d99dd6e786fb49ce55
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'tutorial.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('comments.urls')),
url(r'^docs/', include('rest_framework_swagger.urls')),
)
| 26.666667
| 59
| 0.6475
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('comments.urls')),
url(r'^docs/', include('rest_framework_swagger.urls')),
)
| true
| true
|
1c42c45221e6945826ce4c91f329d98e3103d776
| 2,061
|
py
|
Python
|
integration/experiment/energy_efficiency/run_barrier_frequency_sweep_nekbone.py
|
scoumeri/geopm
|
2406b8cca92d8eb32d4dc26d24bb2273164f186c
|
[
"BSD-3-Clause"
] | null | null | null |
integration/experiment/energy_efficiency/run_barrier_frequency_sweep_nekbone.py
|
scoumeri/geopm
|
2406b8cca92d8eb32d4dc26d24bb2273164f186c
|
[
"BSD-3-Clause"
] | null | null | null |
integration/experiment/energy_efficiency/run_barrier_frequency_sweep_nekbone.py
|
scoumeri/geopm
|
2406b8cca92d8eb32d4dc26d24bb2273164f186c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
Frequency map experiment comparing nekbone with added barriers run
at a lower frequency to the baseline with no added barriers.
'''
from experiment.energy_efficiency import barrier_frequency_sweep
from apps.nekbone import nekbone
if __name__ == '__main__':
app_conf_ref = nekbone.NekboneAppConf(add_barriers=False)
app_conf = nekbone.NekboneAppConf(add_barriers=True)
barrier_frequency_sweep.main(app_conf_ref, app_conf)
| 42.9375
| 74
| 0.761281
|
from experiment.energy_efficiency import barrier_frequency_sweep
from apps.nekbone import nekbone
if __name__ == '__main__':
app_conf_ref = nekbone.NekboneAppConf(add_barriers=False)
app_conf = nekbone.NekboneAppConf(add_barriers=True)
barrier_frequency_sweep.main(app_conf_ref, app_conf)
| true
| true
|
1c42c466300530f7aafff809cb4089893d0cfaa9
| 20,223
|
py
|
Python
|
tests/rest/client/test_third_party_rules.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | 1
|
2021-12-30T23:47:29.000Z
|
2021-12-30T23:47:29.000Z
|
tests/rest/client/test_third_party_rules.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/client/test_third_party_rules.py
|
sowieta/synapse
|
bfd7a9b65c5e092c6a7ccdd46e59a278b1cbbd57
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import TYPE_CHECKING, Dict, Optional, Tuple
from unittest.mock import Mock
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.types import JsonDict, Requester, StateMap
from synapse.util.frozenutils import unfreeze
from tests import unittest
from tests.test_utils import make_awaitable
if TYPE_CHECKING:
from synapse.module_api import ModuleApi
thread_local = threading.local()
class LegacyThirdPartyRulesTestModule:
def __init__(self, config: Dict, module_api: "ModuleApi"):
# keep a record of the "current" rules module, so that the test can patch
# it if desired.
thread_local.rules_module = self
self.module_api = module_api
async def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
):
return True
async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]):
return True
@staticmethod
def parse_config(config):
return config
class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule):
def __init__(self, config: Dict, module_api: "ModuleApi"):
super().__init__(config, module_api)
def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
):
return False
class LegacyChangeEvents(LegacyThirdPartyRulesTestModule):
def __init__(self, config: Dict, module_api: "ModuleApi"):
super().__init__(config, module_api)
async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]):
d = event.get_dict()
content = unfreeze(event.content)
content["foo"] = "bar"
d["content"] = content
return d
class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver()
load_legacy_third_party_event_rules(hs)
# We're not going to be properly signing events as our remote homeserver is fake,
# therefore disable event signature checks.
# Note that these checks are not relevant to this test case.
# Have this homeserver auto-approve all event signature checking.
async def approve_all_signature_checking(_, pdu):
return pdu
hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking
# Have this homeserver skip event auth checks. This is necessary due to
# event auth checks ensuring that events were signed by the sender's homeserver.
async def _check_event_auth(origin, event, context, *args, **kwargs):
return context
hs.get_federation_event_handler()._check_event_auth = _check_event_auth
return hs
def prepare(self, reactor, clock, homeserver):
# Create some users and a room to play with during the tests
self.user_id = self.register_user("kermit", "monkey")
self.invitee = self.register_user("invitee", "hackme")
self.tok = self.login("kermit", "monkey")
# Some tests might prevent room creation on purpose.
try:
self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
except Exception:
pass
def test_third_party_rules(self):
"""Tests that a forbidden event is forbidden from being sent, but an allowed one
can be sent.
"""
# patch the rules module with a Mock which will return False for some event
# types
async def check(ev, state):
return ev.type != "foo.bar.forbidden", None
callback = Mock(spec=[], side_effect=check)
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [
callback
]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id,
{},
access_token=self.tok,
)
self.assertEquals(channel.result["code"], b"200", channel.result)
callback.assert_called_once()
# there should be various state events in the state arg: do some basic checks
state_arg = callback.call_args[0][1]
for k in (("m.room.create", ""), ("m.room.member", self.user_id)):
self.assertIn(k, state_arg)
ev = state_arg[k]
self.assertEqual(ev.type, k[0])
self.assertEqual(ev.state_key, k[1])
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id,
{},
access_token=self.tok,
)
self.assertEquals(channel.result["code"], b"403", channel.result)
def test_third_party_rules_workaround_synapse_errors_pass_through(self):
"""
Tests that the workaround introduced by https://github.com/matrix-org/synapse/pull/11042
is functional: that SynapseErrors are passed through from check_event_allowed
and bubble up to the web resource.
NEW MODULES SHOULD NOT MAKE USE OF THIS WORKAROUND!
This is a temporary workaround!
"""
class NastyHackException(SynapseError):
def error_dict(self):
"""
This overrides SynapseError's `error_dict` to nastily inject
JSON into the error response.
"""
result = super().error_dict()
result["nasty"] = "very"
return result
# add a callback that will raise our hacky exception
async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]:
raise NastyHackException(429, "message")
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# Make a request
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id,
{},
access_token=self.tok,
)
# Check the error code
self.assertEquals(channel.result["code"], b"429", channel.result)
# Check the JSON body has had the `nasty` key injected
self.assertEqual(
channel.json_body,
{"errcode": "M_UNKNOWN", "error": "message", "nasty": "very"},
)
def test_cannot_modify_event(self):
"""cannot accidentally modify an event before it is persisted"""
# first patch the event checker so that it will try to modify the event
async def check(ev: EventBase, state):
ev.content = {"x": "y"}
return True, None
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# now send the event
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{"x": "x"},
access_token=self.tok,
)
# check_event_allowed has some error handling, so it shouldn't 500 just because a
# module did something bad.
self.assertEqual(channel.code, 200, channel.result)
event_id = channel.json_body["event_id"]
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.code, 200, channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["x"], "x")
def test_modify_event(self):
"""The module can return a modified version of the event"""
# first patch the event checker so that it will modify the event
async def check(ev: EventBase, state):
d = ev.get_dict()
d["content"] = {"x": "y"}
return True, d
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# now send the event
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{"x": "x"},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
event_id = channel.json_body["event_id"]
# ... and check that it got modified
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["x"], "y")
def test_message_edit(self):
"""Ensure that the module doesn't cause issues with edited messages."""
# first patch the event checker so that it will modify the event
async def check(ev: EventBase, state):
d = ev.get_dict()
d["content"] = {
"msgtype": "m.text",
"body": d["content"]["body"].upper(),
}
return True, d
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# Send an event, then edit it.
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{
"msgtype": "m.text",
"body": "Original body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
orig_event_id = channel.json_body["event_id"]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/m.room.message/2" % self.room_id,
{
"m.new_content": {"msgtype": "m.text", "body": "Edited body"},
"m.relates_to": {
"rel_type": "m.replace",
"event_id": orig_event_id,
},
"msgtype": "m.text",
"body": "Edited body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
edited_event_id = channel.json_body["event_id"]
# ... and check that they both got modified
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, orig_event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "ORIGINAL BODY")
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, edited_event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "EDITED BODY")
def test_send_event(self):
"""Tests that a module can send an event into a room via the module api"""
content = {
"msgtype": "m.text",
"body": "Hello!",
}
event_dict = {
"room_id": self.room_id,
"type": "m.room.message",
"content": content,
"sender": self.user_id,
}
event: EventBase = self.get_success(
self.hs.get_module_api().create_and_send_event_into_room(event_dict)
)
self.assertEquals(event.sender, self.user_id)
self.assertEquals(event.room_id, self.room_id)
self.assertEquals(event.type, "m.room.message")
self.assertEquals(event.content, content)
@unittest.override_config(
{
"third_party_event_rules": {
"module": __name__ + ".LegacyChangeEvents",
"config": {},
}
}
)
def test_legacy_check_event_allowed(self):
"""Tests that the wrapper for legacy check_event_allowed callbacks works
correctly.
"""
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/m.room.message/1" % self.room_id,
{
"msgtype": "m.text",
"body": "Original body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
event_id = channel.json_body["event_id"]
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertIn("foo", channel.json_body["content"].keys())
self.assertEqual(channel.json_body["content"]["foo"], "bar")
@unittest.override_config(
{
"third_party_event_rules": {
"module": __name__ + ".LegacyDenyNewRooms",
"config": {},
}
}
)
def test_legacy_on_create_room(self):
"""Tests that the wrapper for legacy on_create_room callbacks works
correctly.
"""
self.helper.create_room_as(self.user_id, tok=self.tok, expect_code=403)
def test_sent_event_end_up_in_room_state(self):
"""Tests that a state event sent by a module while processing another state event
doesn't get dropped from the state of the room. This is to guard against a bug
where Synapse has been observed doing so, see https://github.com/matrix-org/synapse/issues/10830
"""
event_type = "org.matrix.test_state"
# This content will be updated later on, and since we actually use a reference on
# the dict it does the right thing. It's a bit hacky but a handy way of making
# sure the state actually gets updated.
event_content = {"i": -1}
api = self.hs.get_module_api()
# Define a callback that sends a custom event on power levels update.
async def test_fn(event: EventBase, state_events):
if event.is_state and event.type == EventTypes.PowerLevels:
await api.create_and_send_event_into_room(
{
"room_id": event.room_id,
"sender": event.sender,
"type": event_type,
"content": event_content,
"state_key": "",
}
)
return True, None
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [test_fn]
# Sometimes the bug might not happen the first time the event type is added
# to the state but might happen when an event updates the state of the room for
# that type, so we test updating the state several times.
for i in range(5):
# Update the content of the custom state event to be sent by the callback.
event_content["i"] = i
# Update the room's power levels with a different value each time so Synapse
# doesn't consider an update redundant.
self._update_power_levels(event_default=i)
# Check that the new event made it to the room's state.
channel = self.make_request(
method="GET",
path="/rooms/" + self.room_id + "/state/" + event_type,
access_token=self.tok,
)
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body["i"], i)
def test_on_new_event(self):
"""Test that the on_new_event callback is called on new events"""
on_new_event = Mock(make_awaitable(None))
self.hs.get_third_party_event_rules()._on_new_event_callbacks.append(
on_new_event
)
# Send a message event to the room and check that the callback is called.
self.helper.send(room_id=self.room_id, tok=self.tok)
self.assertEqual(on_new_event.call_count, 1)
# Check that the callback is also called on membership updates.
self.helper.invite(
room=self.room_id,
src=self.user_id,
targ=self.invitee,
tok=self.tok,
)
self.assertEqual(on_new_event.call_count, 2)
args, _ = on_new_event.call_args
self.assertEqual(args[0].membership, Membership.INVITE)
self.assertEqual(args[0].state_key, self.invitee)
# Check that the invitee's membership is correct in the state that's passed down
# to the callback.
self.assertEqual(
args[1][(EventTypes.Member, self.invitee)].membership,
Membership.INVITE,
)
# Send an event over federation and check that the callback is also called.
self._send_event_over_federation()
self.assertEqual(on_new_event.call_count, 3)
def _send_event_over_federation(self) -> None:
"""Send a dummy event over federation and check that the request succeeds."""
body = {
"origin": self.hs.config.server.server_name,
"origin_server_ts": self.clock.time_msec(),
"pdus": [
{
"sender": self.user_id,
"type": EventTypes.Message,
"state_key": "",
"content": {"body": "hello world", "msgtype": "m.text"},
"room_id": self.room_id,
"depth": 0,
"origin_server_ts": self.clock.time_msec(),
"prev_events": [],
"auth_events": [],
"signatures": {},
"unsigned": {},
}
],
}
channel = self.make_request(
method="PUT",
path="/_matrix/federation/v1/send/1",
content=body,
federation_auth_origin=self.hs.config.server.server_name.encode("utf8"),
)
self.assertEqual(channel.code, 200, channel.result)
def _update_power_levels(self, event_default: int = 0):
"""Updates the room's power levels.
Args:
event_default: Value to use for 'events_default'.
"""
self.helper.send_state(
room_id=self.room_id,
event_type=EventTypes.PowerLevels,
body={
"ban": 50,
"events": {
"m.room.avatar": 50,
"m.room.canonical_alias": 50,
"m.room.encryption": 100,
"m.room.history_visibility": 100,
"m.room.name": 50,
"m.room.power_levels": 100,
"m.room.server_acl": 100,
"m.room.tombstone": 100,
},
"events_default": event_default,
"invite": 0,
"kick": 50,
"redact": 50,
"state_default": 50,
"users": {self.user_id: 100},
"users_default": 0,
},
tok=self.tok,
)
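The test case above installs _check_event_allowed_callbacks by hand; outside of a test harness a module would normally register the same hook through the module API. A minimal sketch of such a module, assuming the register_third_party_rules_callbacks method exposed by recent ModuleApi versions; the class name and config handling are illustrative and not part of the test file:

from typing import Optional, Tuple

from synapse.events import EventBase
from synapse.module_api import ModuleApi
from synapse.types import StateMap


class ForbidFooBarModule:
    """Toy module sketch: rejects 'foo.bar.forbidden' events, allows the rest."""

    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Register the same kind of callback the test case wires up directly.
        api.register_third_party_rules_callbacks(
            check_event_allowed=self.check_event_allowed,
        )

    async def check_event_allowed(
        self, event: EventBase, state: StateMap[EventBase]
    ) -> Tuple[bool, Optional[dict]]:
        # (False, None) blocks the event; (True, None) lets it through unchanged,
        # mirroring the `check` helpers used in test_third_party_rules above.
        return event.type != "foo.bar.forbidden", None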
| 37.106422
| 104
| 0.590615
|
import threading
from typing import TYPE_CHECKING, Dict, Optional, Tuple
from unittest.mock import Mock
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.types import JsonDict, Requester, StateMap
from synapse.util.frozenutils import unfreeze
from tests import unittest
from tests.test_utils import make_awaitable
if TYPE_CHECKING:
from synapse.module_api import ModuleApi
thread_local = threading.local()
class LegacyThirdPartyRulesTestModule:
def __init__(self, config: Dict, module_api: "ModuleApi"):
thread_local.rules_module = self
self.module_api = module_api
async def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
):
return True
async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]):
return True
@staticmethod
def parse_config(config):
return config
class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule):
def __init__(self, config: Dict, module_api: "ModuleApi"):
super().__init__(config, module_api)
def on_create_room(
self, requester: Requester, config: dict, is_requester_admin: bool
):
return False
class LegacyChangeEvents(LegacyThirdPartyRulesTestModule):
def __init__(self, config: Dict, module_api: "ModuleApi"):
super().__init__(config, module_api)
async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]):
d = event.get_dict()
content = unfreeze(event.content)
content["foo"] = "bar"
d["content"] = content
return d
class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver()
load_legacy_third_party_event_rules(hs)
# therefore disable event signature checks.
# Note that these checks are not relevant to this test case.
# Have this homeserver auto-approve all event signature checking.
async def approve_all_signature_checking(_, pdu):
return pdu
hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking
# Have this homeserver skip event auth checks. This is necessary due to
# event auth checks ensuring that events were signed by the sender's homeserver.
async def _check_event_auth(origin, event, context, *args, **kwargs):
return context
hs.get_federation_event_handler()._check_event_auth = _check_event_auth
return hs
def prepare(self, reactor, clock, homeserver):
self.user_id = self.register_user("kermit", "monkey")
self.invitee = self.register_user("invitee", "hackme")
self.tok = self.login("kermit", "monkey")
try:
self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
except Exception:
pass
def test_third_party_rules(self):
async def check(ev, state):
return ev.type != "foo.bar.forbidden", None
callback = Mock(spec=[], side_effect=check)
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [
callback
]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id,
{},
access_token=self.tok,
)
self.assertEquals(channel.result["code"], b"200", channel.result)
callback.assert_called_once()
state_arg = callback.call_args[0][1]
for k in (("m.room.create", ""), ("m.room.member", self.user_id)):
self.assertIn(k, state_arg)
ev = state_arg[k]
self.assertEqual(ev.type, k[0])
self.assertEqual(ev.state_key, k[1])
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id,
{},
access_token=self.tok,
)
self.assertEquals(channel.result["code"], b"403", channel.result)
def test_third_party_rules_workaround_synapse_errors_pass_through(self):
class NastyHackException(SynapseError):
def error_dict(self):
result = super().error_dict()
result["nasty"] = "very"
return result
async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]:
raise NastyHackException(429, "message")
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id,
{},
access_token=self.tok,
)
self.assertEquals(channel.result["code"], b"429", channel.result)
self.assertEqual(
channel.json_body,
{"errcode": "M_UNKNOWN", "error": "message", "nasty": "very"},
)
def test_cannot_modify_event(self):
async def check(ev: EventBase, state):
ev.content = {"x": "y"}
return True, None
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{"x": "x"},
access_token=self.tok,
)
# module did something bad.
self.assertEqual(channel.code, 200, channel.result)
event_id = channel.json_body["event_id"]
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.code, 200, channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["x"], "x")
def test_modify_event(self):
# first patch the event checker so that it will modify the event
async def check(ev: EventBase, state):
d = ev.get_dict()
d["content"] = {"x": "y"}
return True, d
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# now send the event
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{"x": "x"},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
event_id = channel.json_body["event_id"]
# ... and check that it got modified
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["x"], "y")
def test_message_edit(self):
# first patch the event checker so that it will modify the event
async def check(ev: EventBase, state):
d = ev.get_dict()
d["content"] = {
"msgtype": "m.text",
"body": d["content"]["body"].upper(),
}
return True, d
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
# Send an event, then edit it.
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id,
{
"msgtype": "m.text",
"body": "Original body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
orig_event_id = channel.json_body["event_id"]
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/m.room.message/2" % self.room_id,
{
"m.new_content": {"msgtype": "m.text", "body": "Edited body"},
"m.relates_to": {
"rel_type": "m.replace",
"event_id": orig_event_id,
},
"msgtype": "m.text",
"body": "Edited body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
edited_event_id = channel.json_body["event_id"]
# ... and check that they both got modified
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, orig_event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "ORIGINAL BODY")
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, edited_event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "EDITED BODY")
def test_send_event(self):
content = {
"msgtype": "m.text",
"body": "Hello!",
}
event_dict = {
"room_id": self.room_id,
"type": "m.room.message",
"content": content,
"sender": self.user_id,
}
event: EventBase = self.get_success(
self.hs.get_module_api().create_and_send_event_into_room(event_dict)
)
self.assertEquals(event.sender, self.user_id)
self.assertEquals(event.room_id, self.room_id)
self.assertEquals(event.type, "m.room.message")
self.assertEquals(event.content, content)
@unittest.override_config(
{
"third_party_event_rules": {
"module": __name__ + ".LegacyChangeEvents",
"config": {},
}
}
)
def test_legacy_check_event_allowed(self):
channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/m.room.message/1" % self.room_id,
{
"msgtype": "m.text",
"body": "Original body",
},
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
event_id = channel.json_body["event_id"]
channel = self.make_request(
"GET",
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
self.assertEqual(channel.result["code"], b"200", channel.result)
self.assertIn("foo", channel.json_body["content"].keys())
self.assertEqual(channel.json_body["content"]["foo"], "bar")
@unittest.override_config(
{
"third_party_event_rules": {
"module": __name__ + ".LegacyDenyNewRooms",
"config": {},
}
}
)
def test_legacy_on_create_room(self):
self.helper.create_room_as(self.user_id, tok=self.tok, expect_code=403)
def test_sent_event_end_up_in_room_state(self):
event_type = "org.matrix.test_state"
# This content will be updated later on, and since we actually use a reference on
# the dict it does the right thing. It's a bit hacky but a handy way of making
event_content = {"i": -1}
api = self.hs.get_module_api()
async def test_fn(event: EventBase, state_events):
if event.is_state and event.type == EventTypes.PowerLevels:
await api.create_and_send_event_into_room(
{
"room_id": event.room_id,
"sender": event.sender,
"type": event_type,
"content": event_content,
"state_key": "",
}
)
return True, None
self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [test_fn]
for i in range(5):
event_content["i"] = i
# doesn't consider an update redundant.
self._update_power_levels(event_default=i)
channel = self.make_request(
method="GET",
path="/rooms/" + self.room_id + "/state/" + event_type,
access_token=self.tok,
)
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body["i"], i)
def test_on_new_event(self):
on_new_event = Mock(make_awaitable(None))
self.hs.get_third_party_event_rules()._on_new_event_callbacks.append(
on_new_event
)
# Send a message event to the room and check that the callback is called.
self.helper.send(room_id=self.room_id, tok=self.tok)
self.assertEqual(on_new_event.call_count, 1)
# Check that the callback is also called on membership updates.
self.helper.invite(
room=self.room_id,
src=self.user_id,
targ=self.invitee,
tok=self.tok,
)
self.assertEqual(on_new_event.call_count, 2)
args, _ = on_new_event.call_args
self.assertEqual(args[0].membership, Membership.INVITE)
self.assertEqual(args[0].state_key, self.invitee)
# Check that the invitee's membership is correct in the state that's passed down
# to the callback.
self.assertEqual(
args[1][(EventTypes.Member, self.invitee)].membership,
Membership.INVITE,
)
# Send an event over federation and check that the callback is also called.
self._send_event_over_federation()
self.assertEqual(on_new_event.call_count, 3)
def _send_event_over_federation(self) -> None:
body = {
"origin": self.hs.config.server.server_name,
"origin_server_ts": self.clock.time_msec(),
"pdus": [
{
"sender": self.user_id,
"type": EventTypes.Message,
"state_key": "",
"content": {"body": "hello world", "msgtype": "m.text"},
"room_id": self.room_id,
"depth": 0,
"origin_server_ts": self.clock.time_msec(),
"prev_events": [],
"auth_events": [],
"signatures": {},
"unsigned": {},
}
],
}
channel = self.make_request(
method="PUT",
path="/_matrix/federation/v1/send/1",
content=body,
federation_auth_origin=self.hs.config.server.server_name.encode("utf8"),
)
self.assertEqual(channel.code, 200, channel.result)
def _update_power_levels(self, event_default: int = 0):
self.helper.send_state(
room_id=self.room_id,
event_type=EventTypes.PowerLevels,
body={
"ban": 50,
"events": {
"m.room.avatar": 50,
"m.room.canonical_alias": 50,
"m.room.encryption": 100,
"m.room.history_visibility": 100,
"m.room.name": 50,
"m.room.power_levels": 100,
"m.room.server_acl": 100,
"m.room.tombstone": 100,
},
"events_default": event_default,
"invite": 0,
"kick": 50,
"redact": 50,
"state_default": 50,
"users": {self.user_id: 100},
"users_default": 0,
},
tok=self.tok,
)
| true
| true
|
1c42c4720905e5eb9eed4993cfdc917e4749e9a9
| 201
|
py
|
Python
|
Download Manager/download_append.py
|
guptachetan1997/crawling-projects
|
36b9f568cd246a1d8d25b89ad83b33ba0c67bf4d
|
[
"MIT"
] | 69
|
2016-06-16T02:25:31.000Z
|
2022-03-03T09:36:15.000Z
|
Youtube Download Manager/download_append.py
|
MohanSha/PyWebCrawling
|
be4d87c750ab2017bbc28ec48a345384073bab23
|
[
"MIT"
] | 1
|
2018-09-21T12:27:00.000Z
|
2018-09-21T12:27:00.000Z
|
Download Manager/download_append.py
|
guptachetan1997/crawling-projects
|
36b9f568cd246a1d8d25b89ad83b33ba0c67bf4d
|
[
"MIT"
] | 24
|
2016-11-06T14:03:56.000Z
|
2022-03-25T14:16:11.000Z
|
import sys
def main():
try:
url = str(sys.argv[1]) + '\n'
with open('list.txt', 'a') as file:
file.write(url)
except:
print("Print some error occured")
if __name__ == '__main__':
main()
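For comparison, a slightly more defensive sketch of the same append-a-URL idea, using argparse and a narrow exception instead of a bare except; the file name and messages below are illustrative, not taken from the original script:

import argparse
import sys


def main() -> int:
    parser = argparse.ArgumentParser(description="Append a URL to the download list")
    parser.add_argument("url", help="URL to queue for download")
    args = parser.parse_args()
    try:
        # Append, creating the file on first use, exactly like the script above.
        with open("list.txt", "a") as fh:
            fh.write(args.url + "\n")
    except OSError as exc:
        print("Could not update list.txt: %s" % exc, file=sys.stderr)
        return 1
    return 0


if __name__ == "__main__":
    sys.exit(main())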
| 16.75
| 37
| 0.606965
|
import sys
def main():
try:
url = str(sys.argv[1]) + '\n'
with open('list.txt', 'a') as file:
file.write(url)
except:
print("Print some error occured")
if __name__ == '__main__':
main()
| true
| true
|
1c42c4c0cfc60ac65e3178923c0cf37f8e02a12b
| 20
|
py
|
Python
|
skinnywms/__init__.py
|
cosunae/skinnywms
|
43092858ec6faa8b3723c54d5abc910cafe22f05
|
[
"Apache-2.0"
] | null | null | null |
skinnywms/__init__.py
|
cosunae/skinnywms
|
43092858ec6faa8b3723c54d5abc910cafe22f05
|
[
"Apache-2.0"
] | null | null | null |
skinnywms/__init__.py
|
cosunae/skinnywms
|
43092858ec6faa8b3723c54d5abc910cafe22f05
|
[
"Apache-2.0"
] | null | null | null |
__version__ ="0.1.3"
| 20
| 20
| 0.7
|
__version__ ="0.1.3"
| true
| true
|
1c42c4f659f5cb271c6a09894212a8ed333056c9
| 11,707
|
py
|
Python
|
veriloggen/stream/stream.py
|
leonardt/veriloggen
|
bc3dacaa6a3e0b0652763881d0edf0421c6d3189
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/stream/stream.py
|
leonardt/veriloggen
|
bc3dacaa6a3e0b0652763881d0edf0421c6d3189
|
[
"Apache-2.0"
] | null | null | null |
veriloggen/stream/stream.py
|
leonardt/veriloggen
|
bc3dacaa6a3e0b0652763881d0edf0421c6d3189
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import copy
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from veriloggen.seq.seq import Seq
from . import visitor
from . import stypes
from . import mul
from . import scheduler
from . import allocator
from . import graph
# ID counter for 'Stream'
_stream_counter = 0
def reset():
global _stream_counter
_stream_counter = 0
stypes._object_counter = 0
mul.reset()
def StreamManager(module, clock, reset,
ivalid=None, iready=None,
ovalid=None, oready=None,
aswire=True, no_hook=False):
return Stream(module=module, clock=clock, reset=reset,
ivalid=ivalid, iready=iready,
ovalid=ovalid, oready=oready,
aswire=aswire, no_hook=no_hook)
class Stream(object):
def __init__(self, *nodes, **opts):
# ID for manager reuse and merge
global _stream_counter
self.object_id = _stream_counter
_stream_counter += 1
self.nodes = set()
self.named_numerics = OrderedDict()
self.add(*nodes)
self.max_stage = 0
self.last_input = None
self.last_output = None
self.module = opts['module'] if 'module' in opts else None
self.clock = opts['clock'] if 'clock' in opts else None
self.reset = opts['reset'] if 'reset' in opts else None
self.ivalid = opts['ivalid'] if 'ivalid' in opts else None
self.iready = opts['iready'] if 'iready' in opts else None
self.ovalid = opts['ovalid'] if 'ovalid' in opts else None
self.oready = opts['oready'] if 'oready' in opts else None
self.aswire = opts['aswire'] if 'aswire' in opts else True
self.seq = None
self.has_control = False
self.implemented = False
if (self.module is not None and
self.clock is not None and self.reset is not None):
no_hook = opts['no_hook'] if 'no_hook' in opts else False
if not no_hook:
self.module.add_hook(self.implement)
seq_name = (opts['seq_name'] if 'seq_name' in opts else
'_stream_seq_%d' % self.object_id)
self.seq = Seq(self.module, seq_name, self.clock, self.reset)
#-------------------------------------------------------------------------
def add(self, *nodes):
self.nodes.update(set(nodes))
for node in nodes:
if hasattr(node, 'input_data'):
if isinstance(node.input_data, str):
name = node.input_data
else:
name = node.input_data.name
self.named_numerics[name] = node
elif hasattr(node, 'output_data'):
if node.output_data is None:
continue
if isinstance(node.output_data, str):
name = node.output_data
else:
name = node.output_data.name
self.named_numerics[name] = node
#-------------------------------------------------------------------------
def to_module(self, name, clock='CLK', reset='RST', aswire=False, seq_name=None):
""" generate a Module definion """
m = Module(name)
clk = m.Input(clock)
rst = m.Input(reset)
m = self.implement(m, clk, rst, aswire=aswire, seq_name=seq_name)
return m
#-------------------------------------------------------------------------
def implement(self, m=None, clock=None, reset=None, aswire=None, seq_name=None):
""" implemente actual registers and operations in Verilog """
if self.implemented:
if m is None:
return self.module
raise ValueError('already implemented.')
self.implemented = True
if m is None:
m = self.module
if self.module is None:
self.module = m
if clock is None:
clock = self.clock
if reset is None:
reset = self.reset
if self.seq is None:
if seq_name is None:
seq_name = '_stream_seq_%d' % self.object_id
seq = Seq(m, seq_name, clock, reset)
else:
seq = self.seq
if aswire is None:
aswire = self.aswire
self.add_control(aswire=aswire)
self.has_control = True
# for mult and div
m._clock = clock
m._reset = reset
stream_nodes = self.nodes
input_visitor = visitor.InputVisitor()
input_vars = set()
for node in sorted(stream_nodes, key=lambda x: x.object_id):
input_vars.update(input_visitor.visit(node))
output_visitor = visitor.OutputVisitor()
output_vars = set()
for node in sorted(stream_nodes, key=lambda x: x.object_id):
output_vars.update(output_visitor.visit(node))
# add input ports
for input_var in sorted(input_vars, key=lambda x: x.object_id):
input_var._implement_input(m, seq, aswire)
# schedule
sched = scheduler.ASAPScheduler()
sched.schedule(output_vars)
# balance output stage depth
max_stage = 0
for output_var in sorted(output_vars, key=lambda x: x.object_id):
max_stage = stypes._max(max_stage, output_var.end_stage)
self.max_stage = max_stage
output_vars = sched.balance_output(output_vars, max_stage)
# get all vars
all_visitor = visitor.AllVisitor()
all_vars = set()
for output_var in sorted(output_vars, key=lambda x: x.object_id):
all_vars.update(all_visitor.visit(output_var))
# control (valid and ready)
if not self.has_control:
self.add_control(aswire)
self.implement_control(seq)
# allocate (implement signals)
alloc = allocator.Allocator()
alloc.allocate(m, seq, all_vars, self.valid_list, self.senable)
# set default module information
for var in sorted(all_vars, key=lambda x: x.object_id):
var._set_module(m)
var._set_strm(self)
if var.seq is not None:
seq.update(var.seq)
var._set_seq(seq)
# add output ports
for output_var in sorted(output_vars, key=lambda x: x.object_id):
output_var._implement_output(m, seq, aswire)
# save schedule result
self.last_input = input_vars
self.last_output = output_vars
return m
#-------------------------------------------------------------------------
def add_control(self, aswire=True):
if self.ivalid is not None and isinstance(self.ivalid, str):
if aswire:
self.ivalid = self.module.Wire(self.ivalid)
else:
self.ivalid = self.module.Input(self.ivalid)
if self.iready is not None and isinstance(self.iready, str):
if aswire:
self.iready = self.module.Wire(self.iready)
else:
self.iready = self.module.Output(self.iready)
if self.ovalid is not None and isinstance(self.ovalid, str):
if aswire:
self.ovalid = self.module.Wire(self.ovalid)
else:
self.ovalid = self.module.Output(self.ovalid)
if self.oready is not None and isinstance(self.oready, str):
if aswire:
self.oready = self.module.Wire(self.oready)
else:
self.oready = self.module.Input(self.oready)
def implement_control(self, seq):
self.valid_list = None
if self.ivalid is None and self.oready is None:
if self.ovalid is not None:
self.ovalid.assign(1)
if self.iready is not None:
self.iready.assign(1)
self.senable = None
return
if self.oready is None:
self._make_valid_chain(seq)
self.senable = None
return
if self.ivalid is None:
self.iready.assign(self.oready)
self.senable = self.oready
return
cond = vtypes.OrList(vtypes.Not(self.ovalid), self.oready)
self.senable = self.module.TmpWire()
self.senable.assign(cond)
self._make_valid_chain(seq, self.senable)
self.iready.assign(self.senable)
def _make_valid_chain(self, seq, cond=None):
self.valid_list = []
self.valid_list.append(self.ivalid)
name = self.ivalid.name
prev = self.ivalid
for i in range(self.max_stage):
v = self.module.Reg("_{}_{}".format(name, i), initval=0)
self.valid_list.append(v)
seq(v(prev), cond=cond)
prev = v
if self.ovalid is not None:
self.ovalid.assign(prev)
#-------------------------------------------------------------------------
def draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
if self.last_output is None:
self.to_module()
graph.draw_graph(self.last_output, filename=filename, prog=prog,
rankdir=rankdir, approx=approx)
def enable_draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
self.module.add_hook(self.draw_graph,
kwargs={'filename': filename, 'prog': prog,
'rankdir': rankdir, 'approx': approx})
#-------------------------------------------------------------------------
def get_input(self):
if self.last_input is None:
return OrderedDict()
ret = OrderedDict()
for input_var in sorted(self.last_input, key=lambda x: x.object_id):
key = str(input_var.input_data)
value = input_var
ret[key] = value
return ret
def get_output(self):
if self.last_output is None:
return OrderedDict()
ret = OrderedDict()
for output_var in sorted(self.last_output, key=lambda x: x.object_id):
key = str(output_var.output_data)
value = output_var
ret[key] = value
return ret
#-------------------------------------------------------------------------
def pipeline_depth(self):
return self.max_stage
#-------------------------------------------------------------------------
def __getattr__(self, attr):
try:
return object.__getattr__(self, attr)
except AttributeError as e:
if attr.startswith('__') or attr not in dir(stypes):
raise e
func = getattr(stypes, attr)
@functools.wraps(func)
def wrapper(*args, **kwargs):
v = func(*args, **kwargs)
if isinstance(v, (tuple, list)):
for item in v:
self._set_info(item)
else:
self._set_info(v)
return v
return wrapper
def _set_info(self, v):
if isinstance(v, stypes._Numeric):
v._set_module(self.module)
v._set_strm(self)
v._set_seq(self.seq)
self.add(v)
def get_named_numeric(self, name):
if name not in self.named_numerics:
raise NameError("Numeric '%s' is not defined." % name)
return self.named_numerics[name]
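The control path built by implement_control and _make_valid_chain is essentially a shift register of valid bits that only advances while the stage enable (senable: ovalid low or oready high) holds. A pure-Python sketch of that behaviour, independent of veriloggen and with hypothetical names, may make the intent clearer:

def step_valid_chain(valid_chain, ivalid, oready):
    """Advance the pipeline's valid bits by one clock tick.

    valid_chain[0] plays the role of the input stage and valid_chain[-1]
    the role of ovalid. The registers shift only when the output is empty
    or being consumed, which is the senable condition above.
    """
    ovalid = valid_chain[-1]
    senable = (not ovalid) or oready
    if senable:
        # Shift valids towards the output and feed the new input valid in.
        for i in range(len(valid_chain) - 1, 0, -1):
            valid_chain[i] = valid_chain[i - 1]
        valid_chain[0] = ivalid
    # iready follows senable: upstream may only push while the chain shifts.
    return senable


# Usage: a 3-stage pipeline fills up, then stalls because oready stays low.
chain = [False, False, False]
for cycle in range(4):
    accepted = step_valid_chain(chain, ivalid=True, oready=False)
    print(cycle, accepted, chain)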
| 31.47043
| 92
| 0.544717
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import copy
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from veriloggen.seq.seq import Seq
from . import visitor
from . import stypes
from . import mul
from . import scheduler
from . import allocator
from . import graph
_stream_counter = 0
def reset():
global _stream_counter
_stream_counter = 0
stypes._object_counter = 0
mul.reset()
def StreamManager(module, clock, reset,
ivalid=None, iready=None,
ovalid=None, oready=None,
aswire=True, no_hook=False):
return Stream(module=module, clock=clock, reset=reset,
ivalid=ivalid, iready=iready,
ovalid=ovalid, oready=oready,
aswire=aswire, no_hook=no_hook)
class Stream(object):
def __init__(self, *nodes, **opts):
global _stream_counter
self.object_id = _stream_counter
_stream_counter += 1
self.nodes = set()
self.named_numerics = OrderedDict()
self.add(*nodes)
self.max_stage = 0
self.last_input = None
self.last_output = None
self.module = opts['module'] if 'module' in opts else None
self.clock = opts['clock'] if 'clock' in opts else None
self.reset = opts['reset'] if 'reset' in opts else None
self.ivalid = opts['ivalid'] if 'ivalid' in opts else None
self.iready = opts['iready'] if 'iready' in opts else None
self.ovalid = opts['ovalid'] if 'ovalid' in opts else None
self.oready = opts['oready'] if 'oready' in opts else None
self.aswire = opts['aswire'] if 'aswire' in opts else True
self.seq = None
self.has_control = False
self.implemented = False
if (self.module is not None and
self.clock is not None and self.reset is not None):
no_hook = opts['no_hook'] if 'no_hook' in opts else False
if not no_hook:
self.module.add_hook(self.implement)
seq_name = (opts['seq_name'] if 'seq_name' in opts else
'_stream_seq_%d' % self.object_id)
self.seq = Seq(self.module, seq_name, self.clock, self.reset)
def add(self, *nodes):
self.nodes.update(set(nodes))
for node in nodes:
if hasattr(node, 'input_data'):
if isinstance(node.input_data, str):
name = node.input_data
else:
name = node.input_data.name
self.named_numerics[name] = node
elif hasattr(node, 'output_data'):
if node.output_data is None:
continue
if isinstance(node.output_data, str):
name = node.output_data
else:
name = node.output_data.name
self.named_numerics[name] = node
def to_module(self, name, clock='CLK', reset='RST', aswire=False, seq_name=None):
m = Module(name)
clk = m.Input(clock)
rst = m.Input(reset)
m = self.implement(m, clk, rst, aswire=aswire, seq_name=seq_name)
return m
def implement(self, m=None, clock=None, reset=None, aswire=None, seq_name=None):
if self.implemented:
if m is None:
return self.module
raise ValueError('already implemented.')
self.implemented = True
if m is None:
m = self.module
if self.module is None:
self.module = m
if clock is None:
clock = self.clock
if reset is None:
reset = self.reset
if self.seq is None:
if seq_name is None:
seq_name = '_stream_seq_%d' % self.object_id
seq = Seq(m, seq_name, clock, reset)
else:
seq = self.seq
if aswire is None:
aswire = self.aswire
self.add_control(aswire=aswire)
self.has_control = True
m._clock = clock
m._reset = reset
stream_nodes = self.nodes
input_visitor = visitor.InputVisitor()
input_vars = set()
for node in sorted(stream_nodes, key=lambda x: x.object_id):
input_vars.update(input_visitor.visit(node))
output_visitor = visitor.OutputVisitor()
output_vars = set()
for node in sorted(stream_nodes, key=lambda x: x.object_id):
output_vars.update(output_visitor.visit(node))
for input_var in sorted(input_vars, key=lambda x: x.object_id):
input_var._implement_input(m, seq, aswire)
sched = scheduler.ASAPScheduler()
sched.schedule(output_vars)
max_stage = 0
for output_var in sorted(output_vars, key=lambda x: x.object_id):
max_stage = stypes._max(max_stage, output_var.end_stage)
self.max_stage = max_stage
output_vars = sched.balance_output(output_vars, max_stage)
all_visitor = visitor.AllVisitor()
all_vars = set()
for output_var in sorted(output_vars, key=lambda x: x.object_id):
all_vars.update(all_visitor.visit(output_var))
if not self.has_control:
self.add_control(aswire)
self.implement_control(seq)
alloc = allocator.Allocator()
alloc.allocate(m, seq, all_vars, self.valid_list, self.senable)
for var in sorted(all_vars, key=lambda x: x.object_id):
var._set_module(m)
var._set_strm(self)
if var.seq is not None:
seq.update(var.seq)
var._set_seq(seq)
for output_var in sorted(output_vars, key=lambda x: x.object_id):
output_var._implement_output(m, seq, aswire)
self.last_input = input_vars
self.last_output = output_vars
return m
def add_control(self, aswire=True):
if self.ivalid is not None and isinstance(self.ivalid, str):
if aswire:
self.ivalid = self.module.Wire(self.ivalid)
else:
self.ivalid = self.module.Input(self.ivalid)
if self.iready is not None and isinstance(self.iready, str):
if aswire:
self.iready = self.module.Wire(self.iready)
else:
self.iready = self.module.Output(self.iready)
if self.ovalid is not None and isinstance(self.ovalid, str):
if aswire:
self.ovalid = self.module.Wire(self.ovalid)
else:
self.ovalid = self.module.Output(self.ovalid)
if self.oready is not None and isinstance(self.oready, str):
if aswire:
self.oready = self.module.Wire(self.oready)
else:
self.oready = self.module.Input(self.oready)
def implement_control(self, seq):
self.valid_list = None
if self.ivalid is None and self.oready is None:
if self.ovalid is not None:
self.ovalid.assign(1)
if self.iready is not None:
self.iready.assign(1)
self.senable = None
return
if self.oready is None:
self._make_valid_chain(seq)
self.senable = None
return
if self.ivalid is None:
self.iready.assign(self.oready)
self.senable = self.oready
return
cond = vtypes.OrList(vtypes.Not(self.ovalid), self.oready)
self.senable = self.module.TmpWire()
self.senable.assign(cond)
self._make_valid_chain(seq, self.senable)
self.iready.assign(self.senable)
def _make_valid_chain(self, seq, cond=None):
self.valid_list = []
self.valid_list.append(self.ivalid)
name = self.ivalid.name
prev = self.ivalid
for i in range(self.max_stage):
v = self.module.Reg("_{}_{}".format(name, i), initval=0)
self.valid_list.append(v)
seq(v(prev), cond=cond)
prev = v
if self.ovalid is not None:
self.ovalid.assign(prev)
def draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
if self.last_output is None:
self.to_module()
graph.draw_graph(self.last_output, filename=filename, prog=prog,
rankdir=rankdir, approx=approx)
def enable_draw_graph(self, filename='out.png', prog='dot', rankdir='LR', approx=False):
self.module.add_hook(self.draw_graph,
kwargs={'filename': filename, 'prog': prog,
'rankdir': rankdir, 'approx': approx})
def get_input(self):
if self.last_input is None:
return OrderedDict()
ret = OrderedDict()
for input_var in sorted(self.last_input, key=lambda x: x.object_id):
key = str(input_var.input_data)
value = input_var
ret[key] = value
return ret
def get_output(self):
if self.last_output is None:
return OrderedDict()
ret = OrderedDict()
for output_var in sorted(self.last_output, key=lambda x: x.object_id):
key = str(output_var.output_data)
value = output_var
ret[key] = value
return ret
def pipeline_depth(self):
return self.max_stage
def __getattr__(self, attr):
try:
return object.__getattr__(self, attr)
except AttributeError as e:
if attr.startswith('__') or attr not in dir(stypes):
raise e
func = getattr(stypes, attr)
@functools.wraps(func)
def wrapper(*args, **kwargs):
v = func(*args, **kwargs)
if isinstance(v, (tuple, list)):
for item in v:
self._set_info(item)
else:
self._set_info(v)
return v
return wrapper
def _set_info(self, v):
if isinstance(v, stypes._Numeric):
v._set_module(self.module)
v._set_strm(self)
v._set_seq(self.seq)
self.add(v)
def get_named_numeric(self, name):
if name not in self.named_numerics:
raise NameError("Numeric '%s' is not defined." % name)
return self.named_numerics[name]
| true
| true
|
1c42c5c925d219979a79b7f44ab8b58d315251c5
| 645
|
py
|
Python
|
legacy/dx/simulator/sim.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | 2
|
2020-04-09T13:04:25.000Z
|
2021-09-24T14:17:26.000Z
|
legacy/dx/simulator/sim.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | null | null | null |
legacy/dx/simulator/sim.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | 3
|
2019-09-20T20:49:54.000Z
|
2021-09-02T17:33:47.000Z
|
import sys
import simulator_diagnoser as sd
if __name__ == "__main__":
config = sd.ConfigParser()
grammar = config.get_grammar()
graph = config.get_graph()
symptoms = config.get_symptoms()
dx = sd.SimpleDiagnoser(grammar)
dxs = dx.diagnose(graph, symptoms)
if len(sys.argv) == 1:
print("Reduced diagnosis: ", dxs)
dxs.print_dx()
else:
if(sys.argv[1] == 'pdf'):
writer = sd.PdfWriter()
writer.append_dx(graph, dxs)
writer.write('sim.pdf')
else:
if(sys.argv[1] == 'json'):
graph.print_json(dxs.reduced_diagnosis())
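A small sketch of the same output-format dispatch written with argparse and a handler table rather than nested if blocks; the handler bodies are placeholders and do not call into simulator_diagnoser:

import argparse


def emit_stdout(result):
    print("Reduced diagnosis:", result)


def emit_pdf(result):
    print("would render %r to sim.pdf" % (result,))


def emit_json(result):
    print("would dump %r as JSON" % (result,))


HANDLERS = {"stdout": emit_stdout, "pdf": emit_pdf, "json": emit_json}


def main():
    parser = argparse.ArgumentParser(description="Emit a diagnosis result")
    parser.add_argument("format", nargs="?", default="stdout",
                        choices=sorted(HANDLERS))
    args = parser.parse_args()
    HANDLERS[args.format]("dxs placeholder")


if __name__ == "__main__":
    main()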
| 25.8
| 57
| 0.578295
|
import sys
import simulator_diagnoser as sd
if __name__ == "__main__":
config = sd.ConfigParser()
grammar = config.get_grammar()
graph = config.get_graph()
symptoms = config.get_symptoms()
dx = sd.SimpleDiagnoser(grammar)
dxs = dx.diagnose(graph, symptoms)
if len(sys.argv) == 1:
print("Reduced diagnosis: ", dxs)
dxs.print_dx()
else:
if(sys.argv[1] == 'pdf'):
writer = sd.PdfWriter()
writer.append_dx(graph, dxs)
writer.write('sim.pdf')
else:
if(sys.argv[1] == 'json'):
graph.print_json(dxs.reduced_diagnosis())
| true
| true
|
1c42c62f498f527ae7dec66fc029f21f4ea4bd7d
| 3,322
|
py
|
Python
|
code_v3/edges_style.py
|
souleater42/MMP-Robotic-Artist
|
2a67b611c2a3af5feb34276c0d3d30340667f1fa
|
[
"MIT"
] | 1
|
2020-02-20T05:11:31.000Z
|
2020-02-20T05:11:31.000Z
|
code_v3/edges_style.py
|
souleater42/MMP-Robotic-Artist
|
2a67b611c2a3af5feb34276c0d3d30340667f1fa
|
[
"MIT"
] | null | null | null |
code_v3/edges_style.py
|
souleater42/MMP-Robotic-Artist
|
2a67b611c2a3af5feb34276c0d3d30340667f1fa
|
[
"MIT"
] | null | null | null |
"""
Summary => will apply the EdgesStyle to the image given.
Description => This class is going to control the processing of images for
               the EdgesStyle. It will take the 'takenPicture.jpg'
               from the Image folder and then style it. The output will
be a list of x and y coordinates for the plotter to print out
later on.
Author => Matthew Howard (mah60).
Version => 0.1 - 20/04/2018 - create the basic class for the edges
algorithm. This code is yet to be complete.
0.2 - 21/04/2018 - removed ui from __init_- method as not
used
"""
from __future__ import division
from image_processor import ImageProcessor
import numpy as np
import cv2
class EdgesStyle(ImageProcessor):
"""
    Summary => will apply the edge-detection style to the image given.
    Description => This class is going to control the processing of images for
                   the edges style. It will take the 'takenPicture.jpg'
                   from the Image folder and then style it. The output will
                   be a list of x and y coordinates for the plotter to print out
                   later on.
    This class inherits ImageProcessor and will take on the
individual classes for it.
args => None
return => None
"""
def __init__(self):
"""
Summary => will initialize the image processor.
        Description => will initialize the image processor, to be used later
on.
args => None
return => None
"""
super(EdgesStyle, self).__init__()
def run(self):
"""
        Summary => will find the borders in the image taken.
        Description => will find the borders in the image taken, using
                       OpenCV. This will work by making the image grayscale, then
                       using a GaussianBlur to filter the image. Then using Sobel
to calculate where the edges are.
After this we use a threshold to swap the black and white
colours around.
args => None
return => None
"""
# get the image to be processed
img = cv2.imread('Images/takenPicture.jpg', 0)
# resize img given
img = self.compress_image(img, 3)
# img_edges = cv2.Canny(img, 80, 80)
        # blur the image so we can tell where the key borders are
blur = cv2.GaussianBlur(img, (5, 5), 0)
        # create Sobel derivatives
sobal = cv2.Sobel(blur, cv2.CV_64F, 1, 1, ksize=5)
        # convert the Sobel derivatives to a Canny-style edge map
# to view the edges of the image
# sobalCopy = np.uint8(sobal) # https://stackoverflow.com/questions
# /19103933/depth-error-in-2d-image-with-opencv-python
# canny = cv2.Canny(img, 25, 100, L2gradient=False)
# save the final output
# change image type to unit 8
sobal = np.uint8(sobal)
# creates a threshold to create a black and white image
ret, threshold = cv2.threshold(sobal, 25, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite("Images/processedImage.jpg", threshold)
# cv2.imwrite("Images/edges_style_example.jpg", threshold)
self.calculate_coordinates(threshold)
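The run() method goes grayscale read, Gaussian blur, Sobel, inverted threshold. A compact standalone sketch of the same pipeline, plus the Canny alternative referenced in the commented-out lines; the input and output paths here are illustrative:

import cv2
import numpy as np

# Illustrative path; the class above reads 'Images/takenPicture.jpg'.
img = cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE)

blur = cv2.GaussianBlur(img, (5, 5), 0)

# Sobel in both directions; run() casts straight to uint8 with np.uint8,
# while convertScaleAbs (absolute value then scale) is the more common recipe.
sobel = cv2.Sobel(blur, cv2.CV_64F, 1, 1, ksize=5)
sobel = cv2.convertScaleAbs(sobel)

# Invert so edges come out black on white, matching THRESH_BINARY_INV above.
_, edges_inv = cv2.threshold(sobel, 25, 255, cv2.THRESH_BINARY_INV)

# The Canny detector mentioned in the commented-out code is an alternative:
canny = cv2.Canny(blur, 25, 100)

cv2.imwrite("edges_sobel.jpg", edges_inv)
cv2.imwrite("edges_canny.jpg", canny)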
| 35.72043
| 79
| 0.612884
|
from __future__ import division
from image_processor import ImageProcessor
import numpy as np
import cv2
class EdgesStyle(ImageProcessor):
def __init__(self):
super(EdgesStyle, self).__init__()
def run(self):
img = cv2.imread('Images/takenPicture.jpg', 0)
img = self.compress_image(img, 3)
blur = cv2.GaussianBlur(img, (5, 5), 0)
sobal = cv2.Sobel(blur, cv2.CV_64F, 1, 1, ksize=5)
sobal = np.uint8(sobal)
ret, threshold = cv2.threshold(sobal, 25, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite("Images/processedImage.jpg", threshold)
self.calculate_coordinates(threshold)
| true
| true
|
1c42c73b027b903d89ebc1a31c82e9ad56719dc8
| 874
|
py
|
Python
|
setup.py
|
alehuo/pyoidc-redis-session-backend
|
a24af967e9e5fa59aaa2511190db355b53d7d2dd
|
[
"MIT"
] | 3
|
2020-07-22T11:14:13.000Z
|
2022-02-28T21:22:30.000Z
|
setup.py
|
alehuo/pyoidc-redis-session-backend
|
a24af967e9e5fa59aaa2511190db355b53d7d2dd
|
[
"MIT"
] | null | null | null |
setup.py
|
alehuo/pyoidc-redis-session-backend
|
a24af967e9e5fa59aaa2511190db355b53d7d2dd
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyoidc-redis-session-backend",
version="1.0.3",
author="alehuo",
author_email="aleksi.huotala@helsinki.fi",
description="Redis-based session storage for oic library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alehuo/pyoidc-redis-session-backend",
packages=['pyoidc_redis_session_backend'],
py_modules=['pyoidc_redis_session_backend.RedisSessionBackend'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
install_requires=['oic', 'jsonpickle', 'redis', 'pycryptodome', 'pycryptodomex'],
python_requires='>=3.6',
)
| 34.96
| 85
| 0.687643
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyoidc-redis-session-backend",
version="1.0.3",
author="alehuo",
author_email="aleksi.huotala@helsinki.fi",
description="Redis-based session storage for oic library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alehuo/pyoidc-redis-session-backend",
packages=['pyoidc_redis_session_backend'],
py_modules=['pyoidc_redis_session_backend.RedisSessionBackend'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
install_requires=['oic', 'jsonpickle', 'redis', 'pycryptodome', 'pycryptodomex'],
python_requires='>=3.6',
)
| true
| true
|
1c42c7432f025b78306fefdf7615f8f1c304bccc
| 747
|
py
|
Python
|
blog/migrations/0005_auto_20201017_2048.py
|
flo-ui/codingforengineers
|
b4bee0feec51e3cb7c06b6b493593ae01256b77d
|
[
"Apache-2.0"
] | null | null | null |
blog/migrations/0005_auto_20201017_2048.py
|
flo-ui/codingforengineers
|
b4bee0feec51e3cb7c06b6b493593ae01256b77d
|
[
"Apache-2.0"
] | 7
|
2020-10-07T09:18:05.000Z
|
2021-09-22T19:41:25.000Z
|
blog/migrations/0005_auto_20201017_2048.py
|
flo-ui/codingforengineers
|
b4bee0feec51e3cb7c06b6b493593ae01256b77d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-17 20:48
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('blog', '0004_auto_20200924_0914'),
]
operations = [
migrations.RemoveField(
model_name='blogpost',
name='labels',
),
migrations.AddField(
model_name='blogpost',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.DeleteModel(
name='BlogPostLabel',
),
]
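For reference, the migration above corresponds roughly to a model change of this shape: the hand-rolled labels relation and BlogPostLabel model go away and django-taggit's manager is attached instead. Only the tags field is implied by the migration; the other fields are illustrative:

from django.db import models
from taggit.managers import TaggableManager


class BlogPost(models.Model):
    # Illustrative fields, not taken from the migration.
    title = models.CharField(max_length=200)
    body = models.TextField()

    # Replaces the removed `labels` relation / BlogPostLabel model.
    tags = TaggableManager()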
| 25.758621
| 162
| 0.603748
|
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('blog', '0004_auto_20200924_0914'),
]
operations = [
migrations.RemoveField(
model_name='blogpost',
name='labels',
),
migrations.AddField(
model_name='blogpost',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.DeleteModel(
name='BlogPostLabel',
),
]
| true
| true
|
1c42c7f86de3c444721f82b1b3dde3b0e837b579
| 355
|
py
|
Python
|
hopechannelfi/settings/production.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | null | null | null |
hopechannelfi/settings/production.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | 9
|
2020-06-05T23:26:12.000Z
|
2021-06-17T20:23:14.000Z
|
hopechannelfi/settings/production.py
|
AdventistChurchFinland/hopechannel-wagtail
|
b5b06e0696a929d5d2e29a368002d27f54a8ff75
|
[
"MIT"
] | null | null | null |
from .base import *
DEBUG = False
# Security settings
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_DOMAIN = "cms.hopechannel.fi"
CSRF_COOKIE_SECURE = True
CSRF_TRUSTED_ORIGINS = ['cms.hopechannel.fi']
try:
from .local import *
except ImportError:
pass
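The trailing try/except lets a machine-specific local.py, placed next to these settings, override anything defined above. A minimal hypothetical local.py showing that pattern; the values are examples only:

# local.py, imported last by the production settings above.
DEBUG = False

# Per-deployment overrides of values set in production.py:
CSRF_COOKIE_DOMAIN = "staging.example.org"
CSRF_TRUSTED_ORIGINS = ["staging.example.org"]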
| 18.684211
| 45
| 0.785915
|
from .base import *
DEBUG = False
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_DOMAIN = "cms.hopechannel.fi"
CSRF_COOKIE_SECURE = True
CSRF_TRUSTED_ORIGINS = ['cms.hopechannel.fi']
try:
from .local import *
except ImportError:
pass
| true
| true
|
1c42c853d3067b192d5b242f63fcef0af32997c1
| 16,441
|
py
|
Python
|
fabfile.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 209
|
2015-02-06T02:24:22.000Z
|
2022-03-07T23:39:28.000Z
|
fabfile.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 12
|
2015-08-25T19:06:27.000Z
|
2021-12-26T09:46:30.000Z
|
fabfile.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 92
|
2015-03-04T11:13:55.000Z
|
2020-10-23T06:46:42.000Z
|
from __future__ import print_function, unicode_literals
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = __import__("settings", globals(), locals(), [], 0).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
header = "-" * len(func.__name__)
_print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads
    the related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
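    # e.g. a literal "100%" becomes "100%%", while "%(db_pass)s" is left intact
    # for the env substitution below.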
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
locale = "LC_ALL=%s" % env.locale
with hide("stdout"):
if locale not in sudo("cat /etc/default/locale"):
sudo("update-locale %s" % locale)
run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor")
sudo("easy_install pip")
sudo("pip install virtualenv mercurial")
@task
@log_call
def create():
"""
Create a new virtual environment for a project.
Pulls the project's repo from version control, adds system-level
configs for the project, and initialises the database with the
live host.
"""
# Create virtualenv
with cd(env.venv_home):
if exists(env.proj_name):
prompt = input("\nVirtualenv exists: %s"
"\nWould you like to replace it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
remove()
run("virtualenv %s --distribute" % env.proj_name)
vcs = "git" if env.git else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
# Create DB and DB user.
pw = db_pass()
        user_sql_args = (env.proj_name, pw.replace("'", "''"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate.
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Set up project.
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0])
for domain in env.domains:
python("from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain)
if env.admin_pass:
pw = env.admin_pass
user_py = ("from mezzanine.utils.models import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
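    # A HUP signal tells the running gunicorn master to gracefully reload its workers;
    # if it is not running yet, fall back to starting it via supervisor.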
if exists(pid_path):
sudo("kill -HUP `cat %s`" % pid_path)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
    collect any new static assets, and restart gunicorn's worker
    processes for the project.
"""
if not exists(env.venv_path):
prompt = input("\nVirtualenv doesn't exist: %s"
"\nWould you like to create it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
static_dir = static()
if exists(static_dir):
run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD" if git else "hg id -i"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the last commit checked out, the database,
and all static files. Calling rollback will revert all of these to
their state prior to the last deploy.
"""
with project():
with update_changed_requirements():
update = "git checkout" if env.git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(join(static(), "..")):
run("tar -xf %s" % join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def all():
"""
    Installs everything required on a new system and deploys the
    project, from the base software up to the deployed project.
"""
install()
if create():
deploy()
| 30.222426
| 78
| 0.58342
|
from __future__ import print_function, unicode_literals
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
pervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
ss = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
with virtualenv():
return sudo("pip install %s" % packages)
def postgres(command):
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
return python("from django.conf import settings;"
"print settings.STATIC_ROOT", show=False).split("\n")[-1]
@task
def manage(command):
return run("%s %s" % (env.manage, command))
%s"
"\nWould you like to replace it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
remove()
run("virtualenv %s --distribute" % env.proj_name)
vcs = "git" if env.git else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
pw = db_pass()
        user_sql_args = (env.proj_name, pw.replace("'", "''"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0])
for domain in env.domains:
python("from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain)
if env.admin_pass:
pw = env.admin_pass
user_py = ("from mezzanine.utils.models import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
= (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
if not exists(env.venv_path):
prompt = input("\nVirtualenv doesn't exist: %s"
"\nWould you like to create it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
static_dir = static()
if exists(static_dir):
run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD" if git else "hg id -i"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
with project():
with update_changed_requirements():
update = "git checkout" if env.git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(join(static(), "..")):
run("tar -xf %s" % join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def all():
install()
if create():
deploy()
| true
| true
|
1c42c91e8d558d79cc62fc0ff2d24ade178577e2
| 7,537
|
py
|
Python
|
brats/train2d.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | 1
|
2018-12-06T09:17:26.000Z
|
2018-12-06T09:17:26.000Z
|
brats/train2d.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | null | null | null |
brats/train2d.py
|
vuhoangminh/medical-segmentation
|
4a2a663d1f2d6de5c78bc521f6ed2aa1681a8804
|
[
"MIT"
] | 2
|
2019-05-07T10:07:33.000Z
|
2019-05-20T12:50:37.000Z
|
from comet_ml import Experiment
# to compute memory consumption ----------------------------------
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config_tf = tf.ConfigProto()
# config_tf.gpu_options.per_process_gpu_memory_fraction = 0.015
# config_tf.gpu_options.visible_device_list = "0"
# set_session(tf.Session(config=config_tf))
# to compute memory consumption ----------------------------------
from brats.config import config, config_unet
from unet3d.utils.print_utils import print_section
from brats.prepare_data import prepare_data
import unet3d.utils.args_utils as get_args
from unet3d.utils.path_utils import get_training_h5_paths
from unet3d.utils.path_utils import get_shape_from_string
from unet3d.utils.path_utils import get_project_dir
from unet3d.training import train_model
from unet2d.model import *
from unet2d.generator import get_training_and_validation_and_testing_generators2d
from unet3d.data import open_data_file
import os
import unet3d.utils.path_utils as path_utils
# os.environ["CUDA_VISIBLE_DEVICES"] = "0" # run on server
# pp = pprint.PrettyPrinter(indent=4)
# # pp.pprint(config)
config.update(config_unet)
# pp.pprint(config)
CURRENT_WORKING_DIR = os.path.realpath(__file__)
PROJECT_DIR = get_project_dir(CURRENT_WORKING_DIR, config["project_name"])
BRATS_DIR = os.path.join(PROJECT_DIR, config["brats_folder"])
DATASET_DIR = os.path.join(PROJECT_DIR, config["dataset_folder"])
def train(args):
data_path, trainids_path, validids_path, testids_path, model_path = get_training_h5_paths(
brats_dir=BRATS_DIR, args=args)
config["data_file"] = data_path
config["model_file"] = model_path
config["training_file"] = trainids_path
config["validation_file"] = validids_path
config["testing_file"] = testids_path
config["patch_shape"] = get_shape_from_string(args.patch_shape)
config["input_shape"] = tuple(
[config["nb_channels"]] + list(config["patch_shape"]))
if "casnet" in args.model:
config["data_type_generator"] = 'cascaded'
elif "sepnet" in args.model:
config["data_type_generator"] = 'separated'
else:
config["data_type_generator"] = 'combined'
if args.overwrite or not os.path.exists(data_path):
prepare_data(args)
print_section("Open file")
data_file_opened = open_data_file(config["data_file"])
print_section("get training and testing generators")
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_and_testing_generators2d(
data_file_opened,
batch_size=args.batch_size,
data_split=config["validation_split"],
overwrite=args.overwrite,
validation_keys_file=config["validation_file"],
training_keys_file=config["training_file"],
testing_keys_file=config["testing_file"],
n_labels=config["n_labels"],
labels=config["labels"],
patch_shape=config["patch_shape"],
validation_batch_size=args.batch_size,
validation_patch_overlap=config["validation_patch_overlap"],
training_patch_start_offset=config["training_patch_start_offset"],
augment_flipud=config["augment_flipud"],
augment_fliplr=config["augment_fliplr"],
augment_elastic=config["augment_elastic"],
augment_rotation=config["augment_rotation"],
augment_shift=config["augment_shift"],
augment_shear=config["augment_shear"],
augment_zoom=config["augment_zoom"],
n_augment=config["n_augment"],
skip_blank=config["skip_blank"],
is_test=args.is_test,
data_type_generator=config["data_type_generator"])
print("-"*60)
print("# Load or init model")
print("-"*60)
config["input_shape"] = config["input_shape"][0:len(
config["input_shape"])-1]
if not args.overwrite and os.path.exists(config["model_file"]):
print("load old model")
from unet3d.utils.model_utils import generate_model
if "casnet" in args.model:
args.loss = "casweighted"
model = generate_model(
config["model_file"], loss_function=args.loss, labels=config["labels"])
else:
# instantiate new model
if args.model == "isensee":
print("init isensee model")
model = isensee2d_model(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
loss_function=args.loss,
labels=config["labels"])
elif args.model == "unet":
print("init unet model")
model = unet_model_2d(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
deconvolution=config["deconvolution"],
depth=args.depth_unet,
n_base_filters=args.n_base_filters_unet,
loss_function=args.loss,
labels=config["labels"])
elif args.model == "segnet":
print("init segnet model")
model = segnet2d(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
depth=args.depth_unet,
n_base_filters=args.n_base_filters_unet,
loss_function=args.loss,
labels=config["labels"])
else:
raise ValueError("Model is NotImplemented. Please check")
model.summary()
print("-"*60)
print("# start training")
print("-"*60)
# run training
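    # Log the run to comet.ml only when this is not a test run (is_test == "0").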
if args.is_test == "0":
experiment = Experiment(api_key="AgTGwIoRULRgnfVR5M8mZ5AfS",
project_name="train",
workspace="vuhoangminh")
else:
experiment = None
if args.model == "isensee":
config["initial_learning_rate"] = 1e-6
print(config["initial_learning_rate"], config["learning_rate_drop"])
train_model(experiment=experiment,
model=model,
model_file=config["model_file"],
training_generator=train_generator,
validation_generator=validation_generator,
steps_per_epoch=n_train_steps,
validation_steps=n_validation_steps,
initial_learning_rate=config["initial_learning_rate"],
learning_rate_drop=config["learning_rate_drop"],
learning_rate_patience=config["patience"],
early_stopping_patience=config["early_stop"],
n_epochs=config["n_epochs"]
)
if args.is_test == "0":
experiment.log_parameters(config)
data_file_opened.close()
from keras import backend as K
K.clear_session()
def main():
global config
args = get_args.train2d()
config = path_utils.update_is_augment(args, config)
data_path, _, _, _, _ = path_utils.get_training_h5_paths(BRATS_DIR, args)
if args.overwrite or not os.path.exists(data_path):
prepare_data(args)
train(args)
if __name__ == "__main__":
main()
| 39.051813
| 132
| 0.641104
|
from comet_ml import Experiment
from brats.config import config, config_unet
from unet3d.utils.print_utils import print_section
from brats.prepare_data import prepare_data
import unet3d.utils.args_utils as get_args
from unet3d.utils.path_utils import get_training_h5_paths
from unet3d.utils.path_utils import get_shape_from_string
from unet3d.utils.path_utils import get_project_dir
from unet3d.training import train_model
from unet2d.model import *
from unet2d.generator import get_training_and_validation_and_testing_generators2d
from unet3d.data import open_data_file
import os
import unet3d.utils.path_utils as path_utils
CURRENT_WORKING_DIR = os.path.realpath(__file__)
PROJECT_DIR = get_project_dir(CURRENT_WORKING_DIR, config["project_name"])
BRATS_DIR = os.path.join(PROJECT_DIR, config["brats_folder"])
DATASET_DIR = os.path.join(PROJECT_DIR, config["dataset_folder"])
def train(args):
data_path, trainids_path, validids_path, testids_path, model_path = get_training_h5_paths(
brats_dir=BRATS_DIR, args=args)
config["data_file"] = data_path
config["model_file"] = model_path
config["training_file"] = trainids_path
config["validation_file"] = validids_path
config["testing_file"] = testids_path
config["patch_shape"] = get_shape_from_string(args.patch_shape)
config["input_shape"] = tuple(
[config["nb_channels"]] + list(config["patch_shape"]))
if "casnet" in args.model:
config["data_type_generator"] = 'cascaded'
elif "sepnet" in args.model:
config["data_type_generator"] = 'separated'
else:
config["data_type_generator"] = 'combined'
if args.overwrite or not os.path.exists(data_path):
prepare_data(args)
print_section("Open file")
data_file_opened = open_data_file(config["data_file"])
print_section("get training and testing generators")
train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_and_testing_generators2d(
data_file_opened,
batch_size=args.batch_size,
data_split=config["validation_split"],
overwrite=args.overwrite,
validation_keys_file=config["validation_file"],
training_keys_file=config["training_file"],
testing_keys_file=config["testing_file"],
n_labels=config["n_labels"],
labels=config["labels"],
patch_shape=config["patch_shape"],
validation_batch_size=args.batch_size,
validation_patch_overlap=config["validation_patch_overlap"],
training_patch_start_offset=config["training_patch_start_offset"],
augment_flipud=config["augment_flipud"],
augment_fliplr=config["augment_fliplr"],
augment_elastic=config["augment_elastic"],
augment_rotation=config["augment_rotation"],
augment_shift=config["augment_shift"],
augment_shear=config["augment_shear"],
augment_zoom=config["augment_zoom"],
n_augment=config["n_augment"],
skip_blank=config["skip_blank"],
is_test=args.is_test,
data_type_generator=config["data_type_generator"])
print("-"*60)
print("# Load or init model")
print("-"*60)
config["input_shape"] = config["input_shape"][0:len(
config["input_shape"])-1]
if not args.overwrite and os.path.exists(config["model_file"]):
print("load old model")
from unet3d.utils.model_utils import generate_model
if "casnet" in args.model:
args.loss = "casweighted"
model = generate_model(
config["model_file"], loss_function=args.loss, labels=config["labels"])
else:
if args.model == "isensee":
print("init isensee model")
model = isensee2d_model(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
loss_function=args.loss,
labels=config["labels"])
elif args.model == "unet":
print("init unet model")
model = unet_model_2d(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
deconvolution=config["deconvolution"],
depth=args.depth_unet,
n_base_filters=args.n_base_filters_unet,
loss_function=args.loss,
labels=config["labels"])
elif args.model == "segnet":
print("init segnet model")
model = segnet2d(input_shape=config["input_shape"],
n_labels=config["n_labels"],
initial_learning_rate=config["initial_learning_rate"],
depth=args.depth_unet,
n_base_filters=args.n_base_filters_unet,
loss_function=args.loss,
labels=config["labels"])
else:
raise ValueError("Model is NotImplemented. Please check")
model.summary()
print("-"*60)
print("# start training")
print("-"*60)
if args.is_test == "0":
experiment = Experiment(api_key="AgTGwIoRULRgnfVR5M8mZ5AfS",
project_name="train",
workspace="vuhoangminh")
else:
experiment = None
if args.model == "isensee":
config["initial_learning_rate"] = 1e-6
print(config["initial_learning_rate"], config["learning_rate_drop"])
train_model(experiment=experiment,
model=model,
model_file=config["model_file"],
training_generator=train_generator,
validation_generator=validation_generator,
steps_per_epoch=n_train_steps,
validation_steps=n_validation_steps,
initial_learning_rate=config["initial_learning_rate"],
learning_rate_drop=config["learning_rate_drop"],
learning_rate_patience=config["patience"],
early_stopping_patience=config["early_stop"],
n_epochs=config["n_epochs"]
)
if args.is_test == "0":
experiment.log_parameters(config)
data_file_opened.close()
from keras import backend as K
K.clear_session()
def main():
global config
args = get_args.train2d()
config = path_utils.update_is_augment(args, config)
data_path, _, _, _, _ = path_utils.get_training_h5_paths(BRATS_DIR, args)
if args.overwrite or not os.path.exists(data_path):
prepare_data(args)
train(args)
if __name__ == "__main__":
main()
| true
| true
|
1c42cc21158e0c14963552c6317818dfbff51627
| 943
|
py
|
Python
|
stdplugins/ding.py
|
spiderthehacker/PornHub
|
216535af2cf0ae052fe975c28ad37b422c7ef813
|
[
"Apache-2.0"
] | null | null | null |
stdplugins/ding.py
|
spiderthehacker/PornHub
|
216535af2cf0ae052fe975c28ad37b422c7ef813
|
[
"Apache-2.0"
] | null | null | null |
stdplugins/ding.py
|
spiderthehacker/PornHub
|
216535af2cf0ae052fe975c28ad37b422c7ef813
|
[
"Apache-2.0"
] | null | null | null |
"""Emoji
Available Commands:
.ding"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 10)
input_str = event.pattern_match.group(1)
if input_str == "ding":
await event.edit(input_str)
animation_chars = [
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬛🔴\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜\n⬜ [@spider_encrypted] ⬜\n⬜⬜⬜⬜⬜"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
| 17.792453
| 61
| 0.412513
|
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 10)
input_str = event.pattern_match.group(1)
if input_str == "ding":
await event.edit(input_str)
animation_chars = [
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬛🔴\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬜⬜⬛⬜\n⬜⬜⬜⬜🔴",
"⬜⬜⬛⬜⬜\n⬜⬜⬛⬜⬜\n⬜⬜🔴⬜⬜",
"⬜⬜⬛⬜⬜\n⬜⬛⬜⬜⬜\n🔴⬜⬜⬜⬜",
"🔴⬛⬛⬜⬜\n⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜\n⬜ [@spider_encrypted] ⬜\n⬜⬜⬜⬜⬜"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
| true
| true
|
1c42cc6a3f8c0764f09509d8cc4221f81b2264eb
| 5,078
|
py
|
Python
|
onlinecourse/models.py
|
jalsop24/django_project
|
40aaa5d82d4b9ad36136d6ca2811002d901895f4
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/models.py
|
jalsop24/django_project
|
40aaa5d82d4b9ad36136d6ca2811002d901895f4
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/models.py
|
jalsop24/django_project
|
40aaa5d82d4b9ad36136d6ca2811002d901895f4
|
[
"Apache-2.0"
] | null | null | null |
import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
# Learner model
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
# Course model
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# Lesson model
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
def __str__(self) -> str:
return f"Lesson: '{self.title}'"
# Enrollment model
# <HINT> Once a user enrolled a class, an enrollment entry should be created between the user and course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
class Question(models.Model):
    # Foreign key to lesson
    # Question text
    # Question grade/mark
question_text = models.TextField()
grade = models.IntegerField(default=1)
lesson_id = models.ForeignKey(Lesson, on_delete=models.CASCADE)
course = models.ManyToManyField(Course)
def __str__(self) -> str:
return f"'{self.question_text}'"
# <HINT> A sample model method to calculate if learner get the score of the question
# def is_get_score(self, selected_ids):
# all_answers = self.choice_set.filter(is_correct=True).count()
# selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
# if all_answers == selected_correct:
# return True
# else:
# return False
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.TextField()
is_correct = models.BooleanField(default=False)
def __str__(self) -> str:
return f"ID: <{self.pk}> Q: {str(self.question)} A: \"{self.choice_text}\""
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
    # Other fields and methods you would like to design
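# A minimal grading sketch (illustrative only, not part of the original hints): one
# possible way to score a Submission, giving full marks for a question only when the
# selected choices exactly match its correct choices.
def grade_submission(submission):
    selected_ids = set(submission.choices.values_list('id', flat=True))
    total = 0
    for question in Question.objects.filter(choice__id__in=selected_ids).distinct():
        correct_ids = set(question.choice_set.filter(is_correct=True).values_list('id', flat=True))
        chosen_ids = selected_ids & set(question.choice_set.values_list('id', flat=True))
        if correct_ids and chosen_ids == correct_ids:
            total += question.grade
    return total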
| 33.189542
| 104
| 0.690823
|
import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
def __str__(self) -> str:
return f"Lesson: '{self.title}'"
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
class Question(models.Model):
question_text = models.TextField()
grade = models.IntegerField(default=1)
lesson_id = models.ForeignKey(Lesson, on_delete=models.CASCADE)
course = models.ManyToManyField(Course)
def __str__(self) -> str:
return f"'{self.question_text}'"
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.TextField()
is_correct = models.BooleanField(default=False)
def __str__(self) -> str:
return f"ID: <{self.pk}> Q: {str(self.question)} A: \"{self.choice_text}\""
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
| true
| true
|
1c42cc84c8ff433444247cd51154d826166f9214
| 4,303
|
py
|
Python
|
classes/asset_content.py
|
CodeWringer/cookbookpy
|
5b9fb44d591154962509aed3a2a7cbbc56ecd130
|
[
"MIT"
] | null | null | null |
classes/asset_content.py
|
CodeWringer/cookbookpy
|
5b9fb44d591154962509aed3a2a7cbbc56ecd130
|
[
"MIT"
] | null | null | null |
classes/asset_content.py
|
CodeWringer/cookbookpy
|
5b9fb44d591154962509aed3a2a7cbbc56ecd130
|
[
"MIT"
] | null | null | null |
from classes.asset import Asset
import os
import utility.io
from utility.io import get_new_ext
from utility.url import get_url
class AssetContent(Asset):
"""Base class for recipe/markdown assets."""
def __init__(self, path):
super().__init__(path)
self.parent = None # A Category object.
self.title = self.get_section('Title')[0]
self.dest_name = os.path.splitext(self.name)[0] + '.html'
self.navigation = None # A Navigation object.
self.see_also = self.get_see_also() # NavigationForPath objects.
print('[Asset] Acquired %s see also entries for %s' % (str(len(self.see_also)), self.name))
def get_see_also(self):
"""Returns a list of NavigationForPath objects for every entry in the see_also section."""
lines = self.get_section('See Also')
see_also = []
for line in lines:
pass
# TODO
# url = NavigationForPath(self, line)
# see_also.append(url)
return see_also
def get_section(self, section_name):
"""Returns all lines, that belong to the section with the given name.
Parameters
---------
section_name : str
Name of the section to get.
"""
print('[Asset] Reading section "%s"' % (section_name))
sectionLines = []
indexSectionStart = -1
indexSectionEnd = -1
# Get start and end index of section
for line in self.text_content:
if (line.startswith('!' + section_name) or
line.startswith('!' + _(section_name))):
indexSectionStart = self.text_content.index(line) + 1
elif line.startswith('!') and indexSectionStart >= 0:
indexSectionEnd = self.text_content.index(line)
break
if indexSectionStart > 0 and indexSectionEnd < 0:
indexSectionEnd = len(self.text_content)
# Get section
for i in range(indexSectionStart, indexSectionEnd):
lineContent = self.text_content[i]
lineContent = lineContent.strip()
# Check if line is empty string
if not lineContent:
continue
sectionLines.append(lineContent)
print('[Asset] Returning %s lines for section "%s"' % (str(len(sectionLines)), section_name))
return sectionLines
def render(self, generator, dest_dir):
"""Renders and writes out this asset."""
# Render self.
rendered = self.get_rendered(generator)
# Write self to destination directory.
dest_file_path = os.path.join(dest_dir, self.dest_name)
utility.io.ensure_dir(dest_dir)
with open(dest_file_path, mode='wb') as outfile:
outfile.write(rendered.encode('utf-8'))
def get_neighbor_next(self):
"""Returns the first next neighbor,
or None, if there isn't one."""
if len(self.navigation.neighbors_next) > 0:
neighbor = self.navigation.neighbors_next[0]
return {
'title': neighbor.title,
'url': get_url(self.path,
get_new_ext(neighbor.path, 'html'))
}
else:
return None
def get_neighbor_prev(self):
"""Returns the first previous neighbor,
or None, if there isn't one."""
if len(self.navigation.neighbors_prev) > 0:
neighbor = self.navigation.neighbors_prev[0]
return {
'title': neighbor.title,
'url': get_url(self.path,
get_new_ext(neighbor.path, 'html'))
}
else:
return None
def get_categories(self, generator):
"""Returns a list of root categories.
Parameters
        ----------
generator : classes.Generator
Generator object whose root categories to get.
"""
categories = []
for category in generator.root_category.children:
categories.append({ 'name': category.name,
'url': get_url(self.path,
category.file_path) })
return categories
def get_rendered(self, generator):
pass
| 36.466102
| 101
| 0.573553
|
from classes.asset import Asset
import os
import utility.io
from utility.io import get_new_ext
from utility.url import get_url
class AssetContent(Asset):
def __init__(self, path):
super().__init__(path)
self.parent = None
self.title = self.get_section('Title')[0]
self.dest_name = os.path.splitext(self.name)[0] + '.html'
self.navigation = None
self.see_also = self.get_see_also()
print('[Asset] Acquired %s see also entries for %s' % (str(len(self.see_also)), self.name))
def get_see_also(self):
lines = self.get_section('See Also')
see_also = []
for line in lines:
pass
return see_also
def get_section(self, section_name):
print('[Asset] Reading section "%s"' % (section_name))
sectionLines = []
indexSectionStart = -1
indexSectionEnd = -1
for line in self.text_content:
if (line.startswith('!' + section_name) or
line.startswith('!' + _(section_name))):
indexSectionStart = self.text_content.index(line) + 1
elif line.startswith('!') and indexSectionStart >= 0:
indexSectionEnd = self.text_content.index(line)
break
if indexSectionStart > 0 and indexSectionEnd < 0:
indexSectionEnd = len(self.text_content)
for i in range(indexSectionStart, indexSectionEnd):
lineContent = self.text_content[i]
lineContent = lineContent.strip()
if not lineContent:
continue
sectionLines.append(lineContent)
print('[Asset] Returning %s lines for section "%s"' % (str(len(sectionLines)), section_name))
return sectionLines
def render(self, generator, dest_dir):
rendered = self.get_rendered(generator)
dest_file_path = os.path.join(dest_dir, self.dest_name)
utility.io.ensure_dir(dest_dir)
with open(dest_file_path, mode='wb') as outfile:
outfile.write(rendered.encode('utf-8'))
def get_neighbor_next(self):
if len(self.navigation.neighbors_next) > 0:
neighbor = self.navigation.neighbors_next[0]
return {
'title': neighbor.title,
'url': get_url(self.path,
get_new_ext(neighbor.path, 'html'))
}
else:
return None
def get_neighbor_prev(self):
if len(self.navigation.neighbors_prev) > 0:
neighbor = self.navigation.neighbors_prev[0]
return {
'title': neighbor.title,
'url': get_url(self.path,
get_new_ext(neighbor.path, 'html'))
}
else:
return None
def get_categories(self, generator):
categories = []
for category in generator.root_category.children:
categories.append({ 'name': category.name,
'url': get_url(self.path,
category.file_path) })
return categories
def get_rendered(self, generator):
pass
| true
| true
|
1c42ccedd8ff09adddcc1cfdc255863184c47ec2
| 20,881
|
py
|
Python
|
tests/core/test_TransactionPool.py
|
pur-token/pur-core
|
ce372be274262a839c45436dfee58ba4ea105074
|
[
"MIT"
] | null | null | null |
tests/core/test_TransactionPool.py
|
pur-token/pur-core
|
ce372be274262a839c45436dfee58ba4ea105074
|
[
"MIT"
] | null | null | null |
tests/core/test_TransactionPool.py
|
pur-token/pur-core
|
ce372be274262a839c45436dfee58ba4ea105074
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from unittest import TestCase
from mock import Mock, patch
from pur.core.OptimizedAddressState import OptimizedAddressState
from pur.core.Block import Block
from pur.core.State import State
from pur.core.ChainManager import ChainManager
from pur.core.txs.CoinBase import CoinBase
from pur.core.txs.TransferTransaction import TransferTransaction
from pur.core.TransactionPool import TransactionPool
from tests.misc.helper import replacement_getTime, set_pur_dir, get_alice_purss, get_bob_purss
from tests.misc.MockHelper.mock_function import MockFunction
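# Convenience factory: builds a Mock that quacks like a TransferTransaction for the pool tests.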
def make_tx(txhash=b'hashbrownies', fee=1, autospec=TransferTransaction, PK=b'publickey', **kwargs):
return Mock(autospec=autospec, txhash=txhash, fee=fee, PK=PK, **kwargs)
def replacement_from_pbdata(protobuf_tx):
return protobuf_tx
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
class TestTransactionPool(TestCase):
"""
TransactionPool sits between incoming Transactions from the network and Blocks.
First, incoming Transactions are pending Transactions and go into TransactionPool.pending_tx_pool.
The TxnProcessor has to validate them. Once they are validated, the TxnProcessor puts them into
TransactionPool.transaction_pool, where they wait to be put into the next mined Block.
"""
def setUp(self):
self.txpool = TransactionPool(None)
def test_add_tx_to_pool(self):
tx = make_tx()
result = self.txpool.add_tx_to_pool(tx, 1, replacement_getTime())
self.assertTrue(result)
self.assertEqual(len(self.txpool.transactions), 1)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', autospec=True)
def test_add_tx_to_pool_while_full(self, m_is_full_func):
m_is_full_func.return_value = True
tx = make_tx()
result = self.txpool.add_tx_to_pool(tx, 1, replacement_getTime())
self.assertFalse(result) # refused to add to the pool
self.assertEqual(len(self.txpool.transactions), 0) # remains untouched
@patch('pur.core.TransactionPool.config', autospec=True)
def test_is_full_transaction_pool(self, m_config):
m_config.user.transaction_pool_size = 2
result = self.txpool.is_full_transaction_pool()
self.assertFalse(result)
tx1 = make_tx(fee=1)
tx2 = make_tx(fee=2)
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
result = self.txpool.is_full_transaction_pool()
self.assertTrue(result)
def test_get_tx_index_from_pool(self):
tx1 = make_tx(txhash=b'red')
tx2 = make_tx(txhash=b'blue')
tx3 = make_tx(txhash=b'purpink')
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx3, 1, replacement_getTime())
idx = self.txpool.get_tx_index_from_pool(b'purpink')
self.assertEqual(idx, 2)
idx = self.txpool.get_tx_index_from_pool(b'red')
self.assertEqual(idx, 0)
idx = self.txpool.get_tx_index_from_pool(b'ultraviolet')
self.assertEqual(idx, -1)
def test_remove_tx_from_pool(self):
tx1 = make_tx(txhash=b'red')
tx2 = make_tx(txhash=b'blue')
tx3 = make_tx(txhash=b'purpink')
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
# If we try to remove a tx that wasn't there, the transaction pool should be untouched
self.assertEqual(len(self.txpool.transaction_pool), 1)
self.txpool.remove_tx_from_pool(tx2)
self.assertEqual(len(self.txpool.transaction_pool), 1)
# Now let's remove a tx from the heap. The size should decrease.
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx3, 1, replacement_getTime())
self.assertEqual(len(self.txpool.transaction_pool), 3)
self.txpool.remove_tx_from_pool(tx2)
self.assertEqual(len(self.txpool.transaction_pool), 2)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool(self, m_is_full_pending_transaction_pool):
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
# Due to the straightforward way the function is written, no special setup is needed to get the tx to go in.
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertTrue(result)
# If we try to re-add the same tx to the pending_tx_pool, though, it should fail.
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_tx_already_validated(self, m_is_full_pending_transaction_pool):
"""
If the tx is already in TransactionPool.transaction_pool, do not add it to pending_tx_pool.
"""
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_is_full_already(self, m_is_full_pending_transaction_pool):
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = True
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.logger')
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_rejects_coinbase_txs(self, m_is_full_pending_transaction_pool, m_logger):
tx1 = CoinBase()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.config', autospec=True)
def test_is_full_pending_transaction_pool(self, m_config):
"""
        pending_transaction_pool_size is 3, and pending_transaction_pool_reserve (1) is subtracted from it, so the
        effective size is 2. Adding a third transaction with ignore_reserve=True will fail, but with
        ignore_reserve=False it goes into the reserve. After that, any further transactions are always rejected.
"""
m_config.user.pending_transaction_pool_size = 3
m_config.user.pending_transaction_pool_reserve = 1
tx4 = make_tx(txhash=b'red')
tx1 = make_tx(txhash=b'green')
tx3 = make_tx(txhash=b'blue')
tx2 = make_tx(txhash=b'pink')
ip = '127.0.0.1'
self.txpool.update_pending_tx_pool(tx1, ip)
self.txpool.update_pending_tx_pool(tx2, ip)
result = self.txpool.update_pending_tx_pool(tx3, ip, ignore_reserve=True)
self.assertFalse(result)
result = self.txpool.update_pending_tx_pool(tx3, ip, ignore_reserve=False)
self.assertTrue(result)
result = self.txpool.update_pending_tx_pool(tx4, ip, ignore_reserve=True)
self.assertFalse(result)
result = self.txpool.update_pending_tx_pool(tx4, ip, ignore_reserve=False)
self.assertFalse(result)
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
def test_get_pending_transaction(self):
"""
Getting a pending transaction also removes it from the TransactionPool.
        Because it may return either None or a (tx, timestamp) tuple, TxnProcessor stores the return value in a
        single variable and only unpacks it later if it is not None.
"""
tx1 = make_tx()
ip = '127.0.0.1'
self.txpool.update_pending_tx_pool(tx1, ip)
self.assertEqual(len(self.txpool.pending_tx_pool_hash), 1)
tx_timestamp = self.txpool.get_pending_transaction()
self.assertEqual(tx_timestamp[0], tx1)
self.assertEqual(len(self.txpool.pending_tx_pool_hash), 0)
tx_timestamp = self.txpool.get_pending_transaction()
self.assertIsNone(tx_timestamp)
@patch('pur.core.TransactionPool.logger')
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', return_value=make_tx())
@patch('pur.core.TransactionPool.TransactionPool.add_tx_to_pool', return_value=True)
def test_add_tx_from_block_to_pool(self, m_add_tx_to_pool, m_from_pbdata, m_logger):
m_block = Mock(autospec=Block, block_number=5, headerhash=b'test block header')
m_block.transactions = [CoinBase(), make_tx(), make_tx()]
self.txpool.add_tx_from_block_to_pool(m_block, 5)
self.assertEqual(m_add_tx_to_pool.call_count, 2) # 2 because the function ignores the Coinbase tx
# If there is a problem adding to the tx_pool, the logger should be invoked.
m_add_tx_to_pool.return_value = False
self.txpool.add_tx_from_block_to_pool(m_block, 5)
m_logger.warning.assert_called()
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_remove_tx_in_block_from_pool(self):
m_block = Mock(autospec=Block)
tx1 = make_tx(name='Mock TX 1', ots_key=1, PK=b'pk')
tx2 = make_tx(name='Mock TX 2', ots_key=2, PK=b'pk')
m_block.transactions = [CoinBase(), tx1, tx2]
# To remove the tx from the pool we have to add it first!
self.txpool.add_tx_to_pool(tx1, 5)
self.txpool.add_tx_to_pool(tx2, 5)
self.assertEqual(len(self.txpool.transaction_pool), 2)
self.txpool.remove_tx_in_block_from_pool(m_block)
self.assertEqual(len(self.txpool.transaction_pool), 0)
@patch('pur.core.TransactionInfo.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
def test_check_stale_txn(self, m_is_full_transaction_pool, m_config):
"""
Stale Transactions are Transactions that were supposed to go into block 5, but for some reason didn't make it.
They languish in TransactionPool until check_stale_txn() checks the Pool and updates the tx_info to make them
go into a higher block.
For each stale transaction, P2PFactory.broadcast_tx() will be called.
"""
# Redefine at what point should txs be considered stale
m_config.user.stale_transaction_threshold = 2
bob_purss = get_bob_purss(4)
alice_purss = get_alice_purss(4)
tx1 = TransferTransaction.create(addrs_to=[bob_purss.address], amounts=[1000000],
message_data=None, fee=1, purss_pk=alice_purss.pk)
tx1.sign(alice_purss)
tx2 = TransferTransaction.create(addrs_to=[bob_purss.address], amounts=[10000],
message_data=None, fee=1, purss_pk=alice_purss.pk)
tx2.sign(alice_purss)
m_broadcast_tx = Mock(name='Mock Broadcast TX function (in P2PFactory)')
self.txpool.add_tx_to_pool(tx1, 5)
self.txpool.add_tx_to_pool(tx2, 5)
self.txpool.set_broadcast_tx(m_broadcast_tx)
with set_pur_dir('no_data'):
state = State()
chain_manager = ChainManager(state)
self.txpool.check_stale_txn(chain_manager.new_state_container, chain_manager.update_state_container, 8)
self.assertEqual(m_broadcast_tx.call_count, 0)
m = MockFunction()
bob_address_state = OptimizedAddressState.get_default(bob_purss.address)
bob_address_state.pbdata.balance = 1000000000000
m.put(bob_purss.address, bob_address_state)
chain_manager.get_optimized_address_state = m.get
tx3 = TransferTransaction.create(addrs_to=[alice_purss.address], amounts=[10000],
message_data=None, fee=1, purss_pk=bob_purss.pk)
tx3.sign(bob_purss)
self.txpool.add_tx_to_pool(tx3, 5)
self.txpool.check_stale_txn(chain_manager.new_state_container, chain_manager.update_state_container, 8)
self.assertEqual(m_broadcast_tx.call_count, 1)
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
class TestTransactionPoolRemoveTxInBlockFromPool(TestCase):
"""
Up until 4096 (max_ots_tracking_index), the state of each OTS index USED/UNUSED is stored in a bitfield.
Default height of wallet is 12, so 2^12 = 4096 obviously
Above that however, the network only keeps track of the last used OTS index as a number. So the next
tx.ots_index must be 4096 < ots_index < network_ots_index_counter (AddressState.ots_counter).
Suppose you have a Block with two Transactions from the same public address in it, with ots_index=4098 and 4099.
    If TransactionPool has 4097, it should be invalidated because 4098 is already used and we are on a counter-based
    method of keeping track of OTS indexes.
Of course, 4098 and 4099 also have to be deleted.
"""
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
def setUp(self):
self.txpool = TransactionPool(None)
self.tx_3907 = make_tx(name='Mock TX 3907', txhash=b'h3907', ots_key=3907)
self.tx_4095 = make_tx(name='Mock TX 4095', txhash=b'h4095', ots_key=4095)
self.tx_4096 = make_tx(name='Mock TX 4096', txhash=b'h4096', ots_key=4096)
self.tx_4097 = make_tx(name='Mock TX 4097', txhash=b'h4097', ots_key=4097)
self.tx_4098 = make_tx(name='Mock TX 4098', txhash=b'h4098', ots_key=4098)
self.tx_4099 = make_tx(name='Mock TX 4099', txhash=b'h4099', ots_key=4099)
self.tx_4100 = make_tx(name='Mock TX 4100', txhash=b'h4100', ots_key=4100)
self.tx_4200 = make_tx(name='Mock TX 4200', txhash=b'h4200', ots_key=4200)
# To remove the tx from the pool we have to add it first!
self.txpool.add_tx_to_pool(self.tx_4095, 5)
self.txpool.add_tx_to_pool(self.tx_4096, 5)
self.txpool.add_tx_to_pool(self.tx_4097, 5)
self.txpool.add_tx_to_pool(self.tx_4098, 5)
self.txpool.add_tx_to_pool(self.tx_4099, 5)
self.txpool.add_tx_to_pool(self.tx_4100, 5)
self.txpool.add_tx_to_pool(self.tx_4200, 5)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_4098_4099(self, m_is_full_transaction_pool, m_config):
"""
TxPool = [4095-4100, 4200]
Block = [4098, 4099]
TxPool Afterwards = [4095, 4100, 4200]
"""
# Ensure that a "large OTS index" is 4096
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4098, self.tx_4099]
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 3)
self.assertNotIn(self.tx_4097, txs_in_txpool)
self.assertNotIn(self.tx_4098, txs_in_txpool)
self.assertNotIn(self.tx_4099, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(self.tx_4100, txs_in_txpool)
self.assertIn(self.tx_4200, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_txpool_3907_block_4098_4099(self, m_is_full_transaction_pool, m_config):
"""
TxPool = [3907, 4095-4100, 4200]
Block = [4098, 4099]
TxPool Afterwards = [3907, 4095, 4100, 4200]
"""
# Ensure that a "large OTS index" is 4096
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4098, self.tx_4099]
self.txpool.add_tx_to_pool(self.tx_3907, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
# 3907 should also remain in the Pool, since it is below max_ots_tracking_index and therefore exempt from the counter-based tracking
self.assertEqual(len(self.txpool.transaction_pool), 4)
self.assertNotIn(self.tx_4097, txs_in_txpool)
self.assertNotIn(self.tx_4098, txs_in_txpool)
self.assertNotIn(self.tx_4099, txs_in_txpool)
self.assertIn(self.tx_3907, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(self.tx_4100, txs_in_txpool)
self.assertIn(self.tx_4200, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_4200(self, m_is_full_transaction_pool, m_config):
"""
TxPool = [3907, 4095-4100, 4200]
Block = [4200]
TxPool Afterwards = [3907, 4095]
"""
# Ensure that a "large OTS index" is 4096
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4200]
self.txpool.add_tx_to_pool(self.tx_3907, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 2)
self.assertIn(self.tx_3907, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_txpool_4095_4096_4097_otherppl_block_4098_4099(self, m_is_full_transaction_pool, m_config):
"""
TxPool = [4096-4100, 4200, 4095-4097_otherppl]
Block = [4200]
TxPool Afterwards = [4095, 4095-4097_otherppl]
"""
# Ensure that a "large OTS index" is 4096
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4200]
tx_other_4095 = make_tx(name='Mock TX 4095', txhash=b'h4095_other', ots_key=4095, PK='otherppl')
tx_other_4096 = make_tx(name='Mock TX 4096', txhash=b'h4096_other', ots_key=4096, PK='otherppl')
tx_other_4097 = make_tx(name='Mock TX 4097', txhash=b'h4097_other', ots_key=4097, PK='otherppl')
self.txpool.add_tx_to_pool(tx_other_4095, 5)
self.txpool.add_tx_to_pool(tx_other_4096, 5)
self.txpool.add_tx_to_pool(tx_other_4097, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 4)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(tx_other_4095, txs_in_txpool)
self.assertIn(tx_other_4096, txs_in_txpool)
self.assertIn(tx_other_4097, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_1000(self, m_is_full_transaction_pool, m_config):
"""
TxPool = [4095-4100, 4200]
Block = [1000]
TxPool Afterwards = [4095-4100, 4200]
"""
# Ensure that a "large OTS index" is 4096
m_config.dev.max_ots_tracking_index = 4096
tx_1000 = make_tx(name='Mock TX 1000', txhash=b'h1000', ots_key=1000)
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), tx_1000]
self.assertEqual(7, len(self.txpool.transaction_pool))
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(7, len(txs_in_txpool))
| 46.299335
| 119
| 0.704516
|
from unittest import TestCase
from mock import Mock, patch
from pur.core.OptimizedAddressState import OptimizedAddressState
from pur.core.Block import Block
from pur.core.State import State
from pur.core.ChainManager import ChainManager
from pur.core.txs.CoinBase import CoinBase
from pur.core.txs.TransferTransaction import TransferTransaction
from pur.core.TransactionPool import TransactionPool
from tests.misc.helper import replacement_getTime, set_pur_dir, get_alice_purss, get_bob_purss
from tests.misc.MockHelper.mock_function import MockFunction
def make_tx(txhash=b'hashbrownies', fee=1, autospec=TransferTransaction, PK=b'publickey', **kwargs):
return Mock(autospec=autospec, txhash=txhash, fee=fee, PK=PK, **kwargs)
def replacement_from_pbdata(protobuf_tx):
return protobuf_tx
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
class TestTransactionPool(TestCase):
def setUp(self):
self.txpool = TransactionPool(None)
def test_add_tx_to_pool(self):
tx = make_tx()
result = self.txpool.add_tx_to_pool(tx, 1, replacement_getTime())
self.assertTrue(result)
self.assertEqual(len(self.txpool.transactions), 1)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', autospec=True)
def test_add_tx_to_pool_while_full(self, m_is_full_func):
m_is_full_func.return_value = True
tx = make_tx()
result = self.txpool.add_tx_to_pool(tx, 1, replacement_getTime())
self.assertFalse(result)
self.assertEqual(len(self.txpool.transactions), 0)
@patch('pur.core.TransactionPool.config', autospec=True)
def test_is_full_transaction_pool(self, m_config):
m_config.user.transaction_pool_size = 2
result = self.txpool.is_full_transaction_pool()
self.assertFalse(result)
tx1 = make_tx(fee=1)
tx2 = make_tx(fee=2)
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
result = self.txpool.is_full_transaction_pool()
self.assertTrue(result)
def test_get_tx_index_from_pool(self):
tx1 = make_tx(txhash=b'red')
tx2 = make_tx(txhash=b'blue')
tx3 = make_tx(txhash=b'purpink')
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx3, 1, replacement_getTime())
idx = self.txpool.get_tx_index_from_pool(b'purpink')
self.assertEqual(idx, 2)
idx = self.txpool.get_tx_index_from_pool(b'red')
self.assertEqual(idx, 0)
idx = self.txpool.get_tx_index_from_pool(b'ultraviolet')
self.assertEqual(idx, -1)
def test_remove_tx_from_pool(self):
tx1 = make_tx(txhash=b'red')
tx2 = make_tx(txhash=b'blue')
tx3 = make_tx(txhash=b'purpink')
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
self.assertEqual(len(self.txpool.transaction_pool), 1)
self.txpool.remove_tx_from_pool(tx2)
self.assertEqual(len(self.txpool.transaction_pool), 1)
# Now let's remove a tx from the heap. The size should decrease.
self.txpool.add_tx_to_pool(tx2, 1, replacement_getTime())
self.txpool.add_tx_to_pool(tx3, 1, replacement_getTime())
self.assertEqual(len(self.txpool.transaction_pool), 3)
self.txpool.remove_tx_from_pool(tx2)
self.assertEqual(len(self.txpool.transaction_pool), 2)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool(self, m_is_full_pending_transaction_pool):
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertTrue(result)
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_tx_already_validated(self, m_is_full_pending_transaction_pool):
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
self.txpool.add_tx_to_pool(tx1, 1, replacement_getTime())
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_is_full_already(self, m_is_full_pending_transaction_pool):
tx1 = make_tx()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = True
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.logger')
@patch('pur.core.TransactionPool.TransactionPool.is_full_pending_transaction_pool', autospec=True)
def test_update_pending_tx_pool_rejects_coinbase_txs(self, m_is_full_pending_transaction_pool, m_logger):
tx1 = CoinBase()
ip = '127.0.0.1'
m_is_full_pending_transaction_pool.return_value = False
result = self.txpool.update_pending_tx_pool(tx1, ip)
self.assertFalse(result)
@patch('pur.core.TransactionPool.config', autospec=True)
def test_is_full_pending_transaction_pool(self, m_config):
m_config.user.pending_transaction_pool_size = 3
m_config.user.pending_transaction_pool_reserve = 1
tx4 = make_tx(txhash=b'red')
tx1 = make_tx(txhash=b'green')
tx3 = make_tx(txhash=b'blue')
tx2 = make_tx(txhash=b'pink')
ip = '127.0.0.1'
self.txpool.update_pending_tx_pool(tx1, ip)
self.txpool.update_pending_tx_pool(tx2, ip)
result = self.txpool.update_pending_tx_pool(tx3, ip, ignore_reserve=True)
self.assertFalse(result)
result = self.txpool.update_pending_tx_pool(tx3, ip, ignore_reserve=False)
self.assertTrue(result)
result = self.txpool.update_pending_tx_pool(tx4, ip, ignore_reserve=True)
self.assertFalse(result)
result = self.txpool.update_pending_tx_pool(tx4, ip, ignore_reserve=False)
self.assertFalse(result)
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
def test_get_pending_transaction(self):
tx1 = make_tx()
ip = '127.0.0.1'
self.txpool.update_pending_tx_pool(tx1, ip)
self.assertEqual(len(self.txpool.pending_tx_pool_hash), 1)
tx_timestamp = self.txpool.get_pending_transaction()
self.assertEqual(tx_timestamp[0], tx1)
self.assertEqual(len(self.txpool.pending_tx_pool_hash), 0)
tx_timestamp = self.txpool.get_pending_transaction()
self.assertIsNone(tx_timestamp)
@patch('pur.core.TransactionPool.logger')
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', return_value=make_tx())
@patch('pur.core.TransactionPool.TransactionPool.add_tx_to_pool', return_value=True)
def test_add_tx_from_block_to_pool(self, m_add_tx_to_pool, m_from_pbdata, m_logger):
m_block = Mock(autospec=Block, block_number=5, headerhash=b'test block header')
m_block.transactions = [CoinBase(), make_tx(), make_tx()]
self.txpool.add_tx_from_block_to_pool(m_block, 5)
self.assertEqual(m_add_tx_to_pool.call_count, 2)
m_add_tx_to_pool.return_value = False
self.txpool.add_tx_from_block_to_pool(m_block, 5)
m_logger.warning.assert_called()
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_remove_tx_in_block_from_pool(self):
m_block = Mock(autospec=Block)
tx1 = make_tx(name='Mock TX 1', ots_key=1, PK=b'pk')
tx2 = make_tx(name='Mock TX 2', ots_key=2, PK=b'pk')
m_block.transactions = [CoinBase(), tx1, tx2]
self.txpool.add_tx_to_pool(tx1, 5)
self.txpool.add_tx_to_pool(tx2, 5)
self.assertEqual(len(self.txpool.transaction_pool), 2)
self.txpool.remove_tx_in_block_from_pool(m_block)
self.assertEqual(len(self.txpool.transaction_pool), 0)
@patch('pur.core.TransactionInfo.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
def test_check_stale_txn(self, m_is_full_transaction_pool, m_config):
m_config.user.stale_transaction_threshold = 2
bob_purss = get_bob_purss(4)
alice_purss = get_alice_purss(4)
tx1 = TransferTransaction.create(addrs_to=[bob_purss.address], amounts=[1000000],
message_data=None, fee=1, purss_pk=alice_purss.pk)
tx1.sign(alice_purss)
tx2 = TransferTransaction.create(addrs_to=[bob_purss.address], amounts=[10000],
message_data=None, fee=1, purss_pk=alice_purss.pk)
tx2.sign(alice_purss)
m_broadcast_tx = Mock(name='Mock Broadcast TX function (in P2PFactory)')
self.txpool.add_tx_to_pool(tx1, 5)
self.txpool.add_tx_to_pool(tx2, 5)
self.txpool.set_broadcast_tx(m_broadcast_tx)
with set_pur_dir('no_data'):
state = State()
chain_manager = ChainManager(state)
self.txpool.check_stale_txn(chain_manager.new_state_container, chain_manager.update_state_container, 8)
self.assertEqual(m_broadcast_tx.call_count, 0)
m = MockFunction()
bob_address_state = OptimizedAddressState.get_default(bob_purss.address)
bob_address_state.pbdata.balance = 1000000000000
m.put(bob_purss.address, bob_address_state)
chain_manager.get_optimized_address_state = m.get
tx3 = TransferTransaction.create(addrs_to=[alice_purss.address], amounts=[10000],
message_data=None, fee=1, purss_pk=bob_purss.pk)
tx3.sign(bob_purss)
self.txpool.add_tx_to_pool(tx3, 5)
self.txpool.check_stale_txn(chain_manager.new_state_container, chain_manager.update_state_container, 8)
self.assertEqual(m_broadcast_tx.call_count, 1)
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
class TestTransactionPoolRemoveTxInBlockFromPool(TestCase):
@patch('pur.core.misc.ntp.getTime', new=replacement_getTime)
def setUp(self):
self.txpool = TransactionPool(None)
self.tx_3907 = make_tx(name='Mock TX 3907', txhash=b'h3907', ots_key=3907)
self.tx_4095 = make_tx(name='Mock TX 4095', txhash=b'h4095', ots_key=4095)
self.tx_4096 = make_tx(name='Mock TX 4096', txhash=b'h4096', ots_key=4096)
self.tx_4097 = make_tx(name='Mock TX 4097', txhash=b'h4097', ots_key=4097)
self.tx_4098 = make_tx(name='Mock TX 4098', txhash=b'h4098', ots_key=4098)
self.tx_4099 = make_tx(name='Mock TX 4099', txhash=b'h4099', ots_key=4099)
self.tx_4100 = make_tx(name='Mock TX 4100', txhash=b'h4100', ots_key=4100)
self.tx_4200 = make_tx(name='Mock TX 4200', txhash=b'h4200', ots_key=4200)
self.txpool.add_tx_to_pool(self.tx_4095, 5)
self.txpool.add_tx_to_pool(self.tx_4096, 5)
self.txpool.add_tx_to_pool(self.tx_4097, 5)
self.txpool.add_tx_to_pool(self.tx_4098, 5)
self.txpool.add_tx_to_pool(self.tx_4099, 5)
self.txpool.add_tx_to_pool(self.tx_4100, 5)
self.txpool.add_tx_to_pool(self.tx_4200, 5)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_4098_4099(self, m_is_full_transaction_pool, m_config):
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4098, self.tx_4099]
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 3)
self.assertNotIn(self.tx_4097, txs_in_txpool)
self.assertNotIn(self.tx_4098, txs_in_txpool)
self.assertNotIn(self.tx_4099, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(self.tx_4100, txs_in_txpool)
self.assertIn(self.tx_4200, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_txpool_3907_block_4098_4099(self, m_is_full_transaction_pool, m_config):
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4098, self.tx_4099]
self.txpool.add_tx_to_pool(self.tx_3907, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 4)
self.assertNotIn(self.tx_4097, txs_in_txpool)
self.assertNotIn(self.tx_4098, txs_in_txpool)
self.assertNotIn(self.tx_4099, txs_in_txpool)
self.assertIn(self.tx_3907, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(self.tx_4100, txs_in_txpool)
self.assertIn(self.tx_4200, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_4200(self, m_is_full_transaction_pool, m_config):
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4200]
self.txpool.add_tx_to_pool(self.tx_3907, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 2)
self.assertIn(self.tx_3907, txs_in_txpool)
self.assertIn(self.tx_4095, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_txpool_4095_4096_4097_otherppl_block_4098_4099(self, m_is_full_transaction_pool, m_config):
m_config.dev.max_ots_tracking_index = 4096
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), self.tx_4200]
tx_other_4095 = make_tx(name='Mock TX 4095', txhash=b'h4095_other', ots_key=4095, PK='otherppl')
tx_other_4096 = make_tx(name='Mock TX 4096', txhash=b'h4096_other', ots_key=4096, PK='otherppl')
tx_other_4097 = make_tx(name='Mock TX 4097', txhash=b'h4097_other', ots_key=4097, PK='otherppl')
self.txpool.add_tx_to_pool(tx_other_4095, 5)
self.txpool.add_tx_to_pool(tx_other_4096, 5)
self.txpool.add_tx_to_pool(tx_other_4097, 5)
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(len(self.txpool.transaction_pool), 4)
self.assertIn(self.tx_4095, txs_in_txpool)
self.assertIn(tx_other_4095, txs_in_txpool)
self.assertIn(tx_other_4096, txs_in_txpool)
self.assertIn(tx_other_4097, txs_in_txpool)
@patch('pur.core.TransactionPool.config', autospec=True)
@patch('pur.core.TransactionPool.TransactionPool.is_full_transaction_pool', return_value=False)
@patch('pur.core.txs.Transaction.Transaction.from_pbdata', new=replacement_from_pbdata)
def test_block_1000(self, m_is_full_transaction_pool, m_config):
m_config.dev.max_ots_tracking_index = 4096
tx_1000 = make_tx(name='Mock TX 1000', txhash=b'h1000', ots_key=1000)
m_block = Mock(autospec=Block)
m_block.transactions = [CoinBase(), tx_1000]
self.assertEqual(7, len(self.txpool.transaction_pool))
self.txpool.remove_tx_in_block_from_pool(m_block)
txs_in_txpool = [t[1].transaction for t in self.txpool.transaction_pool]
self.assertEqual(7, len(txs_in_txpool))
| true
| true
|
1c42ccf8fd88a208f112d7e51a3524c270f106be
| 28,847
|
py
|
Python
|
src/python/pants/engine/internals/engine_test.py
|
cristianmatache/pants
|
3def49fd11784b086b3e2e76bb9bcff09b43175b
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/engine_test.py
|
cristianmatache/pants
|
3def49fd11784b086b3e2e76bb9bcff09b43175b
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/engine_test.py
|
cristianmatache/pants
|
3def49fd11784b086b3e2e76bb9bcff09b43175b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import time
import unittest
from dataclasses import dataclass, field
from textwrap import dedent
from typing import List, Optional
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.fs import (
EMPTY_FILE_DIGEST,
EMPTY_SNAPSHOT,
CreateDigest,
Digest,
DigestContents,
FileContent,
Snapshot,
)
from pants.engine.internals.engine_testutil import (
assert_equal_with_printing,
remove_locations_from_traceback,
)
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.scheduler_test_base import SchedulerTestBase
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, rule
from pants.reporting.streaming_workunit_handler import (
StreamingWorkunitContext,
StreamingWorkunitHandler,
)
from pants.testutil.rule_runner import QueryRule
from pants.testutil.test_base import TestBase
from pants.util.logging import LogLevel
class A:
pass
class B:
pass
class C:
pass
class D:
pass
def fn_raises(x):
raise Exception(f"An exception for {type(x).__name__}")
@rule
def nested_raise(x: B) -> A: # type: ignore[return]
fn_raises(x)
@dataclass(frozen=True)
class Fib:
val: int
@rule(desc="Fibonacci", level=LogLevel.INFO)
async def fib(n: int) -> Fib:
if n < 2:
return Fib(n)
x, y = tuple(await MultiGet([Get(Fib, int(n - 2)), Get(Fib, int(n - 1))]))
return Fib(x.val + y.val)
@dataclass(frozen=True)
class MyInt:
val: int
@dataclass(frozen=True)
class MyFloat:
val: float
@rule
def upcast(n: MyInt) -> MyFloat:
return MyFloat(float(n.val))
# This set of dummy types and the following `@rule`s are intended to test that workunits are
# being generated correctly and with the correct parent-child relationships.
class Input:
pass
class Alpha:
pass
class Beta:
pass
class Gamma:
pass
class Omega:
pass
class Epsilon:
pass
@rule(canonical_name="canonical_rule_one", desc="Rule number 1", level=LogLevel.INFO)
async def rule_one_function(i: Input) -> Beta:
"""This rule should be the first one executed by the engine, and thus have no parent."""
a = Alpha()
o = await Get(Omega, Alpha, a)
b = await Get(Beta, Omega, o)
time.sleep(1)
return b
@rule(desc="Rule number 2", level=LogLevel.INFO)
async def rule_two(a: Alpha) -> Omega:
"""This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
child of `rule_one`'s workunit."""
await Get(Gamma, Alpha, a)
return Omega()
@rule(desc="Rule number 3", level=LogLevel.INFO)
async def rule_three(o: Omega) -> Beta:
"""This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
child of `rule_one`'s workunit."""
return Beta()
@rule(desc="Rule number 4", level=LogLevel.INFO)
def rule_four(a: Alpha) -> Gamma:
"""This rule should be invoked in the body of `rule_two` and therefore its workunit should be a
child of `rule_two`'s workunit."""
return Gamma()
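# Expected workunit parent/child relationships for the rules above, as asserted in
# test_streaming_workunits_parent_id_and_rule_metadata below:
#
#     rule_one (canonical_rule_one)
#       +-- rule_two
#       |     +-- rule_four
#       +-- rule_three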
@rule(desc="Rule A", level=LogLevel.INFO)
async def rule_A(i: Input) -> Alpha:
o = Omega()
a = await Get(Alpha, Omega, o)
return a
@rule
async def rule_B(o: Omega) -> Alpha:
e = Epsilon()
a = await Get(Alpha, Epsilon, e)
return a
@rule(desc="Rule C", level=LogLevel.INFO)
def rule_C(e: Epsilon) -> Alpha:
return Alpha()
class EngineTest(unittest.TestCase, SchedulerTestBase):
assert_equal_with_printing = assert_equal_with_printing
def scheduler(self, rules, include_trace_on_error):
return self.mk_scheduler(rules=rules, include_trace_on_error=include_trace_on_error)
def test_recursive_multi_get(self):
# Tests that a rule that "uses itself" multiple times per invoke works.
rules = [fib, QueryRule(Fib, (int,))]
(fib_10,) = self.mk_scheduler(rules=rules).product_request(Fib, subjects=[10])
self.assertEqual(55, fib_10.val)
def test_no_include_trace_error_raises_boring_error(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
"1 Exception encountered:\n\n Exception: An exception for B\n", str(cm.exception)
)
def test_no_include_trace_error_multiple_paths_raises_executionerror(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[B(), B()]))
self.assert_equal_with_printing(
dedent(
"""
2 Exceptions encountered:
Exception: An exception for B
Exception: An exception for B
"""
).lstrip(),
str(cm.exception),
)
def test_include_trace_error_raises_error_with_trace(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
dedent(
"""
1 Exception encountered:
Engine traceback:
in select
in pants.engine.internals.engine_test.nested_raise
Traceback (most recent call last):
File LOCATION-INFO, in nested_raise
fn_raises(x)
File LOCATION-INFO, in fn_raises
raise Exception(f"An exception for {type(x).__name__}")
Exception: An exception for B
"""
).lstrip(),
remove_locations_from_traceback(str(cm.exception)),
)
def test_nonexistent_root(self) -> None:
rules = [QueryRule(A, [B])]
# No rules are available to compute A.
with self.assertRaises(ValueError) as cm:
self.scheduler(rules, include_trace_on_error=False)
assert (
"No installed rules return the type A, and it was not provided by potential callers of "
) in str(cm.exception)
def test_missing_query_rule(self) -> None:
# Even if we register the rule to go from MyInt -> MyFloat, we must also register a QueryRule
# for the graph to work when making a synchronous call via `Scheduler.product_request`
# (a commented sketch of that registration follows this test).
scheduler = self.mk_scheduler(rules=[upcast], include_trace_on_error=False)
with self.assertRaises(Exception) as cm:
scheduler.product_request(MyFloat, subjects=[MyInt(0)])
assert (
"No installed QueryRules return the type MyFloat. Try registering QueryRule(MyFloat "
"for MyInt)."
) in str(cm.exception)
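# A minimal sketch of the registration suggested by the error message above (illustrative
# only; it mirrors the QueryRule usage elsewhere in this file):
#
#     rules = [upcast, QueryRule(MyFloat, (MyInt,))]
#     scheduler = self.mk_scheduler(rules=rules, include_trace_on_error=False)
#     scheduler.product_request(MyFloat, subjects=[MyInt(0)])  # now resolvable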
@dataclass
class WorkunitTracker:
"""This class records every non-empty batch of started and completed workunits received from the
engine."""
finished_workunit_chunks: List[List[dict]] = field(default_factory=list)
started_workunit_chunks: List[List[dict]] = field(default_factory=list)
finished: bool = False
def add(self, **kwargs) -> None:
if kwargs["finished"] is True:
self.finished = True
started_workunits = kwargs.get("started_workunits")
if started_workunits:
self.started_workunit_chunks.append(started_workunits)
completed_workunits = kwargs.get("completed_workunits")
if completed_workunits:
self.finished_workunit_chunks.append(completed_workunits)
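# StreamingWorkunitHandler invokes callbacks such as WorkunitTracker.add with keyword
# arguments; the ones relied on in this file are "finished", "started_workunits",
# "completed_workunits" and (see test_context_object below) "context".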
class StreamingWorkunitTests(unittest.TestCase, SchedulerTestBase):
def test_streaming_workunits_reporting(self):
rules = [fib, QueryRule(Fib, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with handler.session():
scheduler.product_request(Fib, subjects=[0])
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
# The execution of the single named @rule "fib" should be providing this one workunit.
self.assertEqual(len(flattened), 1)
tracker.finished_workunit_chunks = []
with handler.session():
scheduler.product_request(Fib, subjects=[10])
# Requesting a bigger Fibonacci number will result in more rule executions and thus more reported workunits.
# In this case, we expect 10 invocations of the `fib` rule.
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(flattened) == 10
assert tracker.finished
def test_streaming_workunits_parent_id_and_rule_metadata(self):
rules = [rule_one_function, rule_two, rule_three, rule_four, QueryRule(Beta, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
# rule_one should complete well after the other rules because of the artificial delay caused by its sleep() call.
assert {item["name"] for item in tracker.finished_workunit_chunks[0]} == {
"pants.engine.internals.engine_test.rule_two",
"pants.engine.internals.engine_test.rule_three",
"pants.engine.internals.engine_test.rule_four",
}
# Because of the artificial delay in rule_one, it should have time to be reported as
# started but not yet finished.
started = list(itertools.chain.from_iterable(tracker.started_workunit_chunks))
assert len(list(item for item in started if item["name"] == "canonical_rule_one")) > 0
assert {item["name"] for item in tracker.finished_workunit_chunks[1]} == {
"canonical_rule_one"
}
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r1 = next(item for item in finished if item["name"] == "canonical_rule_one")
r2 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_two"
)
r3 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_three"
)
r4 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_four"
)
# rule_one should have no parent_id because its actual parent workunit was filtered out based on its log level
assert r1.get("parent_id", None) is None
assert r2["parent_id"] == r1["span_id"]
assert r3["parent_id"] == r1["span_id"]
assert r4["parent_id"] == r2["span_id"]
assert r3["description"] == "Rule number 3"
assert r4["description"] == "Rule number 4"
assert r4["level"] == "INFO"
def test_streaming_workunit_log_levels(self) -> None:
rules = [rule_one_function, rule_two, rule_three, rule_four, QueryRule(Beta, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
# With the max_workunit_verbosity set to TRACE, we should see the workunit corresponding to the Select node.
select = next(
item
for item in finished
if item["name"]
not in {
"canonical_rule_one",
"pants.engine.internals.engine_test.rule_two",
"pants.engine.internals.engine_test.rule_three",
"pants.engine.internals.engine_test.rule_four",
}
)
assert select["name"] == "select"
assert select["level"] == "TRACE"
r1 = next(item for item in finished if item["name"] == "canonical_rule_one")
assert r1["parent_id"] == select["span_id"]
def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
rules = [rule_A, rule_B, rule_C, QueryRule(Alpha, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
info_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with info_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(finished) == 2
r_A = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
)
r_C = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
)
assert "parent_id" not in r_A
assert r_C["parent_id"] == r_A["span_id"]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
debug_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with debug_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r_A = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
)
r_B = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_B"
)
r_C = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
)
assert r_B["parent_id"] == r_A["span_id"]
assert r_C["parent_id"] == r_B["span_id"]
def test_engine_aware_rule(self):
@dataclass(frozen=True)
class ModifiedOutput(EngineAwareReturnType):
_level: LogLevel
val: int
def level(self):
return self._level
@rule(desc="a_rule")
def a_rule(n: int) -> ModifiedOutput:
return ModifiedOutput(val=n, _level=LogLevel.ERROR)
rules = [a_rule, QueryRule(ModifiedOutput, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(ModifiedOutput, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["level"] == "ERROR"
def test_engine_aware_none_case(self):
# If level() returns None, the engine shouldn't try to set
# a new workunit level.
@dataclass(frozen=True)
class ModifiedOutput(EngineAwareReturnType):
_level: Optional[LogLevel]
val: int
def level(self):
return self._level
@rule(desc="a_rule")
def a_rule(n: int) -> ModifiedOutput:
return ModifiedOutput(val=n, _level=None)
rules = [a_rule, QueryRule(ModifiedOutput, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(ModifiedOutput, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["level"] == "TRACE"
def test_artifacts_on_engine_aware_type(self) -> None:
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def artifacts(self):
return {"some_arbitrary_key": EMPTY_SNAPSHOT}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
artifacts = workunit["artifacts"]
assert artifacts["some_arbitrary_key"] == EMPTY_SNAPSHOT
def test_metadata_on_engine_aware_type(self) -> None:
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def metadata(self):
return {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
metadata = workunit["metadata"]
assert metadata == {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}
def test_metadata_non_string_key_behavior(self) -> None:
# If someone passes a non-string key in a metadata() method,
# no meaningful metadata entry is produced on the workunit
# (a warning is emitted instead), but the rule itself should not fail.
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def metadata(self):
return {10: "foo", "other_key": "other value"}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["metadata"] == {}
@dataclass(frozen=True)
class ComplicatedInput:
snapshot_1: Snapshot
snapshot_2: Snapshot
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
snapshot_1: Snapshot
snapshot_2: Snapshot
def artifacts(self):
return {"snapshot_1": self.snapshot_1, "snapshot_2": self.snapshot_2}
@rule(desc="a_rule")
def a_rule(input: ComplicatedInput) -> Output:
return Output(snapshot_1=input.snapshot_1, snapshot_2=input.snapshot_2)
class MoreComplicatedEngineAware(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
a_rule,
QueryRule(Output, (ComplicatedInput,)),
)
def test_more_complicated_engine_aware(self) -> None:
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
self.scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
input_1 = CreateDigest(
(
FileContent(path="a.txt", content=b"alpha"),
FileContent(path="b.txt", content=b"beta"),
)
)
digest_1 = self.request(Digest, [input_1])
snapshot_1 = self.request(Snapshot, [digest_1])
input_2 = CreateDigest((FileContent(path="g.txt", content=b"gamma"),))
digest_2 = self.request(Digest, [input_2])
snapshot_2 = self.request(Snapshot, [digest_2])
input = ComplicatedInput(snapshot_1=snapshot_1, snapshot_2=snapshot_2)
self.request(Output, [input])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
streaming_workunit_context = handler._context
artifacts = workunit["artifacts"]
output_snapshot_1 = artifacts["snapshot_1"]
output_snapshot_2 = artifacts["snapshot_2"]
output_contents_list = streaming_workunit_context.snapshots_to_file_contents(
[output_snapshot_1, output_snapshot_2]
)
assert len(output_contents_list) == 2
assert isinstance(output_contents_list[0], DigestContents)
assert isinstance(output_contents_list[1], DigestContents)
digest_contents_1 = output_contents_list[0]
digest_contents_2 = output_contents_list[1]
assert len(tuple(x for x in digest_contents_1 if x.content == b"alpha")) == 1
assert len(tuple(x for x in digest_contents_1 if x.content == b"beta")) == 1
assert len(tuple(x for x in digest_contents_2 if x.content == b"gamma")) == 1
class StreamingWorkunitProcessTests(TestBase):
additional_options = ["--no-process-execution-use-local-cache"]
@classmethod
def rules(cls):
return [*super().rules(), QueryRule(ProcessResult, (Process,))]
def test_process_digests_on_workunits(self):
scheduler = self.scheduler
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stdout_process = Process(
argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
)
with handler.session():
result = self.request(ProcessResult, [stdout_process])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
process_workunit = next(
item for item in finished if item["name"] == "multi_platform_process-running"
)
assert process_workunit is not None
stdout_digest = process_workunit["artifacts"]["stdout_digest"]
stderr_digest = process_workunit["artifacts"]["stderr_digest"]
assert result.stdout == b"stdout output\n"
assert stderr_digest == EMPTY_FILE_DIGEST
assert stdout_digest.serialized_bytes_length == len(result.stdout)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
self._scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stderr_process = Process(
argv=("/bin/bash", "-c", "1>&2 /bin/echo 'stderr output'"), description="Stderr process"
)
with handler.session():
result = self.request(ProcessResult, [stderr_process])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
process_workunit = next(
item for item in finished if item["name"] == "multi_platform_process-running"
)
assert process_workunit is not None
stdout_digest = process_workunit["artifacts"]["stdout_digest"]
stderr_digest = process_workunit["artifacts"]["stderr_digest"]
assert result.stderr == b"stderr output\n"
assert stdout_digest == EMPTY_FILE_DIGEST
assert stderr_digest.serialized_bytes_length == len(result.stderr)
try:
self._scheduler.ensure_remote_has_recursive([stdout_digest, stderr_digest])
except Exception as e:
# This is the exception message we should expect from invoking ensure_remote_has_recursive()
# in Rust.
assert str(e) == "Cannot ensure remote has blobs without a remote"
byte_outputs = self._scheduler.single_file_digests_to_bytes([stdout_digest, stderr_digest])
assert byte_outputs[0] == result.stdout
assert byte_outputs[1] == result.stderr
def test_context_object(self):
scheduler = self.scheduler
def callback(**kwargs) -> None:
context = kwargs["context"]
assert isinstance(context, StreamingWorkunitContext)
completed_workunits = kwargs["completed_workunits"]
for workunit in completed_workunits:
if "artifacts" in workunit and "stdout_digest" in workunit["artifacts"]:
digest = workunit["artifacts"]["stdout_digest"]
output = context.single_file_digests_to_bytes([digest])
assert output == (b"stdout output\n",)
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[callback],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stdout_process = Process(
argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
)
with handler.session():
self.request(ProcessResult, [stdout_process])
| 34.423628
| 122
| 0.636253
|
import itertools
import time
import unittest
from dataclasses import dataclass, field
from textwrap import dedent
from typing import List, Optional
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.fs import (
EMPTY_FILE_DIGEST,
EMPTY_SNAPSHOT,
CreateDigest,
Digest,
DigestContents,
FileContent,
Snapshot,
)
from pants.engine.internals.engine_testutil import (
assert_equal_with_printing,
remove_locations_from_traceback,
)
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.scheduler_test_base import SchedulerTestBase
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, rule
from pants.reporting.streaming_workunit_handler import (
StreamingWorkunitContext,
StreamingWorkunitHandler,
)
from pants.testutil.rule_runner import QueryRule
from pants.testutil.test_base import TestBase
from pants.util.logging import LogLevel
class A:
pass
class B:
pass
class C:
pass
class D:
pass
def fn_raises(x):
raise Exception(f"An exception for {type(x).__name__}")
@rule
def nested_raise(x: B) -> A:
fn_raises(x)
@dataclass(frozen=True)
class Fib:
val: int
@rule(desc="Fibonacci", level=LogLevel.INFO)
async def fib(n: int) -> Fib:
if n < 2:
return Fib(n)
x, y = tuple(await MultiGet([Get(Fib, int(n - 2)), Get(Fib, int(n - 1))]))
return Fib(x.val + y.val)
@dataclass(frozen=True)
class MyInt:
val: int
@dataclass(frozen=True)
class MyFloat:
val: float
@rule
def upcast(n: MyInt) -> MyFloat:
return MyFloat(float(n.val))
class Input:
pass
class Alpha:
pass
class Beta:
pass
class Gamma:
pass
class Omega:
pass
class Epsilon:
pass
@rule(canonical_name="canonical_rule_one", desc="Rule number 1", level=LogLevel.INFO)
async def rule_one_function(i: Input) -> Beta:
a = Alpha()
o = await Get(Omega, Alpha, a)
b = await Get(Beta, Omega, o)
time.sleep(1)
return b
@rule(desc="Rule number 2", level=LogLevel.INFO)
async def rule_two(a: Alpha) -> Omega:
await Get(Gamma, Alpha, a)
return Omega()
@rule(desc="Rule number 3", level=LogLevel.INFO)
async def rule_three(o: Omega) -> Beta:
return Beta()
@rule(desc="Rule number 4", level=LogLevel.INFO)
def rule_four(a: Alpha) -> Gamma:
return Gamma()
@rule(desc="Rule A", level=LogLevel.INFO)
async def rule_A(i: Input) -> Alpha:
o = Omega()
a = await Get(Alpha, Omega, o)
return a
@rule
async def rule_B(o: Omega) -> Alpha:
e = Epsilon()
a = await Get(Alpha, Epsilon, e)
return a
@rule(desc="Rule C", level=LogLevel.INFO)
def rule_C(e: Epsilon) -> Alpha:
return Alpha()
class EngineTest(unittest.TestCase, SchedulerTestBase):
assert_equal_with_printing = assert_equal_with_printing
def scheduler(self, rules, include_trace_on_error):
return self.mk_scheduler(rules=rules, include_trace_on_error=include_trace_on_error)
def test_recursive_multi_get(self):
rules = [fib, QueryRule(Fib, (int,))]
(fib_10,) = self.mk_scheduler(rules=rules).product_request(Fib, subjects=[10])
self.assertEqual(55, fib_10.val)
def test_no_include_trace_error_raises_boring_error(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
"1 Exception encountered:\n\n Exception: An exception for B\n", str(cm.exception)
)
def test_no_include_trace_error_multiple_paths_raises_executionerror(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[B(), B()]))
self.assert_equal_with_printing(
dedent(
"""
2 Exceptions encountered:
Exception: An exception for B
Exception: An exception for B
"""
).lstrip(),
str(cm.exception),
)
def test_include_trace_error_raises_error_with_trace(self):
rules = [nested_raise, QueryRule(A, (B,))]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
dedent(
"""
1 Exception encountered:
Engine traceback:
in select
in pants.engine.internals.engine_test.nested_raise
Traceback (most recent call last):
File LOCATION-INFO, in nested_raise
fn_raises(x)
File LOCATION-INFO, in fn_raises
raise Exception(f"An exception for {type(x).__name__}")
Exception: An exception for B
"""
).lstrip(),
remove_locations_from_traceback(str(cm.exception)),
)
def test_nonexistent_root(self) -> None:
rules = [QueryRule(A, [B])]
with self.assertRaises(ValueError) as cm:
self.scheduler(rules, include_trace_on_error=False)
assert (
"No installed rules return the type A, and it was not provided by potential callers of "
) in str(cm.exception)
def test_missing_query_rule(self) -> None:
scheduler = self.mk_scheduler(rules=[upcast], include_trace_on_error=False)
with self.assertRaises(Exception) as cm:
scheduler.product_request(MyFloat, subjects=[MyInt(0)])
assert (
"No installed QueryRules return the type MyFloat. Try registering QueryRule(MyFloat "
"for MyInt)."
) in str(cm.exception)
@dataclass
class WorkunitTracker:
finished_workunit_chunks: List[List[dict]] = field(default_factory=list)
started_workunit_chunks: List[List[dict]] = field(default_factory=list)
finished: bool = False
def add(self, **kwargs) -> None:
if kwargs["finished"] is True:
self.finished = True
started_workunits = kwargs.get("started_workunits")
if started_workunits:
self.started_workunit_chunks.append(started_workunits)
completed_workunits = kwargs.get("completed_workunits")
if completed_workunits:
self.finished_workunit_chunks.append(completed_workunits)
class StreamingWorkunitTests(unittest.TestCase, SchedulerTestBase):
def test_streaming_workunits_reporting(self):
rules = [fib, QueryRule(Fib, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with handler.session():
scheduler.product_request(Fib, subjects=[0])
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
self.assertEqual(len(flattened), 1)
tracker.finished_workunit_chunks = []
with handler.session():
scheduler.product_request(Fib, subjects=[10])
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(flattened) == 10
assert tracker.finished
def test_streaming_workunits_parent_id_and_rule_metadata(self):
rules = [rule_one_function, rule_two, rule_three, rule_four, QueryRule(Beta, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
assert {item["name"] for item in tracker.finished_workunit_chunks[0]} == {
"pants.engine.internals.engine_test.rule_two",
"pants.engine.internals.engine_test.rule_three",
"pants.engine.internals.engine_test.rule_four",
}
started = list(itertools.chain.from_iterable(tracker.started_workunit_chunks))
assert len(list(item for item in started if item["name"] == "canonical_rule_one")) > 0
assert {item["name"] for item in tracker.finished_workunit_chunks[1]} == {
"canonical_rule_one"
}
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r1 = next(item for item in finished if item["name"] == "canonical_rule_one")
r2 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_two"
)
r3 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_three"
)
r4 = next(
item
for item in finished
if item["name"] == "pants.engine.internals.engine_test.rule_four"
)
assert r1.get("parent_id", None) is None
assert r2["parent_id"] == r1["span_id"]
assert r3["parent_id"] == r1["span_id"]
assert r4["parent_id"] == r2["span_id"]
assert r3["description"] == "Rule number 3"
assert r4["description"] == "Rule number 4"
assert r4["level"] == "INFO"
def test_streaming_workunit_log_levels(self) -> None:
rules = [rule_one_function, rule_two, rule_three, rule_four, QueryRule(Beta, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
select = next(
item
for item in finished
if item["name"]
not in {
"canonical_rule_one",
"pants.engine.internals.engine_test.rule_two",
"pants.engine.internals.engine_test.rule_three",
"pants.engine.internals.engine_test.rule_four",
}
)
assert select["name"] == "select"
assert select["level"] == "TRACE"
r1 = next(item for item in finished if item["name"] == "canonical_rule_one")
assert r1["parent_id"] == select["span_id"]
def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
rules = [rule_A, rule_B, rule_C, QueryRule(Alpha, (Input,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
info_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with info_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(finished) == 2
r_A = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
)
r_C = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
)
assert "parent_id" not in r_A
assert r_C["parent_id"] == r_A["span_id"]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
debug_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with debug_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r_A = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
)
r_B = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_B"
)
r_C = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
)
assert r_B["parent_id"] == r_A["span_id"]
assert r_C["parent_id"] == r_B["span_id"]
def test_engine_aware_rule(self):
@dataclass(frozen=True)
class ModifiedOutput(EngineAwareReturnType):
_level: LogLevel
val: int
def level(self):
return self._level
@rule(desc="a_rule")
def a_rule(n: int) -> ModifiedOutput:
return ModifiedOutput(val=n, _level=LogLevel.ERROR)
rules = [a_rule, QueryRule(ModifiedOutput, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(ModifiedOutput, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["level"] == "ERROR"
def test_engine_aware_none_case(self):
@dataclass(frozen=True)
# When level() returns None, the workunit keeps its default level instead of being assigned a new workunit level.
class ModifiedOutput(EngineAwareReturnType):
_level: Optional[LogLevel]
val: int
def level(self):
return self._level
@rule(desc="a_rule")
def a_rule(n: int) -> ModifiedOutput:
return ModifiedOutput(val=n, _level=None)
rules = [a_rule, QueryRule(ModifiedOutput, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(ModifiedOutput, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["level"] == "TRACE"
def test_artifacts_on_engine_aware_type(self) -> None:
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def artifacts(self):
return {"some_arbitrary_key": EMPTY_SNAPSHOT}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
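# artifacts() attaches named values (here an empty snapshot) to the finished workunit under its "artifacts" key.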
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
artifacts = workunit["artifacts"]
assert artifacts["some_arbitrary_key"] == EMPTY_SNAPSHOT
def test_metadata_on_engine_aware_type(self) -> None:
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def metadata(self):
return {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
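# metadata() attaches arbitrary key/value pairs to the workunit; the dict is expected to round-trip unchanged.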
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
metadata = workunit["metadata"]
assert metadata == {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}
def test_metadata_non_string_key_behavior(self) -> None:
# If someone passes a non-string key in a metadata() method,
# this should fail to produce a meaningful metadata entry on
# the workunit (with a warning), but not fail.
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
val: int
def metadata(self):
return {10: "foo", "other_key": "other value"}
@rule(desc="a_rule")
def a_rule(n: int) -> Output:
return Output(val=n)
rules = [a_rule, QueryRule(Output, (int,))]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
scheduler.product_request(Output, subjects=[0])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
assert workunit["metadata"] == {}
@dataclass(frozen=True)
class ComplicatedInput:
snapshot_1: Snapshot
snapshot_2: Snapshot
@dataclass(frozen=True)
class Output(EngineAwareReturnType):
snapshot_1: Snapshot
snapshot_2: Snapshot
def artifacts(self):
return {"snapshot_1": self.snapshot_1, "snapshot_2": self.snapshot_2}
@rule(desc="a_rule")
def a_rule(input: ComplicatedInput) -> Output:
return Output(snapshot_1=input.snapshot_1, snapshot_2=input.snapshot_2)
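# Snapshots exposed through artifacts() can later be resolved back to file contents via the streaming workunit context (exercised below).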
class MoreComplicatedEngineAware(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
a_rule,
QueryRule(Output, (ComplicatedInput,)),
)
def test_more_complicated_engine_aware(self) -> None:
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
self.scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
input_1 = CreateDigest(
(
FileContent(path="a.txt", content=b"alpha"),
FileContent(path="b.txt", content=b"beta"),
)
)
digest_1 = self.request(Digest, [input_1])
snapshot_1 = self.request(Snapshot, [digest_1])
input_2 = CreateDigest((FileContent(path="g.txt", content=b"gamma"),))
digest_2 = self.request(Digest, [input_2])
snapshot_2 = self.request(Snapshot, [digest_2])
input = ComplicatedInput(snapshot_1=snapshot_1, snapshot_2=snapshot_2)
self.request(Output, [input])
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
workunit = next(
item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
)
streaming_workunit_context = handler._context
artifacts = workunit["artifacts"]
output_snapshot_1 = artifacts["snapshot_1"]
output_snapshot_2 = artifacts["snapshot_2"]
output_contents_list = streaming_workunit_context.snapshots_to_file_contents(
[output_snapshot_1, output_snapshot_2]
)
assert len(output_contents_list) == 2
assert isinstance(output_contents_list[0], DigestContents)
assert isinstance(output_contents_list[1], DigestContents)
digest_contents_1 = output_contents_list[0]
digest_contents_2 = output_contents_list[1]
assert len(tuple(x for x in digest_contents_1 if x.content == b"alpha")) == 1
assert len(tuple(x for x in digest_contents_1 if x.content == b"beta")) == 1
assert len(tuple(x for x in digest_contents_2 if x.content == b"gamma")) == 1
class StreamingWorkunitProcessTests(TestBase):
additional_options = ["--no-process-execution-use-local-cache"]
@classmethod
def rules(cls):
return [*super().rules(), QueryRule(ProcessResult, (Process,))]
def test_process_digests_on_workunits(self):
scheduler = self.scheduler
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stdout_process = Process(
argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
)
with handler.session():
result = self.request(ProcessResult, [stdout_process])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
process_workunit = next(
item for item in finished if item["name"] == "multi_platform_process-running"
)
assert process_workunit is not None
stdout_digest = process_workunit["artifacts"]["stdout_digest"]
stderr_digest = process_workunit["artifacts"]["stderr_digest"]
assert result.stdout == b"stdout output\n"
assert stderr_digest == EMPTY_FILE_DIGEST
assert stdout_digest.serialized_bytes_length == len(result.stdout)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
self._scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stderr_process = Process(
argv=("/bin/bash", "-c", "1>&2 /bin/echo 'stderr output'"), description="Stderr process"
)
with handler.session():
result = self.request(ProcessResult, [stderr_process])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
process_workunit = next(
item for item in finished if item["name"] == "multi_platform_process-running"
)
assert process_workunit is not None
stdout_digest = process_workunit["artifacts"]["stdout_digest"]
stderr_digest = process_workunit["artifacts"]["stderr_digest"]
assert result.stderr == b"stderr output\n"
assert stdout_digest == EMPTY_FILE_DIGEST
assert stderr_digest.serialized_bytes_length == len(result.stderr)
try:
self._scheduler.ensure_remote_has_recursive([stdout_digest, stderr_digest])
except Exception as e:
# This is the exception message we should expect from invoking ensure_remote_has_recursive()
# in rust.
assert str(e) == "Cannot ensure remote has blobs without a remote"
byte_outputs = self._scheduler.single_file_digests_to_bytes([stdout_digest, stderr_digest])
assert byte_outputs[0] == result.stdout
assert byte_outputs[1] == result.stderr
def test_context_object(self):
scheduler = self.scheduler
def callback(**kwargs) -> None:
context = kwargs["context"]
assert isinstance(context, StreamingWorkunitContext)
completed_workunits = kwargs["completed_workunits"]
for workunit in completed_workunits:
if "artifacts" in workunit and "stdout_digest" in workunit["artifacts"]:
digest = workunit["artifacts"]["stdout_digest"]
output = context.single_file_digests_to_bytes([digest])
assert output == (b"stdout output\n",)
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[callback],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
stdout_process = Process(
argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
)
with handler.session():
self.request(ProcessResult, [stdout_process])
| true
| true
|
1c42cf6f42fc0141cc2213b526517dda985712bd
| 15,828
|
py
|
Python
|
Cogs/Spotify2.py
|
kazoeru/Acinonyx-v3
|
6d202ee22179567b132010aeec34d51cd316913c
|
[
"MIT"
] | null | null | null |
Cogs/Spotify2.py
|
kazoeru/Acinonyx-v3
|
6d202ee22179567b132010aeec34d51cd316913c
|
[
"MIT"
] | null | null | null |
Cogs/Spotify2.py
|
kazoeru/Acinonyx-v3
|
6d202ee22179567b132010aeec34d51cd316913c
|
[
"MIT"
] | null | null | null |
import discord
import spotipy
import os
import dropbox
import ffmpeg
import re
import subprocess
import asyncio
import json
from discord.ext import commands
from Cogs import Settings
from savify import Savify
from savify.types import Type, Format, Quality
from spotipy.oauth2 import SpotifyClientCredentials
from pathlib import Path
help_spotdl = """Command ini membutuhkan nama penyanyi dan judul lagu setelah command.
**Contoh / Example**
`acx music Neffex - gratefull`
"""
processing_file = """Memproses file musik dari server Acinonyx
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
uploading_file = """Mengunggah file musik ke discord, tunggu sebentar...
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
upload_dropbox = """File melebihi batas server discord 8MB, memulai upload ke dropbox tunggu sebentar
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
upload_complete = """File musik telah di upload ke dropbox silahkan klik link dibawah ini, file akan dihapus dalam 7 menit
"""
def setup(bot):
try:
settings = bot.get_cog("Settings")
except:
settings = None
bot.add_cog(Spotify2(bot, settings))
class Spotify2(commands.Cog):
def __init__(self, bot, settings):
self.preloads = ("Cogs.Settings")
self.bot = bot
self.settings = settings
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def music(self, ctx, *, music = None):
with open ("/home/nvstar/Corp-ina.py/Settings.json") as settingsJson:
data = json.load(settingsJson)
Freemium = data["Servers"]["440765395172065280"]["Members"]
user = "{}".format(ctx.author.id)
isOwner = self.settings.isOwner(ctx.author)
#if isOwner == None:
# return
#elif isOwner == False:
# msgText = "Command ini sedang dalam tahap pengembangan"
# em = discord.Embed(color = 0XFF8C00, description = msgText)
# await ctx.channel.send(embed = em)
# return
if user not in Freemium:
msg = "Ini adalah fitur ***PREMIUM***\n"
msg += "Dengan bergabung server kami, kamu dapat menggunakan fitur ini\n\n"
msg += "**[Klik Disini](https://discord.gg/vMcMe8f)** untuk bergabung dengan kami"
em = discord.Embed(color = 0XFF8C00,
description = msg)
em.set_author(name = "PREMION ONLY")
em.set_footer(text = "{}".format(ctx.author), icon_url= "{}".format(ctx.author.avatar_url))
return await ctx.send(embed = em)
if music == None:
em = discord.Embed(title = "<a:exclamation:750557709107068939>**COMMAND FAILURE**<a:exclamation:750557709107068939>",
color = 0XFF8C00,
description = help_spotdl)
em.set_thumbnail(url = "https://cdn.discordapp.com/attachments/518118753226063887/725569194304733435/photo.jpg")
em.set_footer(text = f"Request by : {ctx.author.name}", icon_url = f"{ctx.author.avatar_url}")
return await ctx.send(embed = em)
sp = spotipy.Spotify(auth_manager = SpotifyClientCredentials(client_id = "690da446e39b44a7baf8deaff12be418",
client_secret = "782ddebbf58846f1a1d70a074d62ce1a"))
results = sp.search(q = f'{music}', limit = 1)
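# results['tracks']['items'] holds the matching tracks; each entry carries the artist, album art and external Spotify URL used below.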
for idx, track in enumerate(results['tracks']['items']):
artist_name = track['artists'][0]['name']
album_info = track['album']
album_images = album_info['images'][0]
album_images_url = album_images['url']
album_artist = album_info['artists'][0]
album_artist_name = album_artist['name']
external_urls = track['external_urls']
#external_urls_json = json.loads(external_urls)
track_name = track['name']
spotify_urls = external_urls['spotify']
# embed and output result
em = discord.Embed(title = None,
color = 0XFF8C00,
description = f"> [{album_artist_name} - {track_name}]({spotify_urls})\n> \n> • [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)\n> • [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)\n> \n> <a:acx_mp3:744868331382767617> **.MP3 Format**")
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = "Request by : {}".format(ctx.message.author.name),
icon_url = ctx.message.author.avatar_url)
msg = await ctx.send(embed = em, delete_after = 15)
await msg.add_reaction('<a:acx_mp3:744868331382767617>')
while True:
reaction, user = await self.bot.wait_for(event='reaction_add',)
if user == ctx.author:
emoji = str(reaction.emoji)
if emoji == '<a:acx_mp3:744868331382767617>':
await msg.delete()
s = Savify(api_credentials=("690da446e39b44a7baf8deaff12be418","782ddebbf58846f1a1d70a074d62ce1a"),
quality = Quality.BEST,
download_format = Format.MP3,
group='{}'.format(ctx.author.id),
output_path=Path('/home/nvstar/Corp-ina.py/Temp'))
em = discord.Embed(title = None,
color = 0XFF8C00,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + processing_file)
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
dld = await ctx.send(embed = em)
musicDownload = s.download("{}".format(spotify_urls))
checkServer = ctx.guild.premium_tier
if checkServer > 1:
em = discord.Embed(title = None,
color = 0XFF8C00 ,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + uploading_file)
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
await dld.edit(embed = em)
await ctx.send(file = discord.File(f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3"))
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3"], stdout = subprocess.PIPE).communicate()[0]
await dld.delete()
await ctx.send(f"{ctx.author.mention} :arrow_up:")
checkFile = os.path.getsize(f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3")
if checkFile > 8000000:
await dld.delete()
em = discord.Embed(
title = "<a:exclamation:750557709107068939>**EXCEED THE LIMIT**<a:exclamation:750557709107068939>",
color = 0XFF8C00,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n\n" + upload_dropbox)
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = f"{ctx.author.avatar_url}")
msg2 = await ctx.send(embed = em)
# START THE DROPBOX UPLOAD
dropbox_access_token = "INmLpmjvCLQAAAAAAAAAAa--h2Jb571-pTJ_UHPdqp3XoMC0KJuSekPufnCI-a2y"
computer_path = '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)
dropbox_path = f"/Apps/Acinonyc music file/{album_artist_name} - {track_name}.mp3"
client = dropbox.Dropbox(dropbox_access_token)
print("[SUCCESS] dropbox account linked")
client.files_upload(open(computer_path, "rb").read(), dropbox_path, mode = dropbox.files.WriteMode("overwrite"))
print("[UPLOADED] {}".format(computer_path))
d = dropbox.Dropbox(dropbox_access_token)
target = dropbox_path
link_dropbox = d.sharing_create_shared_link(target)
dl_link = re.sub(r"\?dl\=0", "?dl=1", str(link_dropbox.url))
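# Replacing the shared link's "?dl=0" suffix with "?dl=1" turns the Dropbox preview URL into a direct-download link.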
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)], stdout = subprocess.PIPE).communicate()[0]
# EMBED: FILE UPLOAD FINISHED
em = discord.Embed(
title = None,
color = 0XFF8C00,
description = f"{upload_complete}\n**[DOWNLOAD HERE]({dl_link})**")
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author.name}", icon_url = f"{ctx.author.avatar_url}")
await msg2.delete()
msg3 = await ctx.send(embed = em)
#DELETE FILES
await asyncio.sleep(420)
dropbox_delete = d.files_delete(dropbox_path)
await msg3.delete()
em2 = discord.Embed(title = None,
color = 0XFF8C00 ,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + uploading_file)
em2.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em2.set_thumbnail(url = album_images_url)
em2.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
await dld.edit(embed = em2)
await ctx.send(file = discord.File('/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)))
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)], stdout = subprocess.PIPE).communicate()[0]
await dld.delete()
await ctx.send(f"{ctx.author.mention} :arrow_up:")
# if self.bot.user != user:
# await msg.remove_reaction()
@commands.command(pass_context=True)
async def printspot(self, ctx, *, music = None):
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
msgText = "Command ini sedang dalam tahap pengembangan"
em = discord.Embed(color = 0XFF8C00, description = msgText)
await ctx.channel.send(embed = em)
return
sp = spotipy.Spotify(auth_manager = SpotifyClientCredentials(client_id = "690da446e39b44a7baf8deaff12be418",
client_secret = "782ddebbf58846f1a1d70a074d62ce1a"))
results = sp.search(q = f'{music}', limit = 1)
for idx, track in enumerate(results['tracks']['items']):
artist_name = track['artists'][0]['name']
album_info = track['album']
album_images = album_info['images'][0]
album_images_url = album_images['url']
album_artist = album_info['artists'][0]
album_artist_name = album_artist['name']
external_urls = track['external_urls']
#external_urls_json = json.loads(external_urls)
track_name = track['name']
spotify_urls = external_urls['spotify']
await ctx.send("```{}```".format(album_info))
| 63.312
| 383
| 0.529378
|
import discord
import spotipy
import os
import dropbox
import ffmpeg
import re
import subprocess
import asyncio
import json
from discord.ext import commands
from Cogs import Settings
from savify import Savify
from savify.types import Type, Format, Quality
from spotipy.oauth2 import SpotifyClientCredentials
from pathlib import Path
help_spotdl = """Command ini membutuhkan nama penyanyi dan judul lagu setelah command.
**Contoh / Example**
`acx music Neffex - gratefull`
"""
processing_file = """Memproses file musik dari server Acinonyx
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
uploading_file = """Mengunggah file musik ke discord, tunggu sebentar...
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
upload_dropbox = """File melebihi batas server discord 8MB, memulai upload ke dropbox tunggu sebentar
• [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)
• [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)"""
upload_complete = """File musik telah di upload ke dropbox silahkan klik link dibawah ini, file akan dihapus dalam 7 menit
"""
def setup(bot):
try:
settings = bot.get_cog("Settings")
except:
settings = None
bot.add_cog(Spotify2(bot, settings))
class Spotify2(commands.Cog):
def __init__(self, bot, settings):
self.preloads = ("Cogs.Settings")
self.bot = bot
self.settings = settings
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def music(self, ctx, *, music = None):
with open ("/home/nvstar/Corp-ina.py/Settings.json") as settingsJson:
data = json.load(settingsJson)
Freemium = data["Servers"]["440765395172065280"]["Members"]
user = "{}".format(ctx.author.id)
isOwner = self.settings.isOwner(ctx.author)
if user not in Freemium:
msg = "Ini adalah fitur ***PREMIUM***\n"
msg += "Dengan bergabung server kami, kamu dapat menggunakan fitur ini\n\n"
msg += "**[Klik Disini](https://discord.gg/vMcMe8f)** untuk bergabung dengan kami"
em = discord.Embed(color = 0XFF8C00,
description = msg)
em.set_author(name = "PREMION ONLY")
em.set_footer(text = "{}".format(ctx.author), icon_url= "{}".format(ctx.author.avatar_url))
return await ctx.send(embed = em)
if music == None:
em = discord.Embed(title = "<a:exclamation:750557709107068939>**COMMAND FAILURE**<a:exclamation:750557709107068939>",
color = 0XFF8C00,
description = help_spotdl)
em.set_thumbnail(url = "https://cdn.discordapp.com/attachments/518118753226063887/725569194304733435/photo.jpg")
em.set_footer(text = f"Request by : {ctx.author.name}", icon_url = f"{ctx.author.avatar_url}")
return await ctx.send(embed = em)
sp = spotipy.Spotify(auth_manager = SpotifyClientCredentials(client_id = "690da446e39b44a7baf8deaff12be418",
client_secret = "782ddebbf58846f1a1d70a074d62ce1a"))
results = sp.search(q = f'{music}', limit = 1)
for idx, track in enumerate(results['tracks']['items']):
artist_name = track['artists'][0]['name']
album_info = track['album']
album_images = album_info['images'][0]
album_images_url = album_images['url']
album_artist = album_info['artists'][0]
album_artist_name = album_artist['name']
external_urls = track['external_urls']
track_name = track['name']
spotify_urls = external_urls['spotify']
em = discord.Embed(title = None,
color = 0XFF8C00,
description = f"> [{album_artist_name} - {track_name}]({spotify_urls})\n> \n> • [DISCLAIMER](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER)\n> • [HOW THIS BOT IS WORK?](https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/HOW-SPOTIFY-DOWNLOADER-IS-WORK%3F)\n> \n> <a:acx_mp3:744868331382767617> **.MP3 Format**")
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = "Request by : {}".format(ctx.message.author.name),
icon_url = ctx.message.author.avatar_url)
msg = await ctx.send(embed = em, delete_after = 15)
await msg.add_reaction('<a:acx_mp3:744868331382767617>')
while True:
reaction, user = await self.bot.wait_for(event='reaction_add',)
if user == ctx.author:
emoji = str(reaction.emoji)
if emoji == '<a:acx_mp3:744868331382767617>':
await msg.delete()
s = Savify(api_credentials=("690da446e39b44a7baf8deaff12be418","782ddebbf58846f1a1d70a074d62ce1a"),
quality = Quality.BEST,
download_format = Format.MP3,
group='{}'.format(ctx.author.id),
output_path=Path('/home/nvstar/Corp-ina.py/Temp'))
em = discord.Embed(title = None,
color = 0XFF8C00,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + processing_file)
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
dld = await ctx.send(embed = em)
musicDownload = s.download("{}".format(spotify_urls))
checkServer = ctx.guild.premium_tier
if checkServer > 1:
em = discord.Embed(title = None,
color = 0XFF8C00 ,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + uploading_file)
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
await dld.edit(embed = em)
await ctx.send(file = discord.File(f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3"))
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3"], stdout = subprocess.PIPE).communicate()[0]
await dld.delete()
await ctx.send(f"{ctx.author.mention} :arrow_up:")
checkFile = os.path.getsize(f"/home/nvstar/Corp-ina.py/Temp/{ctx.author.id}/{artist_name} - {track_name}.mp3")
if checkFile > 8000000:
await dld.delete()
em = discord.Embed(
title = "<a:exclamation:750557709107068939>**EXCEED THE LIMIT**<a:exclamation:750557709107068939>",
color = 0XFF8C00,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n\n" + upload_dropbox)
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author}",
icon_url = f"{ctx.author.avatar_url}")
msg2 = await ctx.send(embed = em)
dropbox_access_token = "INmLpmjvCLQAAAAAAAAAAa--h2Jb571-pTJ_UHPdqp3XoMC0KJuSekPufnCI-a2y"
computer_path = '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)
dropbox_path = f"/Apps/Acinonyc music file/{album_artist_name} - {track_name}.mp3"
client = dropbox.Dropbox(dropbox_access_token)
print("[SUCCESS] dropbox account linked")
client.files_upload(open(computer_path, "rb").read(), dropbox_path, mode = dropbox.files.WriteMode("overwrite"))
print("[UPLOADED] {}".format(computer_path))
d = dropbox.Dropbox(dropbox_access_token)
target = dropbox_path
link_dropbox = d.sharing_create_shared_link(target)
dl_link = re.sub(r"\?dl\=0", "?dl=1", str(link_dropbox.url))
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)], stdout = subprocess.PIPE).communicate()[0]
em = discord.Embed(
title = None,
color = 0XFF8C00,
description = f"{upload_complete}\n**[DOWNLOAD HERE]({dl_link})**")
em.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em.set_thumbnail(url = album_images_url)
em.set_footer(text = f"{ctx.author.name}", icon_url = f"{ctx.author.avatar_url}")
await msg2.delete()
msg3 = await ctx.send(embed = em)
await asyncio.sleep(420)
dropbox_delete = d.files_delete(dropbox_path)
await msg3.delete()
em2 = discord.Embed(title = None,
color = 0XFF8C00 ,
description = f"[{album_artist_name} - {track_name}]({spotify_urls})\n" + uploading_file)
em2.set_author(name = "Spotify downloader",
url = "https://github.com/acinonyx-esports/Acinonyx-Bot/wiki/SPOTIFY-DOWNLOADER-DISCLAIMER",
icon_url = "https://cdn.discordapp.com/attachments/726031951101689897/739778620658155602/spotify-logo-png-7061.png")
em2.set_thumbnail(url = album_images_url)
em2.set_footer(text = f"{ctx.author}",
icon_url = ctx.message.author.avatar_url)
await dld.edit(embed = em2)
await ctx.send(file = discord.File('/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)))
botKernel_DeleteFile = subprocess.Popen(["rm", "-rf", '/home/nvstar/Corp-ina.py/Temp/{}/{} - {}.mp3'.format(ctx.author.id, artist_name, track_name)], stdout = subprocess.PIPE).communicate()[0]
await dld.delete()
await ctx.send(f"{ctx.author.mention} :arrow_up:")
@commands.command(pass_context=True)
async def printspot(self, ctx, *, music = None):
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
return
elif isOwner == False:
msgText = "Command ini sedang dalam tahap pengembangan"
em = discord.Embed(color = 0XFF8C00, description = msgText)
await ctx.channel.send(embed = em)
return
sp = spotipy.Spotify(auth_manager = SpotifyClientCredentials(client_id = "690da446e39b44a7baf8deaff12be418",
client_secret = "782ddebbf58846f1a1d70a074d62ce1a"))
results = sp.search(q = f'{music}', limit = 1)
for idx, track in enumerate(results['tracks']['items']):
artist_name = track['artists'][0]['name']
album_info = track['album']
album_images = album_info['images'][0]
album_images_url = album_images['url']
album_artist = album_info['artists'][0]
album_artist_name = album_artist['name']
external_urls = track['external_urls']
track_name = track['name']
spotify_urls = external_urls['spotify']
await ctx.send("```{}```".format(album_info))
| true
| true
|
1c42cf9f6ce6702159b65b786d05168c119ecdd8
| 10,139
|
py
|
Python
|
triangle.py
|
luigialberti/pytriangle
|
99ecafc299a692ef0f33e262bc7a1c912d3aa694
|
[
"MIT"
] | 8
|
2016-09-16T08:55:39.000Z
|
2020-02-07T09:49:59.000Z
|
triangle.py
|
luigialberti/pytriangle
|
99ecafc299a692ef0f33e262bc7a1c912d3aa694
|
[
"MIT"
] | 5
|
2015-11-15T15:34:37.000Z
|
2021-08-31T10:17:13.000Z
|
triangle.py
|
luigialberti/pytriangle
|
99ecafc299a692ef0f33e262bc7a1c912d3aa694
|
[
"MIT"
] | 3
|
2016-01-18T15:07:43.000Z
|
2021-02-25T08:25:06.000Z
|
#!/usr/bin/env python
import triangulate
import sys
"""
Interface to the TRIANGLE program by Jonathan Richard Shewchuk
"""
class Triangle:
def __init__(self):
"""
Constructor
"""
# create handles to hold the
# triangulation structures
self.hndls = [triangulate.new(),]
self.h_vor = triangulate.new()
self.area = None
self.mode = ''
self.has_points = False
self.has_segmts = False
self.has_trgltd = False
def set_points(self, pts, markers=[]):
"""
Set the points
@param pts [(x, y),...]
@param markers [m, ...] where m is 1 on the outer boundary and 0 in the interior or on an internal boundary
"""
if not markers:
# set all the markers to zero
mrks = [0 for i in range(len(pts))]
else:
npts = len(pts)
nmrk = len(markers)
if npts != nmrk:
print('%s: Warning. Incompatible size between marker and point lists len(pts)=%d != len(markers)=%d.' % \
(__file__, npts, nmrk))
n1 = min(npts, nmrk)
n2 = npts - nmrk
mrks = [markers[i] for i in range(n1)] + [0 for i in range(n2)]
else:
mrks = markers
triangulate.set_points(self.hndls[0], pts, mrks)
self.has_points = True
def set_segments(self, segs, markers=[]):
"""
Set the boundary contour.
@param segs [(p0, p1), ....] where p0 and p1 are point indices. The ordering is counterclockwise for an outer boundary
and clockwise for an internal boundary.
@param markers [m1,m2,...] optional markers to assign physical tags to segments
@note invoke this method after 'set_points'.
"""
if not markers:
# set all the markers to zero
mrks = [0 for i in range(len(segs))]
else:
nseg = len(segs)
nmrk = len(markers)
if nseg != nmrk:
print('%s: Warning. Incompatible size between marker and segment lists len(segs)=%d != len(markers)=%d.' % \
(__file__, nseg, nmrk))
n1 = min(nseg, nmrk)
n2 = nseg - nmrk
mrks = [markers[i] for i in range(n1)] + [0 for i in range(n2)]
else:
mrks = markers
triangulate.set_segments(self.hndls[0], segs, mrks)
self.has_sgmts = True
def set_holes(self, xy):
"""
Set the list of points in the holes.
@param xy [ (x0, y0), ... ] where (x0,y0) is a point inside a hole
"""
triangulate.set_holes(self.hndls[0], xy)
def set_regions(self, xy):
"""
Set the list of regions.
@param xy [ (x0, y0, r, a), ... ] where (x0,y0) is a point inside a region
r is the region attribute (tag)
a is the area constraint
"""
triangulate.set_regions(self.hndls[0], xy)
def set_point_attributes(self, att):
"""
Set the point attributes.
@param att [(a0,..), ...]
"""
triangulate.set_point_attributes(self.hndls[0], att)
def set_triangle_attributes(self, att):
"""
Set the triangle attributes.
@param att [(a0,..), ...]
"""
triangulate.set_triangle_attributes(self.hndls[1], att)
def triangulate(self, area=None, mode='pzq27eQ'):
"""
Perform an initial triangulation.
@param area is a max area constraint
@param mode a string of TRIANGLE switches. Refer to the TRIANGLE doc for more info about mode:
http://www.cs.cmu.edu/~quake/triangle.switch.html
@note invoke this after setting the boundary points, segments, and optionally hole positions.
"""
if not self.has_points and not self.has_segmts:
print('%s: Error. Must set points, segments, and optionally holes prior to calling "triangulate"' \
% (__file__))
return
# mode string
# z: start indexing with zero
# q<angle>: quality mesh
# e: edge
# p: planar straight line graph
# Q: quiet mode
self.mode = mode
if area:
self.area = area
mode += 'a%f'% area
if len(self.hndls) <= 1: self.hndls.append( triangulate.new() )
triangulate.triangulate(mode, self.hndls[0], self.hndls[1], self.h_vor)
self.has_trgltd = True
def get_num_points(self, level=-1):
"""
Get the number of nodes/points.
@param level refinement level (-1 for the last level). The coarsest level is 1.
@return number
"""
return triangulate.get_num_points(self.hndls[level])
def get_num_triangles(self, level=-1):
"""
Get the number of cells/triangles.
@param level refinement level (-1 for the last level). The coarsest level is 1
@return number
"""
return triangulate.get_num_triangles(self.hndls[level])
def refine(self, area_ratio=2.0):
"""
Refine the triangulation.
@param area_ratio represents the max triangle area reduction factor
@note should be called after performing an initial triangulation.
"""
if not self.has_trgltd:
print('%s: Error. Must triangulate prior to calling "refine"' \
% (__file__))
return
self.hndls.append( triangulate.new() )
mode = self.mode + 'cr'
if self.area:
self.area /= area_ratio
mode += 'a%f' % self.area
triangulate.triangulate(mode, self.hndls[-2],
self.hndls[-1], self.h_vor)
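# Each refinement appends a new handle, so earlier meshes remain retrievable through the level argument of the get_* methods.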
def get_points(self, level=-1):
"""
Get the points and their markers.
@param level refinement level (-1 for the last level). The coarsest level is 1. The level can be used
to retrieve previous triangulation refinements: level=-1 will retrieve the last,
level=-2 the previous one, etc.
@return [ [(x, y), marker], ...] where marker is 1 on the boundary and 0 inside.
"""
return triangulate.get_points(self.hndls[level])
def get_edges(self, level=-1):
"""
Get the list of edges.
@param level refinement level (-1 for the last level). The coarsest level is 1. The level can be used
to retrieve previous triangulation refinements: level=-1 will retrieve the last,
level=-2 the previous one, etc.
@return [((p0, p1), m),..) where (p0, p1) are point indices and
m is the boundary marker (0=interior, 1=boundary)
"""
return triangulate.get_edges(self.hndls[level])
def get_triangles(self, level=-1):
"""
Get the list of triangles.
@param level refinement level (-1 for the last level). The coarsest level is 1. The level can be used
to retrieve previous triangulation refinements: level=-1 will retrieve the last,
level=-2 the previous one, etc.
@return [([p0, p1, p2,..], (k0,k1,k2), [a0,a1,..]),..] where p0, p1, p2,.. are the
point indices at the triangle corners, optionally followed by intermediate points (k0, k1, k2) and triangle cell attributes a1,a2..
"""
return triangulate.get_triangles(self.hndls[level])
def get_point_attributes(self, level=-1):
"""
Get the point attributes.
@param level refinement level (-1 for the last level). The coarsest level is 1. The level can be used
to retrieve previous triangulation refinements: level=-1 will retrieve the last,
level=-2 the previous one, etc.
@return [(a0,...), ....]
"""
return triangulate.get_point_attributes(self.hndls[level])
def get_triangle_attributes(self, level=-1):
"""
Get the triangle attributes.
@param level refinement level (-1 for the last level). The coarsest level is 1. The level can be used
to retrieve previous triangulation refinements: level=-1 will retrieve the last,
level=-2 the previous one, etc.
@return [(a0,...), ....]
"""
return triangulate.get_triangle_attributes(self.hndls[level])
# backward compatibility
get_num_nodes = get_num_points
set_nodes = set_points
get_nodes = get_points
set_attributes = set_point_attributes
get_attributes = get_point_attributes
# add some visualization capability to fast check the mesh
def plot_mesh(self,level=-1):
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
mesh = self.get_triangles(level)
points = self.get_points(level)
verts = []
codes = []
fig, ax = plt.subplots()
for _t in mesh:
#([n1, n2, n3], (), [regiona_tag])
p = _t[0]
x1,y1 = points[p[0]][0]
x2,y2 = points[p[1]][0]
x3,y3 = points[p[2]][0]
verts.append((x1, y1))
verts.append((x2, y2))
verts.append((x3, y3))
verts.append((x1, y1))
codes.append(Path.MOVETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
ax.text((x1+x2+x3)/3, (y1+y2+y3)/3, str(_t[2]))
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='None', lw=1)
ax.add_patch(patch)
ax.margins(0.05)
ax.axis('equal')
return plt
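# A minimal usage sketch (left as comments so the module stays import-only); it assumes the
# compiled `triangulate` extension is importable and meshes the unit square:
#
#   tri = Triangle()
#   tri.set_points([(0, 0), (1, 0), (1, 1), (0, 1)], markers=[1, 1, 1, 1])
#   tri.set_segments([(0, 1), (1, 2), (2, 3), (3, 0)], markers=[1, 1, 1, 1])
#   tri.triangulate(area=0.05)
#   print(tri.get_num_points(), tri.get_num_triangles())
#   tri.refine()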
| 30.356287
| 140
| 0.547687
|
import triangulate
import sys
class Triangle:
def __init__(self):
self.hndls = [triangulate.new(),]
self.h_vor = triangulate.new()
self.area = None
self.mode = ''
self.has_points = False
self.has_segmts = False
self.has_trgltd = False
def set_points(self, pts, markers=[]):
if not markers:
mrks = [0 for i in range(len(pts))]
else:
npts = len(pts)
nmrk = len(markers)
if npts != nmrk:
print('%s: Warning. Incompatible size between marker and point lists len(pts)=%d != len(markers)=%d.' % \
(__file__, npts, nmrk))
n1 = min(npts, nmrk)
n2 = npts - nmrk
mrks = [markers[i] for i in range(n1)] + [0 for i in range(n2)]
else:
mrks = markers
triangulate.set_points(self.hndls[0], pts, mrks)
self.has_points = True
def set_segments(self, segs, markers=[]):
if not markers:
mrks = [0 for i in range(len(segs))]
else:
nseg = len(segs)
nmrk = len(markers)
if nseg != nmrk:
print('%s: Warning. Incompatible size between marker and segment lists len(segs)=%d != len(markers)=%d.' % \
(__file__, nseg, nmrk))
n1 = min(nseg, nmrk)
n2 = nseg - nmrk
mrks = [markers[i] for i in range(n1)] + [0 for i in range(n2)]
else:
mrks = markers
triangulate.set_segments(self.hndls[0], segs, mrks)
self.has_sgmts = True
def set_holes(self, xy):
triangulate.set_holes(self.hndls[0], xy)
def set_regions(self, xy):
triangulate.set_regions(self.hndls[0], xy)
def set_point_attributes(self, att):
triangulate.set_point_attributes(self.hndls[0], att)
def set_triangle_attributes(self, att):
triangulate.set_triangle_attributes(self.hndls[1], att)
def triangulate(self, area=None, mode='pzq27eQ'):
if not self.has_points and not self.has_segmts:
print('%s: Error. Must set points, segments, and optionally holes prior to calling "triangulate"' \
% (__file__))
return
self.mode = mode
if area:
self.area = area
mode += 'a%f'% area
if len(self.hndls) <= 1: self.hndls.append( triangulate.new() )
triangulate.triangulate(mode, self.hndls[0], self.hndls[1], self.h_vor)
self.has_trgltd = True
def get_num_points(self, level=-1):
return triangulate.get_num_points(self.hndls[level])
def get_num_triangles(self, level=-1):
return triangulate.get_num_triangles(self.hndls[level])
def refine(self, area_ratio=2.0):
if not self.has_trgltd:
print('%s: Error. Must triangulate prior to calling "refine"' \
% (__file__))
return
self.hndls.append( triangulate.new() )
mode = self.mode + 'cr'
if self.area:
self.area /= area_ratio
mode += 'a%f' % self.area
triangulate.triangulate(mode, self.hndls[-2],
self.hndls[-1], self.h_vor)
def get_points(self, level=-1):
return triangulate.get_points(self.hndls[level])
def get_edges(self, level=-1):
return triangulate.get_edges(self.hndls[level])
def get_triangles(self, level=-1):
return triangulate.get_triangles(self.hndls[level])
def get_point_attributes(self, level=-1):
return triangulate.get_point_attributes(self.hndls[level])
def get_triangle_attributes(self, level=-1):
return triangulate.get_triangle_attributes(self.hndls[level])
get_num_nodes = get_num_points
set_nodes = set_points
get_nodes = get_points
set_attributes = set_point_attributes
get_attributes = get_point_attributes
def plot_mesh(self,level=-1):
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
mesh = self.get_triangles(level)
points = self.get_points(level)
verts = []
codes = []
fig, ax = plt.subplots()
for _t in mesh:
p = _t[0]
x1,y1 = points[p[0]][0]
x2,y2 = points[p[1]][0]
x3,y3 = points[p[2]][0]
verts.append((x1, y1))
verts.append((x2, y2))
verts.append((x3, y3))
verts.append((x1, y1))
codes.append(Path.MOVETO)
codes.append(Path.LINETO)
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
ax.text((x1+x2+x3)/3, (y1+y2+y3)/3, str(_t[2]))
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='None', lw=1)
ax.add_patch(patch)
ax.margins(0.05)
ax.axis('equal')
return plt
| true
| true
|
1c42d0c6c4bef2adc42ca9de02a957245b99fc80
| 4,708
|
py
|
Python
|
script.module.exodus/lib/resources/lib/sources/ru/exfs.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2019-03-05T09:38:10.000Z
|
2019-03-05T09:38:10.000Z
|
script.module.exodus/lib/resources/lib/sources/ru/exfs.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | null | null | null |
script.module.exodus/lib/resources/lib/sources/ru/exfs.py
|
TheWardoctor/wardoctors-repo
|
893f646d9e27251ffc00ca5f918e4eb859a5c8f0
|
[
"Apache-2.0"
] | 1
|
2021-11-05T20:48:09.000Z
|
2021-11-05T20:48:09.000Z
|
# -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from resources.lib.sources.ru.lib import moonwalk
from resources.lib.sources.ru.lib import utils
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['ru']
self.domains = ['ex-fs.net']
self.base_link = 'http://ex-fs.net'
self.search_link = '/engine/ajax/search.php'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
data.update({'season': season, 'episode': episode})
return urllib.urlencode(data)
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
url = data.get('url')
season = data.get('season')
episode = data.get('episode')
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
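# Each iframe src is a candidate hoster embed; only moonwalk.cc / ex-fs.net links are resolved below.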
for link in r:
try:
urls = []
if 'moonwalk.cc' in link or 'ex-fs.net' in link: host = 'moonwalk'; direct = True; urls = moonwalk.moonwalk(link, url, season, episode)
for i in urls: sources.append({'source': host, 'quality': i['quality'], 'info': i.get('info', ''), 'language': 'ru', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': titles[0]}, XHR=True)
r = dom_parser.parse_dom(r, 'a', req='href')
r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if i[2] else i[1], i[2][0][1] if i[2] else '0') for i in r]
r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2]) for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
| 38.276423
| 193
| 0.569456
|
import re
import urllib
import urlparse
from resources.lib.sources.ru.lib import moonwalk
from resources.lib.sources.ru.lib import utils
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['ru']
self.domains = ['ex-fs.net']
self.base_link = 'http://ex-fs.net'
self.search_link = '/engine/ajax/search.php'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
data.update({'season': season, 'episode': episode})
return urllib.urlencode(data)
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
url = data.get('url')
season = data.get('season')
episode = data.get('episode')
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = dom_parser.parse_dom(r, 'iframe', req='src')
r = [i.attrs['src'] for i in r]
for link in r:
try:
urls = []
if 'moonwalk.cc' in link or 'ex-fs.net' in link: host = 'moonwalk'; direct = True; urls = moonwalk.moonwalk(link, url, season, episode)
for i in urls: sources.append({'source': host, 'quality': i['quality'], 'info': i.get('info', ''), 'language': 'ru', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year):
try:
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': titles[0]}, XHR=True)
r = dom_parser.parse_dom(r, 'a', req='href')
r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if i[2] else i[1], i[2][0][1] if i[2] else '0') for i in r]
r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2]) for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True)
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return
| true
| true
|
1c42d104334a1108c87083bd4fcb174c411cc6dd
| 2,699
|
py
|
Python
|
Environnement/Environnement.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
Environnement/Environnement.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
Environnement/Environnement.py
|
OctThe16th/BetterTrainingDataMnist_RL_GAN
|
fcc75c9ddf768d7c66c9fade3e86973a4c828624
|
[
"MIT"
] | null | null | null |
import numpy as np
from lightgbm import LGBMClassifier
from keras.datasets import mnist
from sklearn.metrics import f1_score
import warnings
warnings.filterwarnings("ignore")
class Environnement:
def __init__(self, amount_per_class):
self.amount_per_class = amount_per_class
(self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
self.x_train = self.x_train.astype(np.float32)
self.x_test = self.x_test.astype(np.float32)
self.x_train -= 127.5
self.x_train /= 127.5
self.x_test -= 127.5
self.x_test /= 127.5
self.class_indexes = [np.where(self.y_train == i) for i in range(10)]
choices = np.array([np.random.choice(class_index[0], 1000)
for class_index in self.class_indexes]).flatten()
self.gbm_x_val = np.reshape(self.x_train[choices], (self.x_train[choices].shape[0], 28*28))
self.y_val = self.y_train[choices]
self.x_train = self.x_train[~choices]
self.y_train = self.y_train[~choices]
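# Note: ~choices is a bitwise NOT of the index array (i.e. negative indices), not a set-complement of the sampled validation rows.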
self.x_train = np.reshape(self.x_train, (self.x_train.shape[0], 28, 28, 1))
self.gbm_x_train = np.reshape(self.x_train, (self.x_train.shape[0], 28 * 28))
self.gbm_x_test = np.reshape(self.x_test, (self.x_test.shape[0], 28 * 28))
self.model = LGBMClassifier(objective='multiclass', num_class=10, n_jobs=1, min_child_samples=1,
min_child_weight=0, min_data_in_bin=1, verbosity=-1, verbose=-1)
self.class_indexes = [np.where(self.y_train == i) for i in range(10)]
def get_values(self, actions, targets):
actions = np.reshape(actions, (actions.shape[0], 28*28))
self.model.fit(actions, targets)
pred_val = self.model.predict(self.gbm_x_val)
pred_test = self.model.predict(self.gbm_x_test)
val = f1_score(y_true=self.y_val, y_pred=pred_val, average=None)
test = f1_score(y_true=self.y_test, y_pred=pred_test, average=None)
return val, test
def get_whole_training_set(self):
return self.x_train, self.y_train
def query_state(self):
choices = np.array([np.random.choice(class_index[0], self.amount_per_class)
for class_index in self.class_indexes]).flatten()
state = self.x_train[choices]
targets = self.y_train[choices]
self.model.fit(self.gbm_x_train[choices], self.y_train[choices])
pred = self.model.predict(self.gbm_x_val)
f1 = f1_score(y_true=self.y_val, y_pred=pred, average=None)
return state, f1, targets
if __name__ == '__main__':
env = Environnement(1)
print(env.query_state())
| 39.115942
| 104
| 0.653946
|
import numpy as np
from lightgbm import LGBMClassifier
from keras.datasets import mnist
from sklearn.metrics import f1_score
import warnings
warnings.filterwarnings("ignore")
class Environnement:
def __init__(self, amount_per_class):
self.amount_per_class = amount_per_class
(self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
self.x_train = self.x_train.astype(np.float32)
self.x_test = self.x_test.astype(np.float32)
self.x_train -= 127.5
self.x_train /= 127.5
self.x_test -= 127.5
self.x_test /= 127.5
self.class_indexes = [np.where(self.y_train == i) for i in range(10)]
choices = np.array([np.random.choice(class_index[0], 1000)
for class_index in self.class_indexes]).flatten()
self.gbm_x_val = np.reshape(self.x_train[choices], (self.x_train[choices].shape[0], 28*28))
self.y_val = self.y_train[choices]
self.x_train = self.x_train[~choices]
self.y_train = self.y_train[~choices]
self.x_train = np.reshape(self.x_train, (self.x_train.shape[0], 28, 28, 1))
self.gbm_x_train = np.reshape(self.x_train, (self.x_train.shape[0], 28 * 28))
self.gbm_x_test = np.reshape(self.x_test, (self.x_test.shape[0], 28 * 28))
self.model = LGBMClassifier(objective='multiclass', num_class=10, n_jobs=1, min_child_samples=1,
min_child_weight=0, min_data_in_bin=1, verbosity=-1, verbose=-1)
self.class_indexes = [np.where(self.y_train == i) for i in range(10)]
def get_values(self, actions, targets):
actions = np.reshape(actions, (actions.shape[0], 28*28))
self.model.fit(actions, targets)
pred_val = self.model.predict(self.gbm_x_val)
pred_test = self.model.predict(self.gbm_x_test)
val = f1_score(y_true=self.y_val, y_pred=pred_val, average=None)
test = f1_score(y_true=self.y_test, y_pred=pred_test, average=None)
return val, test
def get_whole_training_set(self):
return self.x_train, self.y_train
def query_state(self):
choices = np.array([np.random.choice(class_index[0], self.amount_per_class)
for class_index in self.class_indexes]).flatten()
state = self.x_train[choices]
targets = self.y_train[choices]
self.model.fit(self.gbm_x_train[choices], self.y_train[choices])
pred = self.model.predict(self.gbm_x_val)
f1 = f1_score(y_true=self.y_val, y_pred=pred, average=None)
return state, f1, targets
if __name__ == '__main__':
env = Environnement(1)
print(env.query_state())
| true
| true
|
1c42d1487d91f70af8fd59810e40bc48fec8d65b
| 57
|
py
|
Python
|
CodeWars/7 Kyu/Build a square.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Build a square.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Build a square.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def generateShape(n):
return "\n".join(["+" * n] * n)
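A quick check of the kata solution above, assuming generateShape is in scope: for n = 3 it returns three rows of three plus signs.
print(generateShape(3))   # assumes the function defined above
# +++
# +++
# +++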
| 28.5
| 35
| 0.526316
|
def generateShape(n):
return "\n".join(["+" * n] * n)
| true
| true
|
1c42d191e50517487ce29edd00a0d3e85b40a9be
| 15,309
|
py
|
Python
|
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
RocketSimulation.py
|
pietrotrope/SolarSystemSimulation
|
905eec31eb73e1203ee23a32846954b30bbc5925
|
[
"MIT"
] | null | null | null |
import sys
import csv
import json
import math
import pygame
import numpy as np
from pygame.locals import *
import pandas as pd
from data import *
from agent import agentsList, Agent
global screenSize
screenSize = [1920, 1080]
def load_parameters(path):
package = []
file = open(path, 'r')
j = json.load(file)
for subgroup in j.values():
package.append([cast(x) for x in subgroup.values()])
env_variables = package.pop(4)
file.close()
return (package, env_variables)
def cast(x):
try:
return float(x)
except Exception:
return str(x)
class Environment:
def __init__(self, vars):
# Environmental Constants
self.elev, self.t, self.g, self.M_air, self.R, self.gamma, self.P_zero = vars # noqa
self.g_zero = self.g
self.Re = 6356766
# Layer base altitudes
self.hb = [0, 11000, 20000, 32000, 47000, 51000, 71000]
# Layer base pressures
self.Pb = [101325, 22632.1, 5474.89,
868.019, 110.906, 66.9389, 3.95642]
# Layer base temperatures
self.Tb = [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65]
# Layer lapse rates
self.Lm = [-0.0065, 0.0, 0.001, 0.0028, 0.0, -0.0028, -0.002]
def get_geopotential_altitude(self, z: float) -> float:
return self.Re*z / (self.Re+z)
def atmo_heterosphere_equ(self, z: float, a, b, c, d, e):
z_km = z/1000
return math.exp(a * z_km**4 + b * z_km**3 + c * z_km**2 + d * z_km + e) # noqa
def get_gravity(self, z: float) -> float:
return self.g_zero * (self.Re / (self.Re + z))**2
def get_temp(self, z: float, h: float) -> float:
if h <= 84852:
for i in range(len(self.hb)-1):
if self.hb[i] <= h <= self.hb[i+1]:
return (self.Tb[i] + self.Lm[i]*(h-self.hb[i]), i)
return (self.Tb[i+1] + self.Lm[i+1]*(h-self.hb[i+1]), i+1)
elif 86000 < z <= 91000:
return (186.87, 7)
elif 91000 < z <= 110000:
if 91000 < z <= 100000:
layer = 8
elif 100000 < z <= 110000:
layer = 9
return (
263.1905 - 76.3232 * math.sqrt(1 - ((z - 91000) / -19942.9)**2), # noqa
layer
)
elif 110000 < z <= 120000:
return (240 + 0.012 * (z - 110000), 10)
elif 120000 < z <= 1000000:
if 120000 < z <= 150000:
layer = 11
elif 150000 < z <= 200000:
layer = 12
elif 200000 < z <= 300000:
layer = 13
elif 300000 < z <= 500000:
layer = 14
elif 500000 < z <= 750000:
layer = 15
elif 750000 < z <= 1000000:
layer = 16
xi = (z - 120000) * (6356766 + 120000) / (6356766 + z)
return (1000 - 640 * math.exp(-0.00001875 * xi), layer)
def get_pressure(self, z: float, h: float, T: float, b: int) -> float:
if b <= 6:
if self.Lm[b] != 0:
return self.Pb[b] * (self.Tb[b]/T)**(self.g_zero*self.M_air/(self.R*self.Lm[b])) # noqa
else:
return self.Pb[b] * math.exp(-self.g_zero * self.M_air * (h-self.hb[b]) / (self.R*self.Tb[b])) # noqa
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.159582e-6, -4.836957e-4, -0.1425192, 13.47530)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 3.304895e-5, -0.009062730, 0.6516698, -11.03037)
elif b == 9:
return self.atmo_heterosphere_equ(
z, 0.000000, 6.693926e-5, -0.01945388, 1.719080, -47.75030)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.000000, -6.539316e-5, 0.02485568, -3.223620, 135.9355)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 2.283506e-7, -1.343221e-4, 0.02999016, -3.055446, 113.5764)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.209434e-8, -9.692458e-6, 0.003002041, -0.4523015, 19.19151)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 8.113942e-10, -9.822568e-7, 4.687616e-4, -0.1231710, 3.067409)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 9.814674e-11, -1.654439e-7, 1.148115e-4, -0.05431334, -2.011365)
elif b == 15:
return self.atmo_heterosphere_equ(
z, -7.835161e-11, 1.964589e-7, -1.657213e-4, 0.04305869, -14.77132)
elif b == 16:
return self.atmo_heterosphere_equ(
z, 2.813255e-11, -1.120689e-7, 1.695568e-4, -0.1188941, 14.56718)
def get_density(self, z: float, P: float, T: float, b) -> float:
if b <= 6:
return (P * self.M_air)/(self.R * T)
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, -3.322622E-06, 9.111460E-04, -0.2609971, 5.944694)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.873405e-05, -0.008492037, 0.6541179, -23.62010)
elif b == 9:
return self.atmo_heterosphere_equ(
z, -1.240774e-05, 0.005162063, -0.8048342, 55.55996, -1443.338)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.00000, -8.854164e-05, 0.03373254, -4.390837, 176.5294)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 3.661771e-07, -2.154344e-04, 0.04809214, -4.884744, 172.3597)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.906032e-08, -1.527799E-05, 0.004724294, -0.6992340, 20.50921)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 1.199282e-09, -1.451051e-06, 6.910474e-04, -0.1736220, -5.321644)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 1.140564e-10, -2.130756e-07, 1.570762e-04, -0.07029296, -12.89844)
elif b == 15:
return self.atmo_heterosphere_equ(
z, 8.105631e-12, -2.358417e-09, -2.635110e-06, -0.01562608, -20.02246)
elif b == 16:
return self.atmo_heterosphere_equ(
z, -3.701195e-12, -8.608611e-09, 5.118829e-05, -0.06600998, -6.137674)
def get_c(self, T: float) -> float:
return math.sqrt((self.gamma * self.R * T) / self.M_air)
def get_status(self, z: float):
h = round(self.get_geopotential_altitude(z), 0)
self.g = self.get_gravity(z)
self.T, b = self.get_temp(z, h)
self.P = self.get_pressure(z, h, self.T, b)
self.Rho = self.get_density(z, self.P, self.T, b)
self.c = self.get_c(self.T)
class System:
def __init__(self, params, env, burn_time: float):
package = params
print(package)
# Environment
self.env = env
# Burn time
self.num_steps = int(burn_time // self.env.t)
self.burn_time = self.num_steps * self.env.t
# Engine specs
self.etype = package[0][0]
package[0].pop(0)
if self.etype == "Liquid":
self.isp, self.thrust = package[0]
elif self.etype == "Solid":
self.isp, self.avg_thrust, path = package[0] # noqa
with(open(path)) as f:
csv_reader = csv.reader(f)
self.thrust_curve = {}
for row in csv_reader:
self.thrust_curve.update({
float(row[0]): float(row[1])
})
f.close()
# Fuel Specs
if self.etype == "Liquid":
self.OFratio, self.Reserve = package[1]
elif self.etype == "Solid":
self.OFratio = 0
self.Reserve = package[1][0]
# Flow Rate
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1 / (self.OFratio + 1))
self.dOx = (self.w - self.dF)
# Fuel & Oxidizer
self.F = (self.dF * self.burn_time)/(1 - self.Reserve/100)
self.Ox = (self.dOx * self.burn_time)/(1 - self.Reserve/100)
# Mass
self.dry_mass = package[2][0]
# Aerodynamics
self.Cd, self.cross_section = package[3]
# Output
self.csvout = package[4][0]
self.field_names = ["t", "thrust", "drag", "m", "v", "mach", "a", "altitude",
"asl", "twr", "max_v", "max_mach", "max_acc", "min_acc", "max_g", "min_g"]
with open(self.csvout, "w", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(self.field_names)
f.close()
# Flight
def launch(self):
"""Runs a simulation within the given parameters."""
# Variables setup
self.t = 0
self.altitude = 0
self.asl = self.altitude + self.env.elev
self.calc_mass()
self.env.get_status(self.asl)
self.calc_thrust()
self.calc_twr()
self.drag = 0
self.v = 0
self.max_v = 0
self.mach = 0
self.max_mach = 0
self.max_acc = 0
self.max_g = 0
self.min_acc = 0
self.min_g = 0
self.a = 0
self.j = 0
self.s = 0
# Used by matplotlib
self.data = [[], [], [], [], [], [], [], [], [], [], []]
        # Acceleration phase
for i in range(self.num_steps):
# Output management
self.add_data()
# Environment-related
self.update_env()
# Thrust-related
self.calc_thrust()
            # Acceleration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_propellant()
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a > self.max_acc:
self.max_acc = self.a
self.max_g = self.max_acc/self.env.g
if self.v > self.max_v:
self.max_v = self.v
self.max_mach = self.mach
self.thrust = 0
# Deceleration phase
while self.v > 0:
# Output management
self.add_data()
# Environment-related
self.update_env()
            # Acceleration/derivative-related
self.calc_acc()
self.calc_additional_derivatives()
# Position-related
self.set_altitude()
# Velocity-related
self.calc_velocity()
# Force-related
self.calc_drag()
self.calc_twr()
# Mass-related
self.calc_mass()
# Time-related
self.t += self.env.t
if self.a < self.min_acc:
self.min_acc = self.a
self.min_g = self.min_acc/self.env.g
self.output("max_v", "max_mach", "max_acc",
"min_acc", "max_g", "min_g")
    def suicide_burn(self):
        """Run a suicide burn simulation; will affect the ascent simulation."""
self.Vt = math.sqrt((2 * self.m * self.env.g) / (self.env.Rho * self.cross_section * self.Cd)) # noqa
# Mass
def calc_mass(self):
self.propellant_mass = (self.Ox + self.F)
self.m = self.propellant_mass + self.dry_mass
def calc_propellant(self):
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1/(self.OFratio+1))
self.dOx = (self.w - self.dF)
self.Ox -= self.dOx * self.env.t
self.F -= self.dF * self.env.t
# Position
def set_altitude(self):
self.altitude += self.v * self.env.t + (self.a * self.env.t**2)/2 # noqa
self.asl = self.altitude + self.env.elev
# Derivatives of position
def calc_velocity(self):
self.v += self.a * self.env.t
self.mach = self.v/self.env.c
def calc_acc(self):
self.a = (self.thrust - (self.m * self.env.g + self.drag)) / self.m
def calc_additional_derivatives(self):
self.j = (self.a - self.data[4][-1]) / self.env.t
self.s = (self.j - self.data[5][-1]) / self.env.t
# Forces
def calc_thrust(self):
if self.etype == "Liquid":
pass
elif self.etype == "Solid":
self.thrust = self.thrust_curve[round(self.t, 3)]
def calc_drag(self):
self.drag = 0.5 * (self.env.Rho * self.v**2 * self.Cd * self.cross_section) # noqa
def calc_twr(self):
self.twr = self.thrust / (self.m * self.env.g)
# Environment
def update_env(self):
self.env.get_status(self.asl)
    # Output
def output(self, *args):
values = []
for field in self.field_names:
value = str(round(eval(field, self.__dict__), 5))
values.append(value)
with open(self.csvout, "a", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(values)
f.close()
def add_data(self):
self.data[0].append(self.t)
self.data[1].append(self.altitude)
self.data[2].append(self.v)
self.data[3].append(self.env.c)
self.data[4].append(self.a)
self.data[5].append(self.j)
self.data[6].append(self.s)
self.data[7].append(self.drag)
self.output("t", "thrust", "drag", "m", "v",
"mach", "a", "altitude", "asl", "twr")
def run_simulation(burn_time):
params = load_parameters("RocketSimulationData/info.json")
env = Environment(params[1])
s = System(params[0], env, burn_time)
s.launch()
def renderAgents(screen, res, ratio):
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0, 0, 255), (0, 1080-108, 1920, 108))
pos = screenSize[1]-158 - res["altitude"]*ratio
# print("altitude: "+str(res["altitude"])+", pos: "+str(pos))
pygame.draw.rect(screen, (255, 255, 255), (940, pos, 20, 50))
pygame.display.update()
def simulateRocket(screen):
run_simulation(150)
df = pd.read_csv('RocketSimulationData/Flight.csv')
result = df.to_dict("index")
ratio = screenSize[1]/1000000
interestingPoint = None
for res in result:
# print("time: "+str(result[res]["t"])+" Altitude: "+str(result[res]["altitude"]))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
renderAgents(screen, result[res], ratio)
if result[res]["altitude"] < 800000:
interestingPoint = result[res]
pygame.display.update()
return interestingPoint
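The launch() loop above advances the state with a fixed-step scheme: a = (thrust - (m*g + drag)) / m, altitude += v*dt + a*dt**2/2, v += a*dt. A self-contained sketch of that same stepping logic follows; the constants (thrust, mass, drag coefficient, area, air density) are illustrative stand-ins rather than values from RocketSimulationData/info.json, and density is held constant instead of being looked up from the atmosphere model.
# Illustrative constants only; the real simulation reads its parameters from info.json.
dt, g, rho = 0.1, 9.80665, 1.225
thrust, m, Cd, area = 15000.0, 500.0, 0.5, 0.2
v = altitude = 0.0
for _ in range(100):                              # 100 steps of dt = 0.1 s, i.e. 10 s of powered flight
    drag = 0.5 * rho * v**2 * Cd * area           # same form as calc_drag()
    a = (thrust - (m * g + drag)) / m             # same force balance as calc_acc()
    altitude += v * dt + a * dt**2 / 2            # same update as set_altitude()
    v += a * dt                                   # same update as calc_velocity()
print("after 10 s: v = %.1f m/s, altitude = %.0f m" % (v, altitude))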
| 33.720264
| 118
| 0.528317
|
import sys
import csv
import json
import math
import pygame
import numpy as np
from pygame.locals import *
import pandas as pd
from data import *
from agent import agentsList, Agent
global screenSize
screenSize = [1920, 1080]
def load_parameters(path):
package = []
file = open(path, 'r')
j = json.load(file)
for subgroup in j.values():
package.append([cast(x) for x in subgroup.values()])
env_variables = package.pop(4)
file.close()
return (package, env_variables)
def cast(x):
try:
return float(x)
except Exception:
return str(x)
class Environment:
def __init__(self, vars):
self.elev, self.t, self.g, self.M_air, self.R, self.gamma, self.P_zero = vars
self.g_zero = self.g
self.Re = 6356766
self.hb = [0, 11000, 20000, 32000, 47000, 51000, 71000]
self.Pb = [101325, 22632.1, 5474.89,
868.019, 110.906, 66.9389, 3.95642]
self.Tb = [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65]
self.Lm = [-0.0065, 0.0, 0.001, 0.0028, 0.0, -0.0028, -0.002]
def get_geopotential_altitude(self, z: float) -> float:
return self.Re*z / (self.Re+z)
def atmo_heterosphere_equ(self, z: float, a, b, c, d, e):
z_km = z/1000
return math.exp(a * z_km**4 + b * z_km**3 + c * z_km**2 + d * z_km + e)
def get_gravity(self, z: float) -> float:
return self.g_zero * (self.Re / (self.Re + z))**2
def get_temp(self, z: float, h: float) -> float:
if h <= 84852:
for i in range(len(self.hb)-1):
if self.hb[i] <= h <= self.hb[i+1]:
return (self.Tb[i] + self.Lm[i]*(h-self.hb[i]), i)
return (self.Tb[i+1] + self.Lm[i+1]*(h-self.hb[i+1]), i+1)
elif 86000 < z <= 91000:
return (186.87, 7)
elif 91000 < z <= 110000:
if 91000 < z <= 100000:
layer = 8
elif 100000 < z <= 110000:
layer = 9
return (
263.1905 - 76.3232 * math.sqrt(1 - ((z - 91000) / -19942.9)**2),
layer
)
elif 110000 < z <= 120000:
return (240 + 0.012 * (z - 110000), 10)
elif 120000 < z <= 1000000:
if 120000 < z <= 150000:
layer = 11
elif 150000 < z <= 200000:
layer = 12
elif 200000 < z <= 300000:
layer = 13
elif 300000 < z <= 500000:
layer = 14
elif 500000 < z <= 750000:
layer = 15
elif 750000 < z <= 1000000:
layer = 16
xi = (z - 120000) * (6356766 + 120000) / (6356766 + z)
return (1000 - 640 * math.exp(-0.00001875 * xi), layer)
def get_pressure(self, z: float, h: float, T: float, b: int) -> float:
if b <= 6:
if self.Lm[b] != 0:
return self.Pb[b] * (self.Tb[b]/T)**(self.g_zero*self.M_air/(self.R*self.Lm[b]))
else:
return self.Pb[b] * math.exp(-self.g_zero * self.M_air * (h-self.hb[b]) / (self.R*self.Tb[b]))
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.159582e-6, -4.836957e-4, -0.1425192, 13.47530)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 3.304895e-5, -0.009062730, 0.6516698, -11.03037)
elif b == 9:
return self.atmo_heterosphere_equ(
z, 0.000000, 6.693926e-5, -0.01945388, 1.719080, -47.75030)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.000000, -6.539316e-5, 0.02485568, -3.223620, 135.9355)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 2.283506e-7, -1.343221e-4, 0.02999016, -3.055446, 113.5764)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.209434e-8, -9.692458e-6, 0.003002041, -0.4523015, 19.19151)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 8.113942e-10, -9.822568e-7, 4.687616e-4, -0.1231710, 3.067409)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 9.814674e-11, -1.654439e-7, 1.148115e-4, -0.05431334, -2.011365)
elif b == 15:
return self.atmo_heterosphere_equ(
z, -7.835161e-11, 1.964589e-7, -1.657213e-4, 0.04305869, -14.77132)
elif b == 16:
return self.atmo_heterosphere_equ(
z, 2.813255e-11, -1.120689e-7, 1.695568e-4, -0.1188941, 14.56718)
def get_density(self, z: float, P: float, T: float, b) -> float:
if b <= 6:
return (P * self.M_air)/(self.R * T)
elif b == 7:
return self.atmo_heterosphere_equ(
z, 0.000000, -3.322622E-06, 9.111460E-04, -0.2609971, 5.944694)
elif b == 8:
return self.atmo_heterosphere_equ(
z, 0.000000, 2.873405e-05, -0.008492037, 0.6541179, -23.62010)
elif b == 9:
return self.atmo_heterosphere_equ(
z, -1.240774e-05, 0.005162063, -0.8048342, 55.55996, -1443.338)
elif b == 10:
return self.atmo_heterosphere_equ(
z, 0.00000, -8.854164e-05, 0.03373254, -4.390837, 176.5294)
elif b == 11:
return self.atmo_heterosphere_equ(
z, 3.661771e-07, -2.154344e-04, 0.04809214, -4.884744, 172.3597)
elif b == 12:
return self.atmo_heterosphere_equ(
z, 1.906032e-08, -1.527799E-05, 0.004724294, -0.6992340, 20.50921)
elif b == 13:
return self.atmo_heterosphere_equ(
z, 1.199282e-09, -1.451051e-06, 6.910474e-04, -0.1736220, -5.321644)
elif b == 14:
return self.atmo_heterosphere_equ(
z, 1.140564e-10, -2.130756e-07, 1.570762e-04, -0.07029296, -12.89844)
elif b == 15:
return self.atmo_heterosphere_equ(
z, 8.105631e-12, -2.358417e-09, -2.635110e-06, -0.01562608, -20.02246)
elif b == 16:
return self.atmo_heterosphere_equ(
z, -3.701195e-12, -8.608611e-09, 5.118829e-05, -0.06600998, -6.137674)
def get_c(self, T: float) -> float:
return math.sqrt((self.gamma * self.R * T) / self.M_air)
def get_status(self, z: float):
h = round(self.get_geopotential_altitude(z), 0)
self.g = self.get_gravity(z)
self.T, b = self.get_temp(z, h)
self.P = self.get_pressure(z, h, self.T, b)
self.Rho = self.get_density(z, self.P, self.T, b)
self.c = self.get_c(self.T)
class System:
def __init__(self, params, env, burn_time: float):
package = params
print(package)
self.env = env
self.num_steps = int(burn_time // self.env.t)
self.burn_time = self.num_steps * self.env.t
self.etype = package[0][0]
package[0].pop(0)
if self.etype == "Liquid":
self.isp, self.thrust = package[0]
elif self.etype == "Solid":
self.isp, self.avg_thrust, path = package[0]
with(open(path)) as f:
csv_reader = csv.reader(f)
self.thrust_curve = {}
for row in csv_reader:
self.thrust_curve.update({
float(row[0]): float(row[1])
})
f.close()
if self.etype == "Liquid":
self.OFratio, self.Reserve = package[1]
elif self.etype == "Solid":
self.OFratio = 0
self.Reserve = package[1][0]
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1 / (self.OFratio + 1))
self.dOx = (self.w - self.dF)
self.F = (self.dF * self.burn_time)/(1 - self.Reserve/100)
self.Ox = (self.dOx * self.burn_time)/(1 - self.Reserve/100)
self.dry_mass = package[2][0]
self.Cd, self.cross_section = package[3]
self.csvout = package[4][0]
self.field_names = ["t", "thrust", "drag", "m", "v", "mach", "a", "altitude",
"asl", "twr", "max_v", "max_mach", "max_acc", "min_acc", "max_g", "min_g"]
with open(self.csvout, "w", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(self.field_names)
f.close()
def launch(self):
self.t = 0
self.altitude = 0
self.asl = self.altitude + self.env.elev
self.calc_mass()
self.env.get_status(self.asl)
self.calc_thrust()
self.calc_twr()
self.drag = 0
self.v = 0
self.max_v = 0
self.mach = 0
self.max_mach = 0
self.max_acc = 0
self.max_g = 0
self.min_acc = 0
self.min_g = 0
self.a = 0
self.j = 0
self.s = 0
self.data = [[], [], [], [], [], [], [], [], [], [], []]
for i in range(self.num_steps):
self.add_data()
self.update_env()
self.calc_thrust()
self.calc_acc()
self.calc_additional_derivatives()
self.set_altitude()
self.calc_velocity()
self.calc_drag()
self.calc_twr()
self.calc_propellant()
self.calc_mass()
self.t += self.env.t
if self.a > self.max_acc:
self.max_acc = self.a
self.max_g = self.max_acc/self.env.g
if self.v > self.max_v:
self.max_v = self.v
self.max_mach = self.mach
self.thrust = 0
while self.v > 0:
self.add_data()
self.update_env()
self.calc_acc()
self.calc_additional_derivatives()
self.set_altitude()
self.calc_velocity()
self.calc_drag()
self.calc_twr()
self.calc_mass()
self.t += self.env.t
if self.a < self.min_acc:
self.min_acc = self.a
self.min_g = self.min_acc/self.env.g
self.output("max_v", "max_mach", "max_acc",
"min_acc", "max_g", "min_g")
def suicide_burn(self):
self.Vt = math.sqrt((2 * self.m * self.env.g) / (self.env.Rho * self.cross_section * self.Cd))
def calc_mass(self):
self.propellant_mass = (self.Ox + self.F)
self.m = self.propellant_mass + self.dry_mass
def calc_propellant(self):
if self.etype == "Liquid":
self.w = (self.thrust/self.env.g_zero)/self.isp
elif self.etype == "Solid":
self.w = (self.avg_thrust/self.env.g_zero)/self.isp
self.dF = self.w * (1/(self.OFratio+1))
self.dOx = (self.w - self.dF)
self.Ox -= self.dOx * self.env.t
self.F -= self.dF * self.env.t
def set_altitude(self):
self.altitude += self.v * self.env.t + (self.a * self.env.t**2)/2
self.asl = self.altitude + self.env.elev
def calc_velocity(self):
self.v += self.a * self.env.t
self.mach = self.v/self.env.c
def calc_acc(self):
self.a = (self.thrust - (self.m * self.env.g + self.drag)) / self.m
def calc_additional_derivatives(self):
self.j = (self.a - self.data[4][-1]) / self.env.t
self.s = (self.j - self.data[5][-1]) / self.env.t
def calc_thrust(self):
if self.etype == "Liquid":
pass
elif self.etype == "Solid":
self.thrust = self.thrust_curve[round(self.t, 3)]
def calc_drag(self):
self.drag = 0.5 * (self.env.Rho * self.v**2 * self.Cd * self.cross_section)
def calc_twr(self):
self.twr = self.thrust / (self.m * self.env.g)
def update_env(self):
self.env.get_status(self.asl)
def output(self, *args):
values = []
for field in self.field_names:
value = str(round(eval(field, self.__dict__), 5))
values.append(value)
with open(self.csvout, "a", newline="") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(values)
f.close()
def add_data(self):
self.data[0].append(self.t)
self.data[1].append(self.altitude)
self.data[2].append(self.v)
self.data[3].append(self.env.c)
self.data[4].append(self.a)
self.data[5].append(self.j)
self.data[6].append(self.s)
self.data[7].append(self.drag)
self.output("t", "thrust", "drag", "m", "v",
"mach", "a", "altitude", "asl", "twr")
def run_simulation(burn_time):
params = load_parameters("RocketSimulationData/info.json")
env = Environment(params[1])
s = System(params[0], env, burn_time)
s.launch()
def renderAgents(screen, res, ratio):
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0, 0, 255), (0, 1080-108, 1920, 108))
pos = screenSize[1]-158 - res["altitude"]*ratio
pygame.draw.rect(screen, (255, 255, 255), (940, pos, 20, 50))
pygame.display.update()
def simulateRocket(screen):
run_simulation(150)
df = pd.read_csv('RocketSimulationData/Flight.csv')
result = df.to_dict("index")
ratio = screenSize[1]/1000000
interestingPoint = None
for res in result:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
renderAgents(screen, result[res], ratio)
if result[res]["altitude"] < 800000:
interestingPoint = result[res]
pygame.display.update()
return interestingPoint
| true
| true
|
1c42d2a7b9a24fe9fa7a92db6edb25a00cd190ee
| 7,223
|
py
|
Python
|
evaluate.py
|
wilsonloo/my_traffic_tf2_yolo3
|
322104de934794870822e1ea2494ee8228de2540
|
[
"MIT"
] | 1
|
2021-07-02T01:44:40.000Z
|
2021-07-02T01:44:40.000Z
|
evaluate.py
|
wilsonloo/my_traffic_tf2_yolo3
|
322104de934794870822e1ea2494ee8228de2540
|
[
"MIT"
] | null | null | null |
evaluate.py
|
wilsonloo/my_traffic_tf2_yolo3
|
322104de934794870822e1ea2494ee8228de2540
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : evaluate.py
# Author : YunYang1994
# Created date: 2019-02-21 15:30:26
# Description :
#
#================================================================
import cv2
import os
import shutil
import numpy as np
#lws
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
class YoloTest(object):
def __init__(self):
self.input_size = cfg.TEST.INPUT_SIZE
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
self.score_threshold = cfg.TEST.SCORE_THRESHOLD
self.iou_threshold = cfg.TEST.IOU_THRESHOLD
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.annotation_path = cfg.TEST.ANNOT_PATH
self.weight_file = cfg.TEST.WEIGHT_FILE
self.write_image = cfg.TEST.WRITE_IMAGE
self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
self.show_label = cfg.TEST.SHOW_LABEL
with tf.name_scope('input'):
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(self.input_data, self.trainable)
self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
with tf.name_scope('ema'):
ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
self.saver.restore(self.sess, self.weight_file)
def predict(self, image):
org_image = np.copy(image)
org_h, org_w, _ = org_image.shape
image_data = utils.image_preporcess(image, [self.input_size, self.input_size])
image_data = image_data[np.newaxis, ...]
pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
[self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
feed_dict={
self.input_data: image_data,
self.trainable: False
}
)
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
bboxes = utils.nms(bboxes, self.iou_threshold)
return bboxes
def evaluate(self):
predicted_dir_path = './mAP/predicted'
ground_truth_dir_path = './mAP/ground-truth'
if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
os.mkdir(predicted_dir_path)
os.mkdir(ground_truth_dir_path)
os.mkdir(self.write_image_path)
with open(self.annotation_path, 'r') as annotation_file:
for num, line in enumerate(annotation_file):
annotation = line.strip().split()
image_path = annotation[0]
image_name = image_path.split('/')[-1]
image = cv2.imread(image_path)
bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])
if len(bbox_data_gt) == 0:
bboxes_gt=[]
classes_gt=[]
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')
print('=> ground truth of %s:' % image_name)
num_bbox_gt = len(bboxes_gt)
with open(ground_truth_path, 'w') as f:
for i in range(num_bbox_gt):
class_name = self.classes[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
print('=> predict result of %s:' % image_name)
predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')
bboxes_pr = self.predict(image)
if self.write_image:
image = utils.draw_bbox(image, bboxes_pr, show_label=self.show_label)
cv2.imwrite(self.write_image_path+image_name, image)
with open(predict_result_path, 'w') as f:
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
def voc_2012_test(self, voc2012_test_path):
img_inds_file = os.path.join(voc2012_test_path, 'ImageSets', 'Main', 'test.txt')
with open(img_inds_file, 'r') as f:
txt = f.readlines()
image_inds = [line.strip() for line in txt]
results_path = 'results/VOC2012/Main'
if os.path.exists(results_path):
shutil.rmtree(results_path)
os.makedirs(results_path)
for image_ind in image_inds:
image_path = os.path.join(voc2012_test_path, 'JPEGImages', image_ind + '.jpg')
image = cv2.imread(image_path)
print('predict result of %s:' % image_ind)
bboxes_pr = self.predict(image)
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ' '.join([image_ind, score, xmin, ymin, xmax, ymax]) + '\n'
with open(os.path.join(results_path, 'comp4_det_test_' + class_name + '.txt'), 'a') as f:
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
if __name__ == '__main__': YoloTest().evaluate()
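evaluate() above writes one text file per image under ./mAP/, one box per line ('class [score] xmin ymin xmax ymax'), and the detections have already been filtered by utils.nms with iou_threshold. As a reference point only (the exact convention inside utils may differ, for example by a one-pixel offset), this is the standard intersection-over-union that such a threshold is compared against:
def box_iou(box_a, box_b):
    # Boxes are (xmin, ymin, xmax, ymax) in pixels; standard IoU, shown for illustration.
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter)
print(box_iou((10, 10, 50, 50), (30, 30, 70, 70)))   # about 0.14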
| 42.239766
| 112
| 0.5686
|
import cv2
import os
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
class YoloTest(object):
def __init__(self):
self.input_size = cfg.TEST.INPUT_SIZE
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
self.score_threshold = cfg.TEST.SCORE_THRESHOLD
self.iou_threshold = cfg.TEST.IOU_THRESHOLD
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.annotation_path = cfg.TEST.ANNOT_PATH
self.weight_file = cfg.TEST.WEIGHT_FILE
self.write_image = cfg.TEST.WRITE_IMAGE
self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
self.show_label = cfg.TEST.SHOW_LABEL
with tf.name_scope('input'):
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(self.input_data, self.trainable)
self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
with tf.name_scope('ema'):
ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
self.saver.restore(self.sess, self.weight_file)
def predict(self, image):
org_image = np.copy(image)
org_h, org_w, _ = org_image.shape
image_data = utils.image_preporcess(image, [self.input_size, self.input_size])
image_data = image_data[np.newaxis, ...]
pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
[self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
feed_dict={
self.input_data: image_data,
self.trainable: False
}
)
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
bboxes = utils.nms(bboxes, self.iou_threshold)
return bboxes
def evaluate(self):
predicted_dir_path = './mAP/predicted'
ground_truth_dir_path = './mAP/ground-truth'
if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
os.mkdir(predicted_dir_path)
os.mkdir(ground_truth_dir_path)
os.mkdir(self.write_image_path)
with open(self.annotation_path, 'r') as annotation_file:
for num, line in enumerate(annotation_file):
annotation = line.strip().split()
image_path = annotation[0]
image_name = image_path.split('/')[-1]
image = cv2.imread(image_path)
bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])
if len(bbox_data_gt) == 0:
bboxes_gt=[]
classes_gt=[]
else:
bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')
print('=> ground truth of %s:' % image_name)
num_bbox_gt = len(bboxes_gt)
with open(ground_truth_path, 'w') as f:
for i in range(num_bbox_gt):
class_name = self.classes[classes_gt[i]]
xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
print('=> predict result of %s:' % image_name)
predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')
bboxes_pr = self.predict(image)
if self.write_image:
image = utils.draw_bbox(image, bboxes_pr, show_label=self.show_label)
cv2.imwrite(self.write_image_path+image_name, image)
with open(predict_result_path, 'w') as f:
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\n'
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
def voc_2012_test(self, voc2012_test_path):
img_inds_file = os.path.join(voc2012_test_path, 'ImageSets', 'Main', 'test.txt')
with open(img_inds_file, 'r') as f:
txt = f.readlines()
image_inds = [line.strip() for line in txt]
results_path = 'results/VOC2012/Main'
if os.path.exists(results_path):
shutil.rmtree(results_path)
os.makedirs(results_path)
for image_ind in image_inds:
image_path = os.path.join(voc2012_test_path, 'JPEGImages', image_ind + '.jpg')
image = cv2.imread(image_path)
print('predict result of %s:' % image_ind)
bboxes_pr = self.predict(image)
for bbox in bboxes_pr:
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
class_name = self.classes[class_ind]
score = '%.4f' % score
xmin, ymin, xmax, ymax = list(map(str, coor))
bbox_mess = ' '.join([image_ind, score, xmin, ymin, xmax, ymax]) + '\n'
with open(os.path.join(results_path, 'comp4_det_test_' + class_name + '.txt'), 'a') as f:
f.write(bbox_mess)
print('\t' + str(bbox_mess).strip())
if __name__ == '__main__': YoloTest().evaluate()
| true
| true
|
1c42d415f4eb1f9c6c235e5b2f7f7495bf3abe7f
| 275
|
py
|
Python
|
CS_4320 Software Development 1/assignments/Sprint-4/SSO_Project/01-login/auth0login/urls.py
|
hickmanjv/hickmanjv
|
390e22317b9ace552855897af19963ffb416b1b7
|
[
"MIT"
] | null | null | null |
CS_4320 Software Development 1/assignments/Sprint-4/SSO_Project/01-login/auth0login/urls.py
|
hickmanjv/hickmanjv
|
390e22317b9ace552855897af19963ffb416b1b7
|
[
"MIT"
] | null | null | null |
CS_4320 Software Development 1/assignments/Sprint-4/SSO_Project/01-login/auth0login/urls.py
|
hickmanjv/hickmanjv
|
390e22317b9ace552855897af19963ffb416b1b7
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index),
path('dashboard', views.dashboard),
path('logout', views.logout),
path('', include('django.contrib.auth.urls')),
path('', include('social_django.urls')),
]
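The three named routes above expect a views module exposing index, dashboard and logout. The project's actual implementations are not shown in this file; a hypothetical minimal shape, using only stock Django helpers, would be:
# Hypothetical views.py sketch matching the urlpatterns above; not the project's actual code.
from django.contrib.auth import logout as django_logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
def index(request):
    return render(request, 'index.html')
@login_required
def dashboard(request):
    return render(request, 'dashboard.html')
def logout(request):
    django_logout(request)
    return redirect('/')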
| 22.916667
| 50
| 0.650909
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index),
path('dashboard', views.dashboard),
path('logout', views.logout),
path('', include('django.contrib.auth.urls')),
path('', include('social_django.urls')),
]
| true
| true
|
1c42d450691102fb5bc1c4a0d53bd558ecee17bb
| 7,293
|
py
|
Python
|
var/spack/repos/builtin/packages/grass/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2019-05-19T12:24:44.000Z
|
2019-05-24T10:58:09.000Z
|
var/spack/repos/builtin/packages/grass/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2018-09-20T18:32:50.000Z
|
2019-12-04T16:58:12.000Z
|
var/spack/repos/builtin/packages/grass/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-21T07:45:10.000Z
|
2019-09-21T07:45:10.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Grass(AutotoolsPackage):
"""GRASS GIS (Geographic Resources Analysis Support System), is a free
and open source Geographic Information System (GIS) software suite
used for geospatial data management and analysis, image processing,
graphics and maps production, spatial modeling, and visualization."""
homepage = "http://grass.osgeo.org"
version('7.6.1', '9ca74f9010d013f735737a90c65d8a7f')
version('7.4.4', '98ae22f8a97a83a4d99a537236639e9c')
version('7.4.3', '4f4462af7a95fe84ee21f3dd585dcb05')
version('7.4.2', 'bb3fc005e707f762c8fee36095e1df7f')
version('7.4.1', 'bf7add62cbeb05a3ed5ad832344ba524')
version('7.4.0', '15b9eb019d6c132c1a65455b3283cf69')
variant('cxx', default=True, description='Add c++ functionality')
variant('tiff', default=True, description='Add TIFF functionality')
variant('png', default=True, description='Add PNG functionality')
variant('postgres', default=False, description='Add PostgreSQL functionality')
variant('mysql', default=False, description='Add MySQL functionality')
variant('sqlite', default=True, description='Add SQLite functionality')
variant('opengl', default=True, description='Add OpenGL functionality')
variant('fftw', default=True, description='Add FFTW functionality')
variant('blas', default=False, description='Add BLAS functionality')
variant('lapack', default=False, description='Add LAPACK functionality')
variant('cairo', default=True, description='Add Cairo functionality')
variant('freetype', default=True, description='Add FreeType functionality')
variant('readline', default=False, description='Add Readline functionality')
variant('regex', default=True, description='Add regex functionality')
variant('pthread', default=False, description='Add POSIX threads functionality')
variant('openmp', default=False, description='Add OpenMP functionality')
variant('opencl', default=False, description='Add OpenCL functionality')
variant('bzlib', default=False, description='Add BZIP2 functionality')
variant('netcdf', default=False, description='Enable NetCDF support')
variant('geos', default=False, description='Geometry Engine for v.buffer')
# required components
depends_on('gmake@3.8.1:', type='build')
depends_on('zlib')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('proj')
depends_on('proj@:4', when='@:7.5')
depends_on('proj@:5', when='@:7.7')
depends_on('gdal')
depends_on('python@2.7:2.9', type=('build', 'run'))
depends_on('libx11')
# optional pieces
depends_on('libtiff', when='+tiff')
depends_on('libpng', when='+png')
depends_on('postgresql', when='+postgres')
depends_on('mariadb', when='+mysql')
depends_on('sqlite', when='+sqlite')
depends_on('gl', when='+opengl')
depends_on('fftw', when='+fftw')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
depends_on('cairo', when='+cairo')
depends_on('freetype', when='+freetype')
depends_on('readline', when='+readline')
depends_on('opencl', when='+opencl')
depends_on('bzip2', when='+bzlib')
depends_on('netcdf', when='+netcdf')
depends_on('geos', when='+geos')
def url_for_version(self, version):
base = 'https://grass.osgeo.org'
return '{0}/grass{1}/source/grass-{2}.tar.gz'.format(
base, version.up_to(2).joined, version.dotted
)
def configure_args(self):
spec = self.spec
args = [
'--without-odbc',
'--without-nls',
'--without-opendwg',
'--with-x',
'--with-gdal={0}/bin/gdal-config'.format(
spec['gdal'].prefix),
'--with-proj-share={0}/share/proj'.format(
spec['proj'].prefix),
]
if '+cxx' in spec:
args.append('--with-cxx')
else:
args.append('--without-cxx')
if '+tiff' in spec:
args.append('--with-tiff')
else:
args.append('--without-tiff')
if '+png' in spec:
args.append('--with-png')
else:
args.append('--without-png')
if '+postgres' in spec:
args.append('--with-postgres')
else:
args.append('--without-postgres')
if '+mysql' in spec:
args.append('--with-mysql')
else:
args.append('--without-mysql')
if '+sqlite' in spec:
args.append('--with-sqlite')
else:
args.append('--without-sqlite')
if '+opengl' in spec:
args.append('--with-opengl')
else:
args.append('--without-opengl')
if '+fftw' in spec:
args.append('--with-fftw')
else:
args.append('--without-fftw')
if '+blas' in spec:
args.append('--with-blas')
else:
args.append('--without-blas')
if '+lapack' in spec:
args.append('--with-lapack')
else:
args.append('--without-lapack')
if '+cairo' in spec:
args.append('--with-cairo')
else:
args.append('--without-cairo')
if '+freetype' in spec:
args.append('--with-freetype')
else:
args.append('--without-freetype')
if '+readline' in spec:
args.append('--with-readline')
else:
args.append('--without-readline')
if '+regex' in spec:
args.append('--with-regex')
else:
args.append('--without-regex')
if '+pthread' in spec:
args.append('--with-pthread')
else:
args.append('--without-pthread')
if '+openmp' in spec:
args.append('--with-openmp')
else:
args.append('--without-openmp')
if '+opencl' in spec:
args.append('--with-opencl')
else:
args.append('--without-opencl')
if '+bzlib' in spec:
args.append('--with-bzlib')
else:
args.append('--without-bzlib')
if '+netcdf' in spec:
args.append('--with-netcdf={0}/bin/nc-config'.format(
spec['netcdf'].prefix))
else:
args.append('--without-netcdf')
if '+geos' in spec:
args.append('--with-geos={0}/bin/geos-config'.format(
spec['geos'].prefix))
else:
args.append('--without-geos')
return args
    # see issue: https://github.com/spack/spack/issues/11325
    # 'Platform.make' is created after the configure step,
    # hence this fixup is invoked afterwards
@run_after('configure')
def fix_iconv_linking(self):
makefile = FileFilter('include/Make/Platform.make')
makefile.filter(r'^ICONVLIB\s*=\s*', 'ICONVLIB = -liconv')
return None
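configure_args() above expands every boolean variant into a --with-NAME/--without-NAME pair with one if/else block each. A condensed sketch of the same mapping, for illustration only (netcdf and geos are left out because they need config-script paths):
# Illustrative refactor of the repetitive blocks; not part of the Spack package itself.
simple_variants = ['cxx', 'tiff', 'png', 'postgres', 'mysql', 'sqlite',
                   'opengl', 'fftw', 'blas', 'lapack', 'cairo', 'freetype',
                   'readline', 'regex', 'pthread', 'openmp', 'opencl', 'bzlib']
def simple_flags(spec):
    args = []
    for name in simple_variants:
        prefix = '--with-' if '+' + name in spec else '--without-'
        args.append(prefix + name)
    return args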
| 35.231884
| 86
| 0.586864
|
from spack import *
class Grass(AutotoolsPackage):
homepage = "http://grass.osgeo.org"
version('7.6.1', '9ca74f9010d013f735737a90c65d8a7f')
version('7.4.4', '98ae22f8a97a83a4d99a537236639e9c')
version('7.4.3', '4f4462af7a95fe84ee21f3dd585dcb05')
version('7.4.2', 'bb3fc005e707f762c8fee36095e1df7f')
version('7.4.1', 'bf7add62cbeb05a3ed5ad832344ba524')
version('7.4.0', '15b9eb019d6c132c1a65455b3283cf69')
variant('cxx', default=True, description='Add c++ functionality')
variant('tiff', default=True, description='Add TIFF functionality')
variant('png', default=True, description='Add PNG functionality')
variant('postgres', default=False, description='Add PostgreSQL functionality')
variant('mysql', default=False, description='Add MySQL functionality')
variant('sqlite', default=True, description='Add SQLite functionality')
variant('opengl', default=True, description='Add OpenGL functionality')
variant('fftw', default=True, description='Add FFTW functionality')
variant('blas', default=False, description='Add BLAS functionality')
variant('lapack', default=False, description='Add LAPACK functionality')
variant('cairo', default=True, description='Add Cairo functionality')
variant('freetype', default=True, description='Add FreeType functionality')
variant('readline', default=False, description='Add Readline functionality')
variant('regex', default=True, description='Add regex functionality')
variant('pthread', default=False, description='Add POSIX threads functionality')
variant('openmp', default=False, description='Add OpenMP functionality')
variant('opencl', default=False, description='Add OpenCL functionality')
variant('bzlib', default=False, description='Add BZIP2 functionality')
variant('netcdf', default=False, description='Enable NetCDF support')
variant('geos', default=False, description='Geometry Engine for v.buffer')
depends_on('gmake@3.8.1:', type='build')
depends_on('zlib')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('proj')
depends_on('proj@:4', when='@:7.5')
depends_on('proj@:5', when='@:7.7')
depends_on('gdal')
depends_on('python@2.7:2.9', type=('build', 'run'))
depends_on('libx11')
depends_on('libtiff', when='+tiff')
depends_on('libpng', when='+png')
depends_on('postgresql', when='+postgres')
depends_on('mariadb', when='+mysql')
depends_on('sqlite', when='+sqlite')
depends_on('gl', when='+opengl')
depends_on('fftw', when='+fftw')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
depends_on('cairo', when='+cairo')
depends_on('freetype', when='+freetype')
depends_on('readline', when='+readline')
depends_on('opencl', when='+opencl')
depends_on('bzip2', when='+bzlib')
depends_on('netcdf', when='+netcdf')
depends_on('geos', when='+geos')
def url_for_version(self, version):
base = 'https://grass.osgeo.org'
return '{0}/grass{1}/source/grass-{2}.tar.gz'.format(
base, version.up_to(2).joined, version.dotted
)
def configure_args(self):
spec = self.spec
args = [
'--without-odbc',
'--without-nls',
'--without-opendwg',
'--with-x',
'--with-gdal={0}/bin/gdal-config'.format(
spec['gdal'].prefix),
'--with-proj-share={0}/share/proj'.format(
spec['proj'].prefix),
]
if '+cxx' in spec:
args.append('--with-cxx')
else:
args.append('--without-cxx')
if '+tiff' in spec:
args.append('--with-tiff')
else:
args.append('--without-tiff')
if '+png' in spec:
args.append('--with-png')
else:
args.append('--without-png')
if '+postgres' in spec:
args.append('--with-postgres')
else:
args.append('--without-postgres')
if '+mysql' in spec:
args.append('--with-mysql')
else:
args.append('--without-mysql')
if '+sqlite' in spec:
args.append('--with-sqlite')
else:
args.append('--without-sqlite')
if '+opengl' in spec:
args.append('--with-opengl')
else:
args.append('--without-opengl')
if '+fftw' in spec:
args.append('--with-fftw')
else:
args.append('--without-fftw')
if '+blas' in spec:
args.append('--with-blas')
else:
args.append('--without-blas')
if '+lapack' in spec:
args.append('--with-lapack')
else:
args.append('--without-lapack')
if '+cairo' in spec:
args.append('--with-cairo')
else:
args.append('--without-cairo')
if '+freetype' in spec:
args.append('--with-freetype')
else:
args.append('--without-freetype')
if '+readline' in spec:
args.append('--with-readline')
else:
args.append('--without-readline')
if '+regex' in spec:
args.append('--with-regex')
else:
args.append('--without-regex')
if '+pthread' in spec:
args.append('--with-pthread')
else:
args.append('--without-pthread')
if '+openmp' in spec:
args.append('--with-openmp')
else:
args.append('--without-openmp')
if '+opencl' in spec:
args.append('--with-opencl')
else:
args.append('--without-opencl')
if '+bzlib' in spec:
args.append('--with-bzlib')
else:
args.append('--without-bzlib')
if '+netcdf' in spec:
args.append('--with-netcdf={0}/bin/nc-config'.format(
spec['netcdf'].prefix))
else:
args.append('--without-netcdf')
if '+geos' in spec:
args.append('--with-geos={0}/bin/geos-config'.format(
spec['geos'].prefix))
else:
args.append('--without-geos')
return args
@run_after('configure')
def fix_iconv_linking(self):
makefile = FileFilter('include/Make/Platform.make')
makefile.filter(r'^ICONVLIB\s*=\s*', 'ICONVLIB = -liconv')
return None
| true
| true
|
1c42d46121402d7f9d32a038eb5d12b43d8dc541
| 102
|
py
|
Python
|
message_sender/apps.py
|
anisimovih/Message_sending_emulator
|
7aaf52849625cf658f06503792c7dd8e3ba157fe
|
[
"MIT"
] | null | null | null |
message_sender/apps.py
|
anisimovih/Message_sending_emulator
|
7aaf52849625cf658f06503792c7dd8e3ba157fe
|
[
"MIT"
] | 6
|
2021-03-18T22:32:27.000Z
|
2021-09-22T18:21:39.000Z
|
message_sender/apps.py
|
anisimovih/Message_sending_emulator
|
7aaf52849625cf658f06503792c7dd8e3ba157fe
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MessageSenderConfig(AppConfig):
name = 'message_sender'
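For this AppConfig to take effect, the app has to be listed in the project's INSTALLED_APPS; that settings file is not part of this record, so the fragment below only illustrates the usual registration.
# Illustrative settings.py fragment, not taken from this repository.
INSTALLED_APPS = [
    # ... Django's default apps ...
    'message_sender',          # or 'message_sender.apps.MessageSenderConfig'
]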
| 17
| 37
| 0.784314
|
from django.apps import AppConfig
class MessageSenderConfig(AppConfig):
name = 'message_sender'
| true
| true
|
1c42d484665d3871711fa452207d4b87be303a80
| 28,770
|
py
|
Python
|
lib/sqlalchemy/dialects/sybase/base.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | 5
|
2015-01-18T01:47:56.000Z
|
2016-01-30T14:58:58.000Z
|
lib/sqlalchemy/dialects/sybase/base.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/dialects/sybase/base.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
# sybase/base.py
# Copyright (C) 2010-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_):
return "UNITEXT"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT, # TODO: unsigned flags
'unsigned int': INTEGER, # TODO: unsigned flags
'unsigned smallint': SMALLINT, # TODO: unsigned flags
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC, # TODO
'double precision': NUMERIC, # TODO
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
# not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
        is True or False. When True, the connection should be configured
        such that a DDL can take place subsequently. When False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
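    # set_ddl_autocommit is the hook a DBAPI-specific dialect fills in.  Purely for
    # illustration (hypothetical driver name and attribute, not part of this module),
    # a subclass would flip autocommit on the raw connection around DDL, e.g.:
    #
    #     class SybaseExecutionContext_somedriver(SybaseExecutionContext):
    #         def set_ddl_autocommit(self, dbapi_connection, value):
    #             dbapi_connection.autocommit = value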
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
if select._limit:
#if select._limit == 1:
#s += "FIRST "
#else:
#s += "TOP %s " % (select._limit,)
s += "TOP %s " % (select._limit,)
if select._offset:
if not select._limit:
# FIXME: sybase doesn't allow an offset without a limit
# so use a huge value for TOP here
s += "TOP 1000000 "
s += "START AT %s " % (select._offset + 1,)
return s
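# Illustrative rendering (hedged, assuming a plain SELECT): limit=5 with
# offset=10 makes get_select_precolumns() above emit "TOP 5 START AT 11 ",
# i.e. roughly "SELECT TOP 5 START AT 11 name FROM users"; an offset without
# a limit falls back to the huge "TOP 1000000" placeholder noted in the FIXME.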
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name': Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
if isinstance(table_name, unicode):
table_name = table_name.encode("ascii")
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
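# Usage sketch (engine and table names are placeholders): the Inspector wrapper
# defined earlier forwards its info_cache into this call, so the id lookup can
# be shared by the reflection methods below, e.g.
#     insp = SybaseInspector(engine)
#     table_id = insp.get_table_id("some_table")   # reused by get_columns() etc.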
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement), default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
#is this necessary
#if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
INDEX_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
PK_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {"constrained_columns": constrained_columns,
"name": pks["name"]}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(view_name, unicode):
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
| 35.300613
| 84
| 0.575739
|
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value)
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_):
return "UNITEXT"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT,
'unsigned int': INTEGER,
'unsigned smallint': SMALLINT,
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC,
'double precision': NUMERIC,
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
'long varchar': TEXT,
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
# bind params for FIRST / TOP
if select._limit:
#if select._limit == 1:
#s += "FIRST "
#else:
#s += "TOP %s " % (select._limit,)
s += "TOP %s " % (select._limit,)
if select._offset:
if not select._limit:
# FIXME: sybase doesn't allow an offset without a limit
s += "TOP 1000000 "
s += "START AT %s " % (select._offset + 1,)
return s
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select):
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name': Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
if isinstance(table_name, unicode):
table_name = table_name.encode("ascii")
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement), default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
#is this necessary
#if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
INDEX_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
PK_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {"constrained_columns": constrained_columns,
"name": pks["name"]}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(view_name, unicode):
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
| true
| true
|
1c42d6673b8c1236d5c417327374f950d9c36a31
| 22,994
|
py
|
Python
|
scripts/NcbiTaxonomy/ncbitaxonomy.py
|
andrese52/CAMISIM
|
7d1c3ce707deec8901fa9d5a40fd7f37478e65f5
|
[
"Apache-2.0"
] | null | null | null |
scripts/NcbiTaxonomy/ncbitaxonomy.py
|
andrese52/CAMISIM
|
7d1c3ce707deec8901fa9d5a40fd7f37478e65f5
|
[
"Apache-2.0"
] | null | null | null |
scripts/NcbiTaxonomy/ncbitaxonomy.py
|
andrese52/CAMISIM
|
7d1c3ce707deec8901fa9d5a40fd7f37478e65f5
|
[
"Apache-2.0"
] | null | null | null |
# original from Dmitrij Turaev
__author__ = 'Peter Hofmann'
__version__ = '0.1.5'
import os
import time
import fnmatch
import tempfile
from taxonomynode import TaxonomyNode
from scripts.Validator.validator import Validator
from scripts.Archive.archive import Archive
class NcbiTaxonomy(Validator):
"""
Loading NCBI from SQL dump into dictionary for fast processing
@type name_to_taxids: dict[str, set[str]]
@type taxid_to_parent_taxid: dict[str, str]
@type taxid_to_name: dict[str, str]
@type taxid_to_rank: dict[str, str]
@type taxid_old_to_taxid_new: dict[str, str]
@type _has_node_tree: bool
"""
# TODO: if list of ranks given, validate ranks
default_ordered_legal_ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
name_to_taxids = {}
taxid_to_parent_taxid = {}
taxid_to_name = {}
taxid_to_rank = {}
taxid_old_to_taxid_new = {}
_has_node_tree = False
def __init__(self, taxonomy_path="./", temporary_directory=None, build_node_tree=False, verbose=True, logfile=None):
"""
Loading NCBI from SQL dump files into dictionary.
@attention: building a node tree requires several gigabytes of RAM !!!
@param taxonomy_path: directory containing ncbi dump
@type taxonomy_path: str | unicode
@param build_node_tree: Build a node tree; may be useful if a subtree is needed.
@type build_node_tree: bool
@param verbose: If False, messages are only written to the logfile, if given
@type verbose: bool
@param logfile: file stream or file path of logfile
@type logfile: None | file | FileIO | StringIO | str
@return: None
@rtype: None
"""
super(NcbiTaxonomy, self).__init__(label="NcbiTaxonomy", logfile=logfile, verbose=verbose)
assert isinstance(taxonomy_path, str), "Invalid taxonomy directory."
assert temporary_directory is None or self.validate_dir(temporary_directory)
assert isinstance(build_node_tree, bool)
assert os.path.exists(taxonomy_path), "Invalid taxonomy directory."
self._tmp_dir = None
if not self.validate_dir(taxonomy_path, silent=True):
archive = Archive()
assert archive.is_archive(taxonomy_path), "Can not read taxonomy. Unknown archive."
if temporary_directory is None:
self._tmp_dir = tempfile.mkdtemp()
else:
self._tmp_dir = tempfile.mkdtemp(dir=temporary_directory)
archive.extract_all(taxonomy_path, self._tmp_dir)
folder_name = os.listdir(self._tmp_dir)[0]
taxonomy_path = os.path.join(self._tmp_dir, folder_name)
assert self.validate_dir(taxonomy_path, file_names=["names.dmp", "merged.dmp", "nodes.dmp"])
taxonomy_path = self.get_full_path(taxonomy_path)
self._file_path_ncbi_names = os.path.join(taxonomy_path, "names.dmp")
self._file_path_ncbi_merged = os.path.join(taxonomy_path, "merged.dmp")
self._file_path_ncbi_nodes = os.path.join(taxonomy_path, "nodes.dmp")
# self._gi_taxid_file = os.path.join(taxonomy_directory, "gi_taxid_nucl.dmp")
start = time.time()
if len(NcbiTaxonomy.taxid_to_name) == 0:
NcbiTaxonomy._has_node_tree = build_node_tree
self._build_ncbi_taxonomy(build_node_tree)
self._read_names_file()
self._read_merged_file()
elif not NcbiTaxonomy._has_node_tree and build_node_tree:
self._build_ncbi_taxonomy(build_node_tree)
else:
self._logger.info("Using previously loaded Taxonomy")
end = time.time()
self._logger.info("Done ({}s)".format(round(end - start), 1))
def __exit__(self, type, value, traceback):
super(NcbiTaxonomy, self).__exit__(type, value, traceback)
if self.validate_dir(self._tmp_dir, silent=True):
import shutil
shutil.rmtree(self._tmp_dir)
self._tmp_dir = None
def __del__(self):
super(NcbiTaxonomy, self).__del__()
if self.validate_dir(self._tmp_dir, silent=True):
import shutil
shutil.rmtree(self._tmp_dir)
self._tmp_dir = None
def has_taxid(self, taxid):
"""
Return current taxid, in case it was merged
@attention: taxid must be passed as a string, not as an int
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: True if taxid exists in taxdump
@rtype: bool
"""
assert isinstance(taxid, str)
if taxid in NcbiTaxonomy.taxid_to_rank:
return True
return False
def get_updated_taxid(self, taxid):
"""
Return current taxid, in case it was merged
@attention: taxid must be passed as a string, not as an int
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: ncbi taxonomic identifier
@rtype: str | unicode
"""
assert isinstance(taxid, str)
if taxid in NcbiTaxonomy.taxid_to_rank:
return taxid
if taxid not in NcbiTaxonomy.taxid_old_to_taxid_new:
self._logger.error("Invalid taxid: '{}'".format(taxid))
raise ValueError("Invalid taxid")
taxid_new = NcbiTaxonomy.taxid_old_to_taxid_new[taxid]
self._logger.warning("Merged id: '{}' -> '{}'".format(taxid, taxid_new))
return taxid_new
def get_scientific_name(self, taxid):
"""
Return scientific name of ncbi taxonomic identifier
@attention: taxid must be passed as a string, not as an int
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: ncbi scientific name
@rtype: str | unicode
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_name:
return NcbiTaxonomy.taxid_to_name[taxid]
self._logger.error("No name available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def get_taxids_by_scientific_name(self, scientific_name, silent=False):
"""
Return all available taxid that fit the scientific name
@attention: Several taxids might match a single scientific name
@param scientific_name: ncbi scientific name or synonym
@type scientific_name: str
@return: set of ncbi taxonomic identifiers
@rtype: set[str | unicode] | None
"""
assert isinstance(scientific_name, str)
scientific_name = scientific_name.lower()
if scientific_name in NcbiTaxonomy.name_to_taxids:
return set(NcbiTaxonomy.name_to_taxids[scientific_name])
if not silent:
self._logger.error("No taxid available for scientific_name: {}".format(scientific_name))
raise ValueError("Invalid scientific name")
return None
def get_taxids_by_scientific_name_wildcard(self, scientific_name):
"""
Return all available taxid that fit the scientific name
@attention: Several taxids might match a single scientific name
@param scientific_name: ncbi scientific name or synonym
@type scientific_name: str
@return: set of ncbi taxonomic identifiers
@rtype: set[str | unicode] | None
"""
assert isinstance(scientific_name, str)
scientific_name = scientific_name.lower()
matches = fnmatch.filter(self.name_to_taxids.keys(), scientific_name)
set_of_tax_id = set()
for match in matches:
set_of_tax_id.update(set(self.name_to_taxids[match]))
if len(set_of_tax_id) > 1:
self._logger.warning(
"Several matches '{}' found for scientific_name: '{}'".format(", ".join(matches), scientific_name))
return set_of_tax_id
elif len(set_of_tax_id) == 0:
return None
return set_of_tax_id
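# Illustrative call (the pattern is hypothetical): fnmatch-style wildcards are
# accepted, so get_taxids_by_scientific_name_wildcard("escherichia col*")
# unions the taxids of every matching name and logs a warning when the union
# contains more than one taxid.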
def get_lineage_of_legal_ranks(self, taxid, ranks=None, default_value=None, as_name=False, inherit_rank=False):
"""
Return lineage of a specific taxonomic identifier, filtered by a list of legal ranks
@attention: The list of ranks determines the order of the returned taxonomic identifiers
@param taxid: ncbi taxonomic identifier
@type taxid: str
@param ranks: List of ncbi ranks in lower case
@type ranks: list[str]
@param default_value: Value at rank indexes at which the taxid of that specific rank is undefined
@type default_value: None | str
@param as_name: return scientific name if true, not taxonomic id
@type as_name: bool
@param inherit_rank: name unnamed rank names by known ones, species -> root
@type inherit_rank: bool
@return: list of ncbi taxonomic identifiers
@rtype: list[str|unicode|None]
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if ranks is None:
ranks = NcbiTaxonomy.default_ordered_legal_ranks
lineage = [default_value] * len(ranks)
original_rank = self.get_rank_of_taxid(taxid)
if original_rank is not None and original_rank in ranks:
if as_name:
lineage[ranks.index(original_rank)] = NcbiTaxonomy.taxid_to_name[taxid]
else:
lineage[ranks.index(original_rank)] = taxid
while taxid != "1":
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
rank = NcbiTaxonomy.taxid_to_rank[taxid]
if rank in ranks:
if as_name:
lineage[ranks.index(rank)] = NcbiTaxonomy.taxid_to_name[taxid]
else:
lineage[ranks.index(rank)] = taxid
# todo: sort ranks
if inherit_rank:
rank_previous = default_value
tmp_list = enumerate(lineage)
if self.default_ordered_legal_ranks.index(ranks[0]) < self.default_ordered_legal_ranks.index(ranks[-1]):
tmp_list = reversed(list(enumerate(lineage)))
for index, value in tmp_list:
if value == default_value:
lineage[index] = rank_previous
else:
rank_previous = value
return lineage
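# Worked example (identifiers are placeholders): with
# ranks=['superkingdom', 'genus', 'species'] and a species-level taxid the
# result is a 3-element list such as [<superkingdom_id>, <genus_id>, <species_id>];
# ranks absent from the lineage keep default_value, and inherit_rank=True copies
# the last known value over those gaps while walking the rank list.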
def get_lineage(self, taxid):
"""
Return lineage of a specific taxonomic identifier, filtered by a list of legal ranks
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: list of ncbi taxonomic identifiers
@rtype: list[str|unicode]
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if NcbiTaxonomy._has_node_tree:
return TaxonomyNode.by_name[taxid].get_lineage()
lineage = [taxid]
while taxid != "1":
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
lineage.append(taxid)
return lineage
def get_parent_taxid_of_legal_ranks(self, taxid, ranks=None):
"""
Returns taxonomic identifier of the first parent of legal rank and its rank
@param taxid: ncbi taxonomic identifier
@type taxid: str
@param ranks: List of ncbi ranks in lower case
@type ranks: list[str]
@return: tuple ncbi taxonomic identifiers and its rank
@rtype: tuple
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if ranks is None:
ranks = NcbiTaxonomy.default_ordered_legal_ranks
if taxid not in NcbiTaxonomy.taxid_to_parent_taxid:
self._logger.error("No parent taxid available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
while taxid is not None and taxid != "1" and NcbiTaxonomy.taxid_to_rank[taxid] not in ranks:
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
if NcbiTaxonomy.taxid_to_rank[taxid] not in ranks:
return None, None
return taxid, NcbiTaxonomy.taxid_to_rank[taxid]
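# Example (hedged): called with a strain-level taxid and the default ranks, the
# loop climbs until it reaches e.g. the species node and returns
# (species_taxid, 'species'); (None, None) signals that no ancestor carries one
# of the requested ranks.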
def get_parent_taxid(self, taxid):
"""
Return taxonomic identifier of the parent node
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: ncbi taxonomic identifiers
@rtype: str | unicode
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_parent_taxid:
return NcbiTaxonomy.taxid_to_parent_taxid[taxid]
self._logger.error("No parent taxid available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def get_rank_of_taxid(self, taxid):
"""
Return rank of ncbi taxonomic identifier
@param taxid: ncbi taxonomic identifier
@type taxid: str
@return: ncbi rank of taxonomic identifiers
@rtype: str | unicode
"""
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_rank:
return NcbiTaxonomy.taxid_to_rank[taxid]
self._logger.error("No rank available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def _add_nodes(self, taxid, parent_taxid='', rank='', name=''):
"""insert nodes into taxonomy tree."""
new_node = TaxonomyNode.by_name.get(taxid)
if new_node is None:
TaxonomyNode(taxid, parent_taxid, rank, name)
# check rank
if rank == 'no rank':
return
ind1 = TaxonomyNode.allranks.index(rank)
try:
if not TaxonomyNode.by_name[parent_taxid].rank == 'no rank':
ind2 = TaxonomyNode.allranks.index(TaxonomyNode.by_name[parent_taxid].rank)
assert ind1 >= ind2
# e.g. Ovis aries platyura ('species'), Ovis aries ('species')
except KeyError:
self._logger.debug("__add_nodes KeyError: {}".format(parent_taxid))
pass
# add new node to parent's all_child_nodes
# while parent_taxid in Node.byname:
# Node.byname[parent_taxid].all_child_nodes.add(newnode)
# parent_taxid = Node.byname[parent_taxid].taxid
@staticmethod
def _insert_into_dict(taxid, name, my_dict):
name = name.lower()
assert int(taxid)
if name not in my_dict:
my_dict[name] = set()
my_dict[name].add(taxid)
def _build_ncbi_taxonomy(self, build_node_tree):
""" parse NCBI taxonomy files."""
self._logger.info("Building taxonomy tree...")
if build_node_tree:
TaxonomyNode.by_name.clear()
# names.dmp (taxid, name, unique name, name class):
# 521095 | Atopobium parvulum ATCC 33793 | | synonym |
# 521095 | Atopobium parvulum DSM 20469 | | scientific name |
# 521095 | Atopobium parvulum str. DSM 20469 | | equivalent name |
# 521095 | Atopobium parvulum strain DSM 20469 | | equivalent name |
# e.g. entries for "1382" in names.dmp:
# 1382 | "Streptococcus parvulus" Weinberg et al. 1937 | | synonym |
# 1382 | Atopobium parvulum | | scientific name |
# 1382 | Atopobium parvulum (Weinberg et al. 1937) Collins and Wallbanks 1993 | | synonym |
# 1382 | Peptostreptococcus parvulus | | synonym |
# 1382 | Peptostreptococcus parvulus (Weinberg et al. 1937) Smith 1957 (Approved Lists 1980) | |synonym |
# 1382 | Streptococcus parvulus | | synonym |
# 1382 | Streptococcus parvulus (Weinberg et al. 1937) Cato 1983 | | synonym |
# 1382 | not "Streptococcus parvulus" Levinthal 1928 | | synonym |
self._logger.info("Reading 'nodes' file:\t'{}'".format(self._file_path_ncbi_nodes))
with open(self._file_path_ncbi_nodes) as file_handler:
for line in file_handler:
elements = [el.strip() for el in line.split('|')]
taxid, parent_taxid, rank = elements[0:3]
rank = rank.lower()  # should be lower-case in file, but it can't hurt to double-check
NcbiTaxonomy.taxid_to_parent_taxid[taxid] = parent_taxid
NcbiTaxonomy.taxid_to_rank[taxid] = rank
if not build_node_tree:
continue
assert taxid not in TaxonomyNode.by_name
self._add_nodes(taxid, parent_taxid=parent_taxid, rank=rank)
with open(self._file_path_ncbi_names) as file_handler:
for line in file_handler:
taxid, name, unique, name_class, sonst = [el.strip() for el in line.split('|')]
self._insert_into_dict(taxid, name, NcbiTaxonomy.name_to_taxids)
if not build_node_tree:
continue
try:
my_node = TaxonomyNode.by_name[taxid]
assert taxid == my_node.taxid
except KeyError:
self._logger.error("build_ncbi_taxonomy KeyError: {}".format(taxid))
continue
if name_class == 'scientific name':
my_node.unique_name = unique
my_node.scientific_name = name
elif name_class == 'synonym':
my_node.synonyms.append(name)
# example: Bacteroides corrodens: Campylobacter ureolyticus (taxid 827), Eikenella corrodens (taxid 539)
self._insert_into_dict(taxid, name, TaxonomyNode.by_synonym)
elif name_class == 'equivalent name':
my_node.equivalent_name.append(name)
self._insert_into_dict(taxid, name, TaxonomyNode.by_equivalent)
elif name_class == 'in-part' or name_class == 'includes' or \
name_class == 'blast name' or name_class == 'genbank common name' or\
name_class == 'misspelling' or name_class == 'authority':
pass
# update the taxonomy!
TaxonomyNode.update()
# read NCBI names file
def _read_names_file(self):
with open(self._file_path_ncbi_names) as fin:
self._logger.info("Reading 'names' file:\t'{}'".format(self._file_path_ncbi_names))
for line in fin:
# 65 | Herpetosiphon aurantiacus | | scientific name |
taxid, name, disambiguation, nametype, more = line.strip().split('|')
if nametype.strip() == 'scientific name':
NcbiTaxonomy.taxid_to_name[taxid.strip()] = name.strip()
# read NCBI merged file
def _read_merged_file(self):
with open(self._file_path_ncbi_merged) as fin:
self._logger.info("Reading 'merged' file:\t'{}'".format(self._file_path_ncbi_merged))
for line in fin:
# 5085 | 746128 |
old_taxid, new_taxid, sonst = line.strip().split('|')
NcbiTaxonomy.taxid_old_to_taxid_new[old_taxid.strip()] = new_taxid.strip()
# ###############
# newick
# ###############
# ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
def _add_lineage_to_tree(self, root, lineage):
"""
Adding a lineage to a dictionary based tree
@param root: Root node
@type root: dict[str,dict]
@param lineage: A lineage
@type lineage: list[str]
@rtype: None
"""
node = root
for taxid in lineage:
if taxid is None:
continue
if taxid not in node:
node[taxid] = {}
node = node[taxid]
# (A,B,(C,D)E)F;
def _node_to_newick(self, node, node_name):
"""
Create a newick string based on a tree
@param node:
@type node: dict[str,dict]
@param node_name:
@type node_name: str
@return: newick string
@rtype: str
"""
if len(node) == 0:
return node_name
child_nodes = []
for name in sorted(node.keys()):
child_nodes.append(self._node_to_newick(node[name], name))
return "({}){}".format(",".join(child_nodes), node_name)
def to_newick(self, stream, ranks=None):
"""
Export taxonomy as a newick-formatted string.
@attention: Always rooted with id '1'
@param stream: Output stream
@type stream: file | FileIO | StringIO
@param ranks: List of legal ranks
@type ranks: list[str]
@rtype: None
"""
# build tree
if ranks is None:
ranks = self.default_ordered_legal_ranks
root = {}
for taxid in sorted(self.taxid_to_rank.keys()):
lineage = self.get_lineage_of_legal_ranks(taxid, ranks=ranks)
self._add_lineage_to_tree(root, lineage)
# build newick string
stream.write("{};\n".format(self._node_to_newick(root, '1')))
def to_map(self, stream):
"""
Exporting a map of all taxonomic ids to its respective taxonomic name.
@param stream: Output stream
@type stream: file | FileIO | StringIO
@rtype: None
"""
# for taxid in set_of_strains:
for taxid, name in self.taxid_to_name.iteritems():
stream.write("{}\t{}\n".format(taxid, name))
def lca(self, tax_id1, tax_id2):
"""
@param tax_id1: ncbi taxonomic identifier
@type tax_id1: str
@param tax_id2: ncbi taxonomic identifier
@type tax_id2: str
@return: ncbi taxonomic identifier
@rtype: str
"""
ranks = self.default_ordered_legal_ranks
ranks.reverse()
consistent_lineage = True
lineage1 = self.get_lineage_of_legal_ranks(tax_id1, ranks=ranks)
lineage2 = self.get_lineage_of_legal_ranks(tax_id2, ranks=ranks)
for index, value in enumerate(lineage1):
if value is None:
continue
if lineage2[index] is None:
continue
if value != lineage2[index]:
consistent_lineage = False
continue
if not consistent_lineage:
self._logger.info("Inconsitent lineage: {} vs {}".format(tax_id1, tax_id2))
return value
if not consistent_lineage:
self._logger.info("Inconsitent lineage: {} vs {}".format(tax_id1, tax_id2))
return "1"
| 39.508591
| 132
| 0.608507
|
__author__ = 'Peter Hofmann'
__version__ = '0.1.5'
import os
import time
import fnmatch
import tempfile
from taxonomynode import TaxonomyNode
from scripts.Validator.validator import Validator
from scripts.Archive.archive import Archive
class NcbiTaxonomy(Validator):
default_ordered_legal_ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
name_to_taxids = {}
taxid_to_parent_taxid = {}
taxid_to_name = {}
taxid_to_rank = {}
taxid_old_to_taxid_new = {}
_has_node_tree = False
def __init__(self, taxonomy_path="./", temporary_directory=None, build_node_tree=False, verbose=True, logfile=None):
super(NcbiTaxonomy, self).__init__(label="NcbiTaxonomy", logfile=logfile, verbose=verbose)
assert isinstance(taxonomy_path, str), "Invalid taxonomy directory."
assert temporary_directory is None or self.validate_dir(temporary_directory)
assert isinstance(build_node_tree, bool)
assert os.path.exists(taxonomy_path), "Invalid taxonomy directory."
self._tmp_dir = None
if not self.validate_dir(taxonomy_path, silent=True):
archive = Archive()
assert archive.is_archive(taxonomy_path), "Can not read taxonomy. Unknown archive."
if temporary_directory is None:
self._tmp_dir = tempfile.mkdtemp()
else:
self._tmp_dir = tempfile.mkdtemp(dir=temporary_directory)
archive.extract_all(taxonomy_path, self._tmp_dir)
folder_name = os.listdir(self._tmp_dir)[0]
taxonomy_path = os.path.join(self._tmp_dir, folder_name)
assert self.validate_dir(taxonomy_path, file_names=["names.dmp", "merged.dmp", "nodes.dmp"])
taxonomy_path = self.get_full_path(taxonomy_path)
self._file_path_ncbi_names = os.path.join(taxonomy_path, "names.dmp")
self._file_path_ncbi_merged = os.path.join(taxonomy_path, "merged.dmp")
self._file_path_ncbi_nodes = os.path.join(taxonomy_path, "nodes.dmp")
start = time.time()
if len(NcbiTaxonomy.taxid_to_name) == 0:
NcbiTaxonomy._has_node_tree = build_node_tree
self._build_ncbi_taxonomy(build_node_tree)
self._read_names_file()
self._read_merged_file()
elif not NcbiTaxonomy._has_node_tree and build_node_tree:
self._build_ncbi_taxonomy(build_node_tree)
else:
self._logger.info("Using previously loaded Taxonomy")
end = time.time()
self._logger.info("Done ({}s)".format(round(end - start), 1))
def __exit__(self, type, value, traceback):
super(NcbiTaxonomy, self).__exit__(type, value, traceback)
if self.validate_dir(self._tmp_dir, silent=True):
import shutil
shutil.rmtree(self._tmp_dir)
self._tmp_dir = None
def __del__(self):
super(NcbiTaxonomy, self).__del__()
if self.validate_dir(self._tmp_dir, silent=True):
import shutil
shutil.rmtree(self._tmp_dir)
self._tmp_dir = None
def has_taxid(self, taxid):
assert isinstance(taxid, str)
if taxid in NcbiTaxonomy.taxid_to_rank:
return True
return False
def get_updated_taxid(self, taxid):
assert isinstance(taxid, str)
if taxid in NcbiTaxonomy.taxid_to_rank:
return taxid
if taxid not in NcbiTaxonomy.taxid_old_to_taxid_new:
self._logger.error("Invalid taxid: '{}'".format(taxid))
raise ValueError("Invalid taxid")
taxid_new = NcbiTaxonomy.taxid_old_to_taxid_new[taxid]
self._logger.warning("Merged id: '{}' -> '{}'".format(taxid, taxid_new))
return taxid_new
def get_scientific_name(self, taxid):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_name:
return NcbiTaxonomy.taxid_to_name[taxid]
self._logger.error("No name available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def get_taxids_by_scientific_name(self, scientific_name, silent=False):
assert isinstance(scientific_name, str)
scientific_name = scientific_name.lower()
if scientific_name in NcbiTaxonomy.name_to_taxids:
return set(NcbiTaxonomy.name_to_taxids[scientific_name])
if not silent:
self._logger.error("No taxid available for scientific_name: {}".format(scientific_name))
raise ValueError("Invalid scientific name")
return None
def get_taxids_by_scientific_name_wildcard(self, scientific_name):
assert isinstance(scientific_name, str)
scientific_name = scientific_name.lower()
matches = fnmatch.filter(self.name_to_taxids.keys(), scientific_name)
set_of_tax_id = set()
for match in matches:
set_of_tax_id.update(set(self.name_to_taxids[match]))
if len(set_of_tax_id) > 1:
self._logger.warning(
"Several matches '{}' found for scientific_name: '{}'".format(", ".join(matches), scientific_name))
return set_of_tax_id
elif len(set_of_tax_id) == 0:
return None
return set_of_tax_id
def get_lineage_of_legal_ranks(self, taxid, ranks=None, default_value=None, as_name=False, inherit_rank=False):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if ranks is None:
ranks = NcbiTaxonomy.default_ordered_legal_ranks
lineage = [default_value] * len(ranks)
original_rank = self.get_rank_of_taxid(taxid)
if original_rank is not None and original_rank in ranks:
if as_name:
lineage[ranks.index(original_rank)] = NcbiTaxonomy.taxid_to_name[taxid]
else:
lineage[ranks.index(original_rank)] = taxid
while taxid != "1":
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
rank = NcbiTaxonomy.taxid_to_rank[taxid]
if rank in ranks:
if as_name:
lineage[ranks.index(rank)] = NcbiTaxonomy.taxid_to_name[taxid]
else:
lineage[ranks.index(rank)] = taxid
if inherit_rank:
rank_previous = default_value
tmp_list = enumerate(lineage)
if self.default_ordered_legal_ranks.index(ranks[0]) < self.default_ordered_legal_ranks.index(ranks[-1]):
tmp_list = reversed(list(enumerate(lineage)))
for index, value in tmp_list:
if value == default_value:
lineage[index] = rank_previous
else:
rank_previous = value
return lineage
def get_lineage(self, taxid):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if NcbiTaxonomy._has_node_tree:
return TaxonomyNode.by_name[taxid].get_lineage()
lineage = [taxid]
while taxid != "1":
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
lineage.append(taxid)
return lineage
def get_parent_taxid_of_legal_ranks(self, taxid, ranks=None):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if ranks is None:
ranks = NcbiTaxonomy.default_ordered_legal_ranks
if taxid not in NcbiTaxonomy.taxid_to_parent_taxid:
self._logger.error("No parent taxid available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
while taxid is not None and taxid != "1" and NcbiTaxonomy.taxid_to_rank[taxid] not in ranks:
taxid = NcbiTaxonomy.taxid_to_parent_taxid[taxid]
if NcbiTaxonomy.taxid_to_rank[taxid] not in ranks:
return None, None
return taxid, NcbiTaxonomy.taxid_to_rank[taxid]
def get_parent_taxid(self, taxid):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_parent_taxid:
return NcbiTaxonomy.taxid_to_parent_taxid[taxid]
self._logger.error("No parent taxid available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def get_rank_of_taxid(self, taxid):
assert isinstance(taxid, str)
taxid = self.get_updated_taxid(taxid)
if taxid in NcbiTaxonomy.taxid_to_rank:
return NcbiTaxonomy.taxid_to_rank[taxid]
self._logger.error("No rank available for taxid: {}".format(taxid))
raise ValueError("Invalid taxid")
def _add_nodes(self, taxid, parent_taxid='', rank='', name=''):
new_node = TaxonomyNode.by_name.get(taxid)
if new_node is None:
TaxonomyNode(taxid, parent_taxid, rank, name)
if rank == 'no rank':
return
ind1 = TaxonomyNode.allranks.index(rank)
try:
if not TaxonomyNode.by_name[parent_taxid].rank == 'no rank':
ind2 = TaxonomyNode.allranks.index(TaxonomyNode.by_name[parent_taxid].rank)
assert ind1 >= ind2
except KeyError:
self._logger.debug("__add_nodes KeyError: {}".format(parent_taxid))
pass
# while parent_taxid in Node.byname:
# Node.byname[parent_taxid].all_child_nodes.add(newnode)
# parent_taxid = Node.byname[parent_taxid].taxid
@staticmethod
def _insert_into_dict(taxid, name, my_dict):
name = name.lower()
assert int(taxid)
if name not in my_dict:
my_dict[name] = set()
my_dict[name].add(taxid)
def _build_ncbi_taxonomy(self, build_node_tree):
self._logger.info("Building taxonomy tree...")
if build_node_tree:
TaxonomyNode.by_name.clear()
# names.dmp (taxid, name, unique name, name class):
# 521095 | Atopobium parvulum ATCC 33793 | | synonym |
# 521095 | Atopobium parvulum DSM 20469 | | scientific name |
# 521095 | Atopobium parvulum str. DSM 20469 | | equivalent name |
# 521095 | Atopobium parvulum strain DSM 20469 | | equivalent name |
# e.g. entries for "1382" in names.dmp:
# 1382 | "Streptococcus parvulus" Weinberg et al. 1937 | | synonym |
# 1382 | Atopobium parvulum | | scientific name |
# 1382 | Atopobium parvulum (Weinberg et al. 1937) Collins and Wallbanks 1993 | | synonym |
# 1382 | Peptostreptococcus parvulus | | synonym |
# 1382 | Peptostreptococcus parvulus (Weinberg et al. 1937) Smith 1957 (Approved Lists 1980) | |synonym |
# 1382 | Streptococcus parvulus | | synonym |
# 1382 | Streptococcus parvulus (Weinberg et al. 1937) Cato 1983 | | synonym |
# 1382 | not "Streptococcus parvulus" Levinthal 1928 | | synonym |
self._logger.info("Reading 'nodes' file:\t'{}'".format(self._file_path_ncbi_nodes))
with open(self._file_path_ncbi_nodes) as file_handler:
for line in file_handler:
elements = [el.strip() for el in line.split('|')]
taxid, parent_taxid, rank = elements[0:3]
                rank = rank.lower()  # should already be lower-case in the file, but it does not hurt to double-check
NcbiTaxonomy.taxid_to_parent_taxid[taxid] = parent_taxid
NcbiTaxonomy.taxid_to_rank[taxid] = rank
if not build_node_tree:
continue
assert taxid not in TaxonomyNode.by_name
self._add_nodes(taxid, parent_taxid=parent_taxid, rank=rank)
with open(self._file_path_ncbi_names) as file_handler:
for line in file_handler:
taxid, name, unique, name_class, sonst = [el.strip() for el in line.split('|')]
self._insert_into_dict(taxid, name, NcbiTaxonomy.name_to_taxids)
if not build_node_tree:
continue
try:
my_node = TaxonomyNode.by_name[taxid]
assert taxid == my_node.taxid
except KeyError:
self._logger.error("build_ncbi_taxonomy KeyError: {}".format(taxid))
continue
if name_class == 'scientific name':
my_node.unique_name = unique
my_node.scientific_name = name
elif name_class == 'synonym':
my_node.synonyms.append(name)
self._insert_into_dict(taxid, name, TaxonomyNode.by_synonym)
elif name_class == 'equivalent name':
my_node.equivalent_name.append(name)
self._insert_into_dict(taxid, name, TaxonomyNode.by_equivalent)
elif name_class == 'in-part' or name_class == 'includes' or \
name_class == 'blast name' or name_class == 'genbank common name' or\
name_class == 'misspelling' or name_class == 'authority':
pass
TaxonomyNode.update()
def _read_names_file(self):
with open(self._file_path_ncbi_names) as fin:
self._logger.info("Reading 'names' file:\t'{}'".format(self._file_path_ncbi_names))
for line in fin:
taxid, name, disambiguation, nametype, more = line.strip().split('|')
if nametype.strip() == 'scientific name':
NcbiTaxonomy.taxid_to_name[taxid.strip()] = name.strip()
def _read_merged_file(self):
with open(self._file_path_ncbi_merged) as fin:
self._logger.info("Reading 'merged' file:\t'{}'".format(self._file_path_ncbi_merged))
for line in fin:
old_taxid, new_taxid, sonst = line.strip().split('|')
NcbiTaxonomy.taxid_old_to_taxid_new[old_taxid.strip()] = new_taxid.strip()
    def _add_lineage_to_tree(self, node, lineage):
        # Insert a lineage (list of taxids, possibly containing None gaps) into
        # the nested-dict tree that to_newick() serialises.
        for taxid in lineage:
            if taxid is None:
                continue
            if taxid not in node:
                node[taxid] = {}
            node = node[taxid]
def _node_to_newick(self, node, node_name):
if len(node) == 0:
return node_name
child_nodes = []
for name in sorted(node.keys()):
child_nodes.append(self._node_to_newick(node[name], name))
return "({}){}".format(",".join(child_nodes), node_name)
def to_newick(self, stream, ranks=None):
if ranks is None:
ranks = self.default_ordered_legal_ranks
root = {}
for taxid in sorted(self.taxid_to_rank.keys()):
lineage = self.get_lineage_of_legal_ranks(taxid, ranks=ranks)
self._add_lineage_to_tree(root, lineage)
stream.write("{};\n".format(self._node_to_newick(root, '1')))
def to_map(self, stream):
        for taxid, name in self.taxid_to_name.items():
stream.write("{}\t{}\n".format(taxid, name))
def lca(self, tax_id1, tax_id2):
        ranks = list(self.default_ordered_legal_ranks)  # copy so the class-level default is not reversed in place
        ranks.reverse()
consistent_lineage = True
lineage1 = self.get_lineage_of_legal_ranks(tax_id1, ranks=ranks)
lineage2 = self.get_lineage_of_legal_ranks(tax_id2, ranks=ranks)
for index, value in enumerate(lineage1):
if value is None:
continue
if lineage2[index] is None:
continue
if value != lineage2[index]:
consistent_lineage = False
continue
if not consistent_lineage:
self._logger.info("Inconsitent lineage: {} vs {}".format(tax_id1, tax_id2))
return value
if not consistent_lineage:
self._logger.info("Inconsitent lineage: {} vs {}".format(tax_id1, tax_id2))
return "1"
| true
| true
|
1c42d6849c679e43dc2152022dd003a9307fffe0
| 1,386
|
py
|
Python
|
tools/video/reader.py
|
nghiaplt/SlowFast
|
326fd3c54408dab17d6383948a884a4d8f5da278
|
[
"Apache-2.0"
] | null | null | null |
tools/video/reader.py
|
nghiaplt/SlowFast
|
326fd3c54408dab17d6383948a884a4d8f5da278
|
[
"Apache-2.0"
] | null | null | null |
tools/video/reader.py
|
nghiaplt/SlowFast
|
326fd3c54408dab17d6383948a884a4d8f5da278
|
[
"Apache-2.0"
] | null | null | null |
import cv2
class VideoReader(object):
def __init__(self, cfg):
        self.source = (
            cfg.DEMO.DATA_SOURCE if cfg.DEMO.DATA_SOURCE != -1 else cfg.DEMO.DATA_VIDEO
        )
self.display_width = cfg.DEMO.DISPLAY_WIDTH
self.display_height = cfg.DEMO.DISPLAY_HEIGHT
try: # OpenCV needs int to read from webcam
self.source = int(self.source)
except ValueError:
pass
self.cap = cv2.VideoCapture(self.source)
if self.display_width > 0 and self.display_height > 0:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.display_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.display_height)
else:
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError('Video {} cannot be opened'.format(self.source))
def __iter__(self):
return self
def __next__(self):
was_read, frame = self.cap.read()
if not was_read:
            # Instead of raising StopIteration, restart the video from the
            # beginning so iteration never terminates.
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
frame = None
return was_read, frame
def clean(self):
self.cap.release()
cv2.destroyAllWindows()
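# A minimal usage sketch; the cfg object below is a stand-in that only carries
# the DEMO attributes this class reads, not the real SlowFast config, and the
# webcam index 0 is a placeholder.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo = SimpleNamespace(DATA_SOURCE=0, DATA_VIDEO="", DISPLAY_WIDTH=0, DISPLAY_HEIGHT=0)
    reader = VideoReader(SimpleNamespace(DEMO=demo))
    for was_read, frame in reader:
        if not was_read:
            break  # no frame could be read
        # ... process a single frame, then stop this sketch ...
        break
    reader.clean()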
| 30.8
| 78
| 0.621212
|
import cv2
class VideoReader(object):
def __init__(self, cfg):
        self.source = (
            cfg.DEMO.DATA_SOURCE if cfg.DEMO.DATA_SOURCE != -1 else cfg.DEMO.DATA_VIDEO
        )
self.display_width = cfg.DEMO.DISPLAY_WIDTH
self.display_height = cfg.DEMO.DISPLAY_HEIGHT
try:
self.source = int(self.source)
except ValueError:
pass
self.cap = cv2.VideoCapture(self.source)
if self.display_width > 0 and self.display_height > 0:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.display_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.display_height)
else:
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError('Video {} cannot be opened'.format(self.source))
def __iter__(self):
return self
def __next__(self):
was_read, frame = self.cap.read()
if not was_read:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
frame = None
return was_read, frame
def clean(self):
self.cap.release()
cv2.destroyAllWindows()
| true
| true
|
1c42d8a2ddd5404915c72f7f80b16840660da047
| 13,271
|
py
|
Python
|
log_mito_act/model_269.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito_act/model_269.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_mito_act/model_269.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 67250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
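# A minimal sketch of how an exported model like this one might be simulated,
# assuming pysb and numpy are installed; the time span and the observable
# printed below are arbitrary placeholders.
if __name__ == "__main__":
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 201)            # placeholder time points
    traj = ScipyOdeSimulator(model, tspan=tspan).run()
    print(traj.observables["ParpC_obs"][-1])      # cleaved PARP at the last time point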
| 79.467066
| 614
| 0.809736
|
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 67250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| true
| true
|
1c42d8fac077eeaae5c8cc7c84694efe9c4c9b9c
| 2,396
|
py
|
Python
|
MarketVersion/ConsumerCES_original.py
|
ntuecon/2018groupCE
|
51c4442bcae8fbd3841b12b8f87d5eefe11e23f8
|
[
"MIT"
] | 3
|
2018-03-13T07:30:47.000Z
|
2018-06-12T15:02:44.000Z
|
MarketVersion/ConsumerCES_original.py
|
ntuecon/2018groupCE
|
51c4442bcae8fbd3841b12b8f87d5eefe11e23f8
|
[
"MIT"
] | null | null | null |
MarketVersion/ConsumerCES_original.py
|
ntuecon/2018groupCE
|
51c4442bcae8fbd3841b12b8f87d5eefe11e23f8
|
[
"MIT"
] | 1
|
2018-03-20T08:10:22.000Z
|
2018-03-20T08:10:22.000Z
|
class Consumer:
"""This class is the optimization of individual choice of consumer"""
#def __init__(self,GoodPrices,FacPrices):
def __init__(self,alpha,beta,theta):
import numpy as np
#self.GoodPrices=np.array(GoodPrices)
#self.FacPrices=np.array(FacPrices)
self.alpha=np.array(alpha)
self.gamma=1.0
self.rho=0.0
self.beta=1.0*beta
self.theta=1.0*np.array(theta)
self.ng=len(self.alpha)
self.nf=len(self.theta)
def utility(self,GFvec,sign=1.0):
from math import log
import numpy as np
"""What's below is the linear algebra version of above equation"""
"""Objective function of consumer utility"""
GFvec=np.array(GFvec[0:self.ng+self.nf])
#GFvec=np.array(GFvec[0:self.ng])
return sign*((self.alpha.dot(GFvec[0:self.ng]**self.gamma))**((1-self.rho)/self.gamma)-np.ones(len(self.theta)).dot(self.beta*GFvec[self.ng:(self.ng+self.nf)]**(self.theta+1)/(self.theta+1)))
#return sign*(self.alpha.dot(np.log(GFvec)))
"""def budget(self,GFvec):
import numpy as np
return 100-self.GoodPrices.dot(GFvec)"""
def cons(self):
"""
1.Budget constraint
2&3.Nonnegative criterias
"""
import numpy as np
return ({'type' : 'ineq',
'fun' : lambda GFvec: self.budget(GFvec)},
{'type' : 'ineq',
'fun' : lambda GFvec: GFvec})
"""'fun' : lambda goods: np.array(self.FacPrices.dot(GFvec[self.ng:(self.ng+self.nf)])-self.GoodPrices.dot(GFvec[0:self.ng]))},"""
def utility_max(self):
import numpy as np
from scipy.optimize import minimize
"""
1.The package of minimize can be use as maximize ,if the
objective function is multiply by -1.
2."cons" set as the constrain of optimization problem.
3.If we use SLSQP method, the jacobian of objective function is necessary.
The jacobian means the partial derivative of every independent variables.
"""
#GFvec=[[]]*(self.ng+self.nf)
"""res = minimize(self.utility, np.ones(self.ng+self.nf), args=(-1.0,),"""
res = minimize(self.utility, [10.0]*(self.ng+self.nf), args=(-1.0,),
constraints=self.cons(), method='SLSQP', options={'disp': True})
return res.x
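# A standalone sketch of the pattern described in the docstring above: maximize
# a concave objective with scipy.optimize.minimize by negating it and passing
# inequality constraints to SLSQP. The utility, prices and income below are
# placeholders; the Consumer class itself is not used here because its budget()
# method is commented out in this version.
if __name__ == "__main__":
    import numpy as np
    from scipy.optimize import minimize

    alpha = np.array([0.4, 0.6])   # preference weights (placeholder)
    prices = np.array([1.0, 2.0])  # goods prices (placeholder)
    income = 100.0

    def neg_utility(x):
        return -(alpha @ np.log(x))  # negate a log utility so minimize() maximizes it

    cons = (
        {"type": "ineq", "fun": lambda x: income - prices @ x},  # budget constraint
        {"type": "ineq", "fun": lambda x: x},                    # non-negativity
    )
    res = minimize(neg_utility, [10.0, 10.0], constraints=cons, method="SLSQP")
    print(res.x)  # roughly [40, 30] for these placeholder numbers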
| 42.035088
| 199
| 0.597245
|
class Consumer:
def __init__(self,alpha,beta,theta):
import numpy as np
self.alpha=np.array(alpha)
self.gamma=1.0
self.rho=0.0
self.beta=1.0*beta
self.theta=1.0*np.array(theta)
self.ng=len(self.alpha)
self.nf=len(self.theta)
def utility(self,GFvec,sign=1.0):
from math import log
import numpy as np
GFvec=np.array(GFvec[0:self.ng+self.nf])
return sign*((self.alpha.dot(GFvec[0:self.ng]**self.gamma))**((1-self.rho)/self.gamma)-np.ones(len(self.theta)).dot(self.beta*GFvec[self.ng:(self.ng+self.nf)]**(self.theta+1)/(self.theta+1)))
def cons(self):
import numpy as np
return ({'type' : 'ineq',
'fun' : lambda GFvec: self.budget(GFvec)},
{'type' : 'ineq',
'fun' : lambda GFvec: GFvec})
def utility_max(self):
import numpy as np
from scipy.optimize import minimize
res = minimize(self.utility, [10.0]*(self.ng+self.nf), args=(-1.0,),
constraints=self.cons(), method='SLSQP', options={'disp': True})
return res.x
| true
| true
|
1c42d9b8fbcd7a21db15d1087e2f4fe97fb013bd
| 376
|
py
|
Python
|
apis/urls.py
|
jeffshek/betterself
|
51468253fc31373eb96e0e82189b9413f3d76ff5
|
[
"MIT"
] | 98
|
2017-07-29T14:26:36.000Z
|
2022-02-28T04:10:15.000Z
|
apis/urls.py
|
jeffshek/betterself
|
51468253fc31373eb96e0e82189b9413f3d76ff5
|
[
"MIT"
] | 1,483
|
2017-05-30T00:05:56.000Z
|
2022-03-31T12:37:06.000Z
|
apis/urls.py
|
lawrendran/betterself
|
51468253fc31373eb96e0e82189b9413f3d76ff5
|
[
"MIT"
] | 13
|
2017-11-08T00:02:35.000Z
|
2022-02-28T04:10:32.000Z
|
from django.conf.urls import include, url
# note for api urls, even though app is plural, link is singular!
# aka /api/v1, NOT /apis/v1
urlpatterns = [
url(r'^v1/', include('apis.betterself.v1.urls')),
url(r'^v1/rescuetime/', include('apis.rescuetime.v1.urls')),
url(r'^fitbit/', include('apis.fitbit.urls')),
url(r'^twilio/', include('apis.twilio.urls')),
]
| 34.181818
| 65
| 0.662234
|
from django.conf.urls import include, url
urlpatterns = [
url(r'^v1/', include('apis.betterself.v1.urls')),
url(r'^v1/rescuetime/', include('apis.rescuetime.v1.urls')),
url(r'^fitbit/', include('apis.fitbit.urls')),
url(r'^twilio/', include('apis.twilio.urls')),
]
| true
| true
|
1c42dbdcbc2327f0dbb255eddbbde2de931a4503
| 1,043
|
py
|
Python
|
third_party/mosquitto/test/broker/02-subpub-qos0.py
|
HowJMay/simple-tangle-accelerator
|
d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381
|
[
"MIT"
] | null | null | null |
third_party/mosquitto/test/broker/02-subpub-qos0.py
|
HowJMay/simple-tangle-accelerator
|
d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381
|
[
"MIT"
] | null | null | null |
third_party/mosquitto/test/broker/02-subpub-qos0.py
|
HowJMay/simple-tangle-accelerator
|
d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381
|
[
"MIT"
] | 1
|
2021-05-04T16:09:27.000Z
|
2021-05-04T16:09:27.000Z
|
#!/usr/bin/env python3
# Test whether a client subscribed to a topic receives its own message sent to that topic.
from mosq_test_helper import *
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, "subpub/qos0", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish("subpub/qos0", qos=0, payload="message")
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.do_send_receive(sock, publish_packet, publish_packet, "publish")
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
| 26.74359
| 93
| 0.74209
|
from mosq_test_helper import *
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("subpub-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, "subpub/qos0", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish("subpub/qos0", qos=0, payload="message")
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
mosq_test.do_send_receive(sock, publish_packet, publish_packet, "publish")
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
| true
| true
|
1c42dc2b919e1e96da2ac976df6330be3dea9190
| 775
|
py
|
Python
|
django_q/migrations/0007_ormq.py
|
Balletie/django-q
|
03abbc960f8c35d0c4206c60ad01f08085539609
|
[
"MIT"
] | null | null | null |
django_q/migrations/0007_ormq.py
|
Balletie/django-q
|
03abbc960f8c35d0c4206c60ad01f08085539609
|
[
"MIT"
] | null | null | null |
django_q/migrations/0007_ormq.py
|
Balletie/django-q
|
03abbc960f8c35d0c4206c60ad01f08085539609
|
[
"MIT"
] | 2
|
2020-11-10T01:14:24.000Z
|
2021-06-11T12:50:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_q', '0006_auto_20150805_1817'),
]
operations = [
migrations.CreateModel(
name='OrmQ',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('key', models.CharField(max_length=100)),
('payload', models.TextField()),
('lock', models.DateTimeField(null=True)),
],
options={
'verbose_name_plural': 'Queued tasks',
'verbose_name': 'Queued task',
},
),
]
| 27.678571
| 114
| 0.541935
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_q', '0006_auto_20150805_1817'),
]
operations = [
migrations.CreateModel(
name='OrmQ',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('key', models.CharField(max_length=100)),
('payload', models.TextField()),
('lock', models.DateTimeField(null=True)),
],
options={
'verbose_name_plural': 'Queued tasks',
'verbose_name': 'Queued task',
},
),
]
| true
| true
|
1c42ddcd3886317cd61f04751a3f3f161a5a9dda
| 392
|
py
|
Python
|
fragroutepluspy/modules/mod_echo.py
|
CreatePhotonW/fragroutepluspy
|
a00ae3aeb4bbd2c6adaad29b32ae8b496dac6203
|
[
"MIT"
] | 1
|
2021-01-29T13:27:16.000Z
|
2021-01-29T13:27:16.000Z
|
fragroutepluspy/modules/mod_echo.py
|
CreatePhotonW/fragroutepluspy
|
a00ae3aeb4bbd2c6adaad29b32ae8b496dac6203
|
[
"MIT"
] | null | null | null |
fragroutepluspy/modules/mod_echo.py
|
CreatePhotonW/fragroutepluspy
|
a00ae3aeb4bbd2c6adaad29b32ae8b496dac6203
|
[
"MIT"
] | 1
|
2021-01-28T16:34:39.000Z
|
2021-01-28T16:34:39.000Z
|
from .mod import Mod
class Echo(Mod):
name = "echo"
usage = "echo <string> ..."
description = """Echo the string argument(s) to standard output."""
def parse_args(self, args):
self.message = None
if len(args) < 1:
raise Mod.ArgumentException(self)
self.message = " ".join(args)
def apply(self, packets):
print(self.message)
| 23.058824
| 71
| 0.584184
|
from .mod import Mod
class Echo(Mod):
name = "echo"
usage = "echo <string> ..."
description = """Echo the string argument(s) to standard output."""
def parse_args(self, args):
self.message = None
if len(args) < 1:
raise Mod.ArgumentException(self)
self.message = " ".join(args)
def apply(self, packets):
print(self.message)
| true
| true
|
1c42df1abd266d6b760f81e1219aaabbc3e40b0f
| 20,727
|
py
|
Python
|
userbot/modules/stickers.py
|
ryzxzn/Man-Userbot
|
e7d15c073e5c7d536205b36b6e975b294ed4a8c7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/stickers.py
|
ryzxzn/Man-Userbot
|
e7d15c073e5c7d536205b36b6e975b294ed4a8c7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/stickers.py
|
ryzxzn/Man-Userbot
|
e7d15c073e5c7d536205b36b6e975b294ed4a8c7
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# Recode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import asyncio
import io
import math
import random
import urllib.request
from os import remove
from PIL import Image
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputStickerSetID,
MessageMediaPhoto,
)
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot import S_PACK_NAME as custompack
from userbot import bot
from userbot.events import man_cmd
from userbot.utils import edit_or_reply
KANGING_STR = [
"Colong Sticker dulu yee kan",
"Ini Sticker aku colong yaa DUARR!",
"Waw Stickernya Bagus Nih...Colong Dulu Yekan..",
"ehh, keren nih... gua colong ya stickernya...",
"Boleh juga ni Sticker Colong ahh~",
]
@bot.on(man_cmd(outgoing=True, pattern=r"(?:tikel|kang)\s?(.)?"))
async def kang(args):
user = await bot.get_me()
if not user.username:
user.username = user.first_name
message = await args.get_reply_message()
photo = None
emojibypass = False
is_anim = False
emoji = None
if not message or not message.media:
return await args.edit("`Maaf , Saya Gagal Mengambil Sticker Ini!`")
if isinstance(message.media, MessageMediaPhoto):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
photo = await bot.download_media(message.photo, photo)
elif "image" in message.media.document.mime_type.split("/"):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
await bot.download_file(message.media.document, photo)
if (
DocumentAttributeFilename(file_name="sticker.webp")
in message.media.document.attributes
):
emoji = message.media.document.attributes[1].alt
if emoji != "✨":
emojibypass = True
elif "tgsticker" in message.media.document.mime_type:
await args.edit(f"`{random.choice(KANGING_STR)}`")
await bot.download_file(message.media.document, "AnimatedSticker.tgs")
attributes = message.media.document.attributes
for attribute in attributes:
if isinstance(attribute, DocumentAttributeSticker):
emoji = attribute.alt
emojibypass = True
is_anim = True
photo = 1
else:
return await args.edit("`File Tidak Didukung !`")
if photo:
splat = args.text.split()
if not emojibypass:
emoji = "✨"
pack = 1
if len(splat) == 3:
pack = splat[2] # User sent both
emoji = splat[1]
elif len(splat) == 2:
if splat[1].isnumeric():
# User wants to push into different pack, but is okay with
# thonk as emote.
pack = int(splat[1])
else:
# User sent just custom emote, wants to push to default
# pack
emoji = splat[1]
u_id = user.id
f_name = user.first_name
packname = f"Sticker_u{u_id}_Ke{pack}"
custom_packnick = f"{custompack}" or f"{f_name} Sticker Pack"
packnick = f"{custom_packnick}"
cmd = "/newpack"
file = io.BytesIO()
if not is_anim:
image = await resize_photo(photo)
file.name = "sticker.png"
image.save(file, "PNG")
else:
packname += "_anim"
packnick += " (Animated)"
cmd = "/newanimated"
response = urllib.request.urlopen(
urllib.request.Request(f"http://t.me/addstickers/{packname}")
)
htmlstr = response.read().decode("utf8").split("\n")
if (
" A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>."
not in htmlstr
):
async with bot.conversation("Stickers") as conv:
await conv.send_message("/addsticker")
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packname)
x = await conv.get_response()
while "120" in x.text:
pack += 1
packname = f"Sticker_u{u_id}_Ke{pack}"
packnick = f"{custom_packnick}"
await args.edit(
"`Membuat Sticker Pack Baru "
+ str(pack)
+ " Karena Sticker Pack Sudah Penuh`"
)
await conv.send_message(packname)
x = await conv.get_response()
if x.text == "Gagal Memilih Pack.":
await conv.send_message(cmd)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
await conv.get_response()
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
return await args.edit(
"`Sticker ditambahkan ke pack yang berbeda !"
"\nIni pack yang baru saja dibuat!"
f"\nTekan [Sticker Pack](t.me/addstickers/{packname}) Untuk Melihat Sticker Pack",
parse_mode="md",
)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"**Gagal Menambahkan Sticker, Gunakan @Stickers Bot Untuk Menambahkan Sticker Anda.**"
)
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/done")
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
else:
await args.edit("`Membuat Sticker Pack Baru`")
async with bot.conversation("Stickers") as conv:
await conv.send_message(cmd)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"**Gagal Menambahkan Sticker, Gunakan @Stickers Bot Untuk Menambahkan Sticker.**"
)
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await args.edit(
"** Sticker Berhasil Ditambahkan!**"
f"\n 👻 **[KLIK DISINI](t.me/addstickers/{packname})** 👻\n**Untuk Menggunakan Stickers**",
parse_mode="md",
)
async def resize_photo(photo):
image = Image.open(photo)
if (image.width and image.height) < 512:
size1 = image.width
size2 = image.height
if size1 > size2:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
maxsize = (512, 512)
image.thumbnail(maxsize)
return image
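# Behaviour note for resize_photo (hedged worked example): Telegram static
# stickers must fit inside 512x512 px. A 1024x768 photo is thumbnailed down to
# 512x384, while a 300x200 photo is scaled up to 512x341 (the scaled side is
# rounded down with math.floor).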
@bot.on(man_cmd(outgoing=True, pattern=r"stickerinfo$"))
async def get_pack_info(event):
if not event.is_reply:
return await event.edit("**Mohon Balas Ke Sticker**")
rep_msg = await event.get_reply_message()
if not rep_msg.document:
return await event.edit("**Balas ke sticker untuk melihat detail pack**")
try:
stickerset_attr = rep_msg.document.attributes[1]
await event.edit("`Processing...`")
except BaseException:
return await event.edit("**Ini bukan sticker, Mohon balas ke sticker.**")
if not isinstance(stickerset_attr, DocumentAttributeSticker):
return await event.edit("**Ini bukan sticker, Mohon balas ke sticker.**")
get_stickerset = await bot(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash,
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
OUTPUT = (
f"➠ **Nama Sticker:** [{get_stickerset.set.title}](http://t.me/addstickers/{get_stickerset.set.short_name})\n"
f"➠ **Official:** `{get_stickerset.set.official}`\n"
f"➠ **Arsip:** `{get_stickerset.set.archived}`\n"
f"➠ **Sticker Dalam Pack:** `{len(get_stickerset.packs)}`\n"
f"➠ **Emoji Dalam Pack:** {' '.join(pack_emojis)}"
)
await event.edit(OUTPUT)
@bot.on(man_cmd(outgoing=True, pattern=r"delsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
await event.edit("**Mohon Reply ke Sticker yang ingin anda Hapus.**")
return
reply_message = await event.get_reply_message()
chat = "@Stickers"
if reply_message.sender.bot:
await edit_or_reply(event, "**Mohon Reply ke Sticker.**")
return
await event.edit("`Processing...`")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=429000)
)
await conv.send_message("/delsticker")
await conv.get_response()
await asyncio.sleep(2)
await bot.forward_messages(chat, reply_message)
response = await response
except YouBlockedUserError:
await event.reply("**Silahkan Buka Blokir @Stikers dan coba lagi**")
return
if response.text.startswith(
"Sorry, I can't do this, it seems that you are not the owner of the relevant pack."
):
await event.edit(
"**Maaf, Sepertinya Anda bukan Pemilik Sticker pack ini.**"
)
elif response.text.startswith(
"You don't have any sticker packs yet. You can create one using the /newpack command."
):
await event.edit("**Anda Tidak Memiliki Stiker untuk di Hapus**")
elif response.text.startswith("Please send me the sticker."):
await event.edit("**Tolong Reply ke Sticker yang ingin dihapus**")
elif response.text.startswith("Invalid pack selected."):
await event.edit("**Maaf Paket yang dipilih tidak valid.**")
else:
await event.edit("**Berhasil Menghapus Stiker.**")
@bot.on(man_cmd(outgoing=True, pattern=r"editsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
await event.edit("**Mohon Reply ke Sticker dan Berikan emoji.**")
return
reply_message = await event.get_reply_message()
emot = event.pattern_match.group(1)
if reply_message.sender.bot:
await edit_or_reply(event, "**Mohon Reply ke Sticker.**")
return
await event.edit("`Processing...`")
if emot == "":
await event.edit("**Silahkan Kirimkan Emot Baru.**")
else:
chat = "@Stickers"
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=429000)
)
await conv.send_message("/editsticker")
await conv.get_response()
await asyncio.sleep(2)
await bot.forward_messages(chat, reply_message)
await conv.get_response()
await asyncio.sleep(2)
await conv.send_message(f"{emot}")
response = await response
except YouBlockedUserError:
await event.reply("**Buka blokir @Stiker dan coba lagi**")
return
if response.text.startswith("Invalid pack selected."):
await event.edit("**Maaf Paket yang dipilih tidak valid.**")
elif response.text.startswith(
"Please send us an emoji that best describes your sticker."
):
await event.edit(
"**Silahkan Kirimkan emoji yang paling menggambarkan stiker Anda.**"
)
else:
await event.edit(
f"**Berhasil Mengedit Emoji Stiker**\n**Emoji Baru:** {emot}"
)
@bot.on(man_cmd(outgoing=True, pattern=r"getsticker$"))
async def sticker_to_png(sticker):
if not sticker.is_reply:
await sticker.edit("`NULL information to fetch...`")
return False
img = await sticker.get_reply_message()
if not img.document:
await sticker.edit("`Mohon Balas Ke Sticker`")
return False
try:
img.document.attributes[1]
except Exception:
await sticker.edit("`Maaf , Ini Bukanlah Sticker`")
return
with io.BytesIO() as image:
await sticker.client.download_media(img, image)
image.name = "sticker.png"
image.seek(0)
try:
await img.reply(file=image, force_document=True)
except Exception:
await sticker.edit("`Tidak Dapat Mengirim File...`")
else:
await sticker.delete()
return
@bot.on(man_cmd(outgoing=True, pattern=r"findsticker (.*)"))
async def cb_sticker(event):
query = event.pattern_match.group(1)
if not query:
return await event.edit("`Masukan Nama Sticker Pack!`")
await event.edit("`Searching sticker packs...`")
text = requests.get("https://combot.org/telegram/stickers?q=" + query).text
soup = bs(text, "lxml")
results = soup.find_all("div", {"class": "sticker-pack__header"})
if not results:
return await event.edit("`Tidak Menemukan Sticker Pack :(`")
reply = f"**Keyword Sticker Pack:**\n {query}\n\n**Hasil:**\n"
for pack in results:
if pack.button:
packtitle = (pack.find("div", "sticker-pack__title")).get_text()
packlink = (pack.a).get("href")
reply += f"- [{packtitle}]({packlink})\n\n"
await event.edit(reply)
CMD_HELP.update(
{
"stickers": f"**Plugin : **`stickers`\
\n\n • **Syntax :** `{cmd}kang` atau `{cmd}tikel` [emoji]\
\n • **Function : **Balas .kang Ke Sticker Atau Gambar Untuk Menambahkan Ke Sticker Pack Mu\
\n\n • **Syntax :** `{cmd}kang` [emoji] atau `{cmd}tikel` [emoji]\
\n • **Function : **Balas {cmd}kang emoji Ke Sticker Atau Gambar Untuk Menambahkan dan costum emoji sticker Ke Pack Mu\
\n\n • **Syntax :** `{cmd}delsticker` <reply sticker>\
\n • **Function : **Untuk Menghapus sticker dari Sticker Pack.\
\n\n • **Syntax :** `{cmd}editsticker` <reply sticker> <emoji>\
\n • **Function : **Untuk Mengedit emoji stiker dengan emoji yang baru.\
\n\n • **Syntax :** `{cmd}stickerinfo`\
\n • **Function : **Untuk Mendapatkan Informasi Sticker Pack.\
\n\n • **Syntax :** `{cmd}findsticker` <nama pack sticker>\
\n • **Function : **Untuk Mencari Sticker Pack.\
\n\n • **NOTE:** Untuk Membuat Sticker Pack baru Gunakan angka dibelakang `{cmd}kang`\
\n • **CONTOH:** `{cmd}kang 2` untuk membuat dan menyimpan ke sticker pack ke 2\
"
}
)
CMD_HELP.update(
{
"sticker_v2": f"**Plugin : **`stickers`\
\n\n • **Syntax :** `{cmd}getsticker`\
\n • **Function : **Balas Ke Stcker Untuk Mendapatkan File 'PNG' Sticker.\
\n\n • **Syntax :** `{cmd}get`\
\n • **Function : **Balas ke sticker untuk mendapatkan file 'PNG' sticker\
\n\n • **Syntax :** `{cmd}stoi`\
\n • **Function : **Balas Ke Stcker Untuk Mendapatkan File 'PNG' Sticker.\
\n\n • **Syntax :** `{cmd}itos`\
\n • **Function : **Balas ke sticker atau gambar .itos untuk mengambil sticker bukan ke pack\
"
}
)
| 41.043564
| 129
| 0.569595
|
import asyncio
import io
import math
import random
import urllib.request
from os import remove
import requests
from bs4 import BeautifulSoup as bs
from PIL import Image
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputStickerSetID,
MessageMediaPhoto,
)
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot import S_PACK_NAME as custompack
from userbot import bot
from userbot.events import man_cmd
from userbot.utils import edit_or_reply
KANGING_STR = [
"Colong Sticker dulu yee kan",
"Ini Sticker aku colong yaa DUARR!",
"Waw Stickernya Bagus Nih...Colong Dulu Yekan..",
"ehh, keren nih... gua colong ya stickernya...",
"Boleh juga ni Sticker Colong ahh~",
]
@bot.on(man_cmd(outgoing=True, pattern=r"(?:tikel|kang)\s?(.)?"))
async def kang(args):
user = await bot.get_me()
if not user.username:
user.username = user.first_name
message = await args.get_reply_message()
photo = None
emojibypass = False
is_anim = False
emoji = None
if not message or not message.media:
return await args.edit("`Maaf , Saya Gagal Mengambil Sticker Ini!`")
if isinstance(message.media, MessageMediaPhoto):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
photo = await bot.download_media(message.photo, photo)
elif "image" in message.media.document.mime_type.split("/"):
await args.edit(f"`{random.choice(KANGING_STR)}`")
photo = io.BytesIO()
await bot.download_file(message.media.document, photo)
if (
DocumentAttributeFilename(file_name="sticker.webp")
in message.media.document.attributes
):
emoji = message.media.document.attributes[1].alt
if emoji != "✨":
emojibypass = True
elif "tgsticker" in message.media.document.mime_type:
await args.edit(f"`{random.choice(KANGING_STR)}`")
await bot.download_file(message.media.document, "AnimatedSticker.tgs")
attributes = message.media.document.attributes
for attribute in attributes:
if isinstance(attribute, DocumentAttributeSticker):
emoji = attribute.alt
emojibypass = True
is_anim = True
photo = 1
else:
return await args.edit("`File Tidak Didukung !`")
if photo:
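        # Argument layout handled below: ".kang <emoji> <pack#>" sets both values,
        # ".kang <number>" only picks the destination pack, and ".kang <emoji>"
        # only overrides the default ✨ emoji.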
splat = args.text.split()
if not emojibypass:
emoji = "✨"
pack = 1
if len(splat) == 3:
pack = splat[2]
emoji = splat[1]
elif len(splat) == 2:
if splat[1].isnumeric():
pack = int(splat[1])
else:
emoji = splat[1]
u_id = user.id
f_name = user.first_name
packname = f"Sticker_u{u_id}_Ke{pack}"
custom_packnick = f"{custompack}" or f"{f_name} Sticker Pack"
packnick = f"{custom_packnick}"
cmd = "/newpack"
file = io.BytesIO()
if not is_anim:
image = await resize_photo(photo)
file.name = "sticker.png"
image.save(file, "PNG")
else:
packname += "_anim"
packnick += " (Animated)"
cmd = "/newanimated"
response = urllib.request.urlopen(
urllib.request.Request(f"http://t.me/addstickers/{packname}")
)
htmlstr = response.read().decode("utf8").split("\n")
if (
" A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>."
not in htmlstr
):
async with bot.conversation("Stickers") as conv:
await conv.send_message("/addsticker")
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packname)
x = await conv.get_response()
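                # @Stickers replies with a message mentioning the 120-sticker limit
                # when the chosen pack is already full; bump the pack number and
                # retry until a pack with free space is found.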
while "120" in x.text:
pack += 1
packname = f"Sticker_u{u_id}_Ke{pack}"
packnick = f"{custom_packnick}"
await args.edit(
"`Membuat Sticker Pack Baru "
+ str(pack)
+ " Karena Sticker Pack Sudah Penuh`"
)
await conv.send_message(packname)
x = await conv.get_response()
                if x.text == "Invalid pack selected.":  # @Stickers replies in English
await conv.send_message(cmd)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
await conv.get_response()
await conv.send_message(emoji)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
return await args.edit(
"`Sticker ditambahkan ke pack yang berbeda !"
"\nIni pack yang baru saja dibuat!"
f"\nTekan [Sticker Pack](t.me/addstickers/{packname}) Untuk Melihat Sticker Pack",
parse_mode="md",
)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"**Gagal Menambahkan Sticker, Gunakan @Stickers Bot Untuk Menambahkan Sticker Anda.**"
)
await conv.send_message(emoji)
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/done")
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
else:
await args.edit("`Membuat Sticker Pack Baru`")
async with bot.conversation("Stickers") as conv:
await conv.send_message(cmd)
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message(packnick)
await conv.get_response()
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
if is_anim:
await conv.send_file("AnimatedSticker.tgs")
remove("AnimatedSticker.tgs")
else:
file.seek(0)
await conv.send_file(file, force_document=True)
rsp = await conv.get_response()
if "Sorry, the file type is invalid." in rsp.text:
return await args.edit(
"**Gagal Menambahkan Sticker, Gunakan @Stickers Bot Untuk Menambahkan Sticker.**"
)
await conv.send_message(emoji)
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message("/publish")
if is_anim:
await conv.get_response()
await conv.send_message(f"<{packnick}>")
# Ensure user doesn't get spamming notifications
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await conv.send_message("/skip")
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await conv.send_message(packname)
# Ensure user doesn't get spamming notifications
await bot.send_read_acknowledge(conv.chat_id)
await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await args.edit(
"** Sticker Berhasil Ditambahkan!**"
f"\n 👻 **[KLIK DISINI](t.me/addstickers/{packname})** 👻\n**Untuk Menggunakan Stickers**",
parse_mode="md",
)
async def resize_photo(photo):
image = Image.open(photo)
if (image.width and image.height) < 512:
size1 = image.width
size2 = image.height
if size1 > size2:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
image = image.resize(sizenew)
else:
maxsize = (512, 512)
image.thumbnail(maxsize)
return image
@bot.on(man_cmd(outgoing=True, pattern=r"stickerinfo$"))
async def get_pack_info(event):
if not event.is_reply:
return await event.edit("**Mohon Balas Ke Sticker**")
rep_msg = await event.get_reply_message()
if not rep_msg.document:
return await event.edit("**Balas ke sticker untuk melihat detail pack**")
try:
stickerset_attr = rep_msg.document.attributes[1]
await event.edit("`Processing...`")
except BaseException:
return await event.edit("**Ini bukan sticker, Mohon balas ke sticker.**")
if not isinstance(stickerset_attr, DocumentAttributeSticker):
return await event.edit("**Ini bukan sticker, Mohon balas ke sticker.**")
get_stickerset = await bot(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash,
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
OUTPUT = (
f"➠ **Nama Sticker:** [{get_stickerset.set.title}](http://t.me/addstickers/{get_stickerset.set.short_name})\n"
f"➠ **Official:** `{get_stickerset.set.official}`\n"
f"➠ **Arsip:** `{get_stickerset.set.archived}`\n"
f"➠ **Sticker Dalam Pack:** `{len(get_stickerset.packs)}`\n"
f"➠ **Emoji Dalam Pack:** {' '.join(pack_emojis)}"
)
await event.edit(OUTPUT)
@bot.on(man_cmd(outgoing=True, pattern=r"delsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
await event.edit("**Mohon Reply ke Sticker yang ingin anda Hapus.**")
return
reply_message = await event.get_reply_message()
chat = "@Stickers"
if reply_message.sender.bot:
await edit_or_reply(event, "**Mohon Reply ke Sticker.**")
return
await event.edit("`Processing...`")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=429000)
)
await conv.send_message("/delsticker")
await conv.get_response()
await asyncio.sleep(2)
await bot.forward_messages(chat, reply_message)
response = await response
except YouBlockedUserError:
await event.reply("**Silahkan Buka Blokir @Stikers dan coba lagi**")
return
if response.text.startswith(
"Sorry, I can't do this, it seems that you are not the owner of the relevant pack."
):
await event.edit(
"**Maaf, Sepertinya Anda bukan Pemilik Sticker pack ini.**"
)
elif response.text.startswith(
"You don't have any sticker packs yet. You can create one using the /newpack command."
):
await event.edit("**Anda Tidak Memiliki Stiker untuk di Hapus**")
elif response.text.startswith("Please send me the sticker."):
await event.edit("**Tolong Reply ke Sticker yang ingin dihapus**")
elif response.text.startswith("Invalid pack selected."):
await event.edit("**Maaf Paket yang dipilih tidak valid.**")
else:
await event.edit("**Berhasil Menghapus Stiker.**")
@bot.on(man_cmd(outgoing=True, pattern=r"editsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
await event.edit("**Mohon Reply ke Sticker dan Berikan emoji.**")
return
reply_message = await event.get_reply_message()
emot = event.pattern_match.group(1)
if reply_message.sender.bot:
await edit_or_reply(event, "**Mohon Reply ke Sticker.**")
return
await event.edit("`Processing...`")
if emot == "":
await event.edit("**Silahkan Kirimkan Emot Baru.**")
else:
chat = "@Stickers"
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=429000)
)
await conv.send_message("/editsticker")
await conv.get_response()
await asyncio.sleep(2)
await bot.forward_messages(chat, reply_message)
await conv.get_response()
await asyncio.sleep(2)
await conv.send_message(f"{emot}")
response = await response
except YouBlockedUserError:
await event.reply("**Buka blokir @Stiker dan coba lagi**")
return
if response.text.startswith("Invalid pack selected."):
await event.edit("**Maaf Paket yang dipilih tidak valid.**")
elif response.text.startswith(
"Please send us an emoji that best describes your sticker."
):
await event.edit(
"**Silahkan Kirimkan emoji yang paling menggambarkan stiker Anda.**"
)
else:
await event.edit(
f"**Berhasil Mengedit Emoji Stiker**\n**Emoji Baru:** {emot}"
)
@bot.on(man_cmd(outgoing=True, pattern=r"getsticker$"))
async def sticker_to_png(sticker):
if not sticker.is_reply:
await sticker.edit("`NULL information to fetch...`")
return False
img = await sticker.get_reply_message()
if not img.document:
await sticker.edit("`Mohon Balas Ke Sticker`")
return False
try:
img.document.attributes[1]
except Exception:
await sticker.edit("`Maaf , Ini Bukanlah Sticker`")
return
with io.BytesIO() as image:
await sticker.client.download_media(img, image)
image.name = "sticker.png"
image.seek(0)
try:
await img.reply(file=image, force_document=True)
except Exception:
await sticker.edit("`Tidak Dapat Mengirim File...`")
else:
await sticker.delete()
return
@bot.on(man_cmd(outgoing=True, pattern=r"findsticker (.*)"))
async def cb_sticker(event):
query = event.pattern_match.group(1)
if not query:
return await event.edit("`Masukan Nama Sticker Pack!`")
await event.edit("`Searching sticker packs...`")
text = requests.get("https://combot.org/telegram/stickers?q=" + query).text
soup = bs(text, "lxml")
results = soup.find_all("div", {"class": "sticker-pack__header"})
if not results:
return await event.edit("`Tidak Menemukan Sticker Pack :(`")
reply = f"**Keyword Sticker Pack:**\n {query}\n\n**Hasil:**\n"
for pack in results:
if pack.button:
packtitle = (pack.find("div", "sticker-pack__title")).get_text()
packlink = (pack.a).get("href")
reply += f"- [{packtitle}]({packlink})\n\n"
await event.edit(reply)
CMD_HELP.update(
{
"stickers": f"**Plugin : **`stickers`\
\n\n • **Syntax :** `{cmd}kang` atau `{cmd}tikel` [emoji]\
\n • **Function : **Balas .kang Ke Sticker Atau Gambar Untuk Menambahkan Ke Sticker Pack Mu\
\n\n • **Syntax :** `{cmd}kang` [emoji] atau `{cmd}tikel` [emoji]\
\n • **Function : **Balas {cmd}kang emoji Ke Sticker Atau Gambar Untuk Menambahkan dan costum emoji sticker Ke Pack Mu\
\n\n • **Syntax :** `{cmd}delsticker` <reply sticker>\
\n • **Function : **Untuk Menghapus sticker dari Sticker Pack.\
\n\n • **Syntax :** `{cmd}editsticker` <reply sticker> <emoji>\
\n • **Function : **Untuk Mengedit emoji stiker dengan emoji yang baru.\
\n\n • **Syntax :** `{cmd}stickerinfo`\
\n • **Function : **Untuk Mendapatkan Informasi Sticker Pack.\
\n\n • **Syntax :** `{cmd}findsticker` <nama pack sticker>\
\n • **Function : **Untuk Mencari Sticker Pack.\
\n\n • **NOTE:** Untuk Membuat Sticker Pack baru Gunakan angka dibelakang `{cmd}kang`\
\n • **CONTOH:** `{cmd}kang 2` untuk membuat dan menyimpan ke sticker pack ke 2\
"
}
)
CMD_HELP.update(
{
"sticker_v2": f"**Plugin : **`stickers`\
\n\n • **Syntax :** `{cmd}getsticker`\
\n • **Function : **Balas Ke Stcker Untuk Mendapatkan File 'PNG' Sticker.\
\n\n • **Syntax :** `{cmd}get`\
\n • **Function : **Balas ke sticker untuk mendapatkan file 'PNG' sticker\
\n\n • **Syntax :** `{cmd}stoi`\
\n • **Function : **Balas Ke Stcker Untuk Mendapatkan File 'PNG' Sticker.\
\n\n • **Syntax :** `{cmd}itos`\
\n • **Function : **Balas ke sticker atau gambar .itos untuk mengambil sticker bukan ke pack\
"
}
)
| true
| true
|
1c42dfc738ba57702806720d9a8f918f90f70b8b
| 8,504
|
py
|
Python
|
ports/esp32/modules/sdcard.py
|
buginventor/lv_micropython
|
bf62dfc78497d47ced3b0931a270e553d4d2552b
|
[
"MIT"
] | 150
|
2020-05-24T17:42:24.000Z
|
2022-03-28T12:47:53.000Z
|
ports/esp32/modules/sdcard.py
|
buginventor/lv_micropython
|
bf62dfc78497d47ced3b0931a270e553d4d2552b
|
[
"MIT"
] | 24
|
2020-05-19T10:46:39.000Z
|
2022-01-25T22:47:44.000Z
|
ports/esp32/modules/sdcard.py
|
buginventor/lv_micropython
|
bf62dfc78497d47ced3b0931a270e553d4d2552b
|
[
"MIT"
] | 81
|
2020-05-19T03:57:34.000Z
|
2022-03-18T03:34:08.000Z
|
"""
MicroPython driver for SD cards using SPI bus.
Requires an SPI bus and a CS pin. Provides readblocks and writeblocks
methods so the device can be mounted as a filesystem.
Example usage on pyboard:
import pyb, sdcard, os
sd = sdcard.SDCard(pyb.SPI(1), pyb.Pin.board.X5)
pyb.mount(sd, '/sd2')
os.listdir('/')
Example usage on ESP8266:
import machine, sdcard, os
sd = sdcard.SDCard(machine.SPI(1), machine.Pin(15))
os.mount(sd, '/sd')
os.listdir('/')
"""
from micropython import const
import time
_CMD_TIMEOUT = const(100)
_R1_IDLE_STATE = const(1 << 0)
#R1_ERASE_RESET = const(1 << 1)
_R1_ILLEGAL_COMMAND = const(1 << 2)
#R1_COM_CRC_ERROR = const(1 << 3)
#R1_ERASE_SEQUENCE_ERROR = const(1 << 4)
#R1_ADDRESS_ERROR = const(1 << 5)
#R1_PARAMETER_ERROR = const(1 << 6)
_TOKEN_CMD25 = const(0xfc)
_TOKEN_STOP_TRAN = const(0xfd)
_TOKEN_DATA = const(0xfe)
class SDCard:
def __init__(self, spi, cs):
self.spi = spi
self.cs = cs
self.cmdbuf = bytearray(6)
self.dummybuf = bytearray(512)
self.tokenbuf = bytearray(1)
for i in range(512):
self.dummybuf[i] = 0xff
self.dummybuf_memoryview = memoryview(self.dummybuf)
# initialise the card
self.init_card()
def init_spi(self, baudrate):
try:
master = self.spi.MASTER
except AttributeError:
# on ESP8266
self.spi.init(baudrate=baudrate, phase=0, polarity=0)
else:
# on pyboard
self.spi.init(master, baudrate=baudrate, phase=0, polarity=0)
def init_card(self):
# init CS pin
self.cs.init(self.cs.OUT, value=1)
# init SPI bus; use low data rate for initialisation
self.init_spi(100000)
# clock card at least 100 cycles with cs high
for i in range(16):
self.spi.write(b'\xff')
# CMD0: init card; should return _R1_IDLE_STATE (allow 5 attempts)
for _ in range(5):
if self.cmd(0, 0, 0x95) == _R1_IDLE_STATE:
break
else:
raise OSError("no SD card")
# CMD8: determine card version
r = self.cmd(8, 0x01aa, 0x87, 4)
if r == _R1_IDLE_STATE:
self.init_card_v2()
elif r == (_R1_IDLE_STATE | _R1_ILLEGAL_COMMAND):
self.init_card_v1()
else:
raise OSError("couldn't determine SD card version")
# get the number of sectors
# CMD9: response R2 (R1 byte + 16-byte block read)
if self.cmd(9, 0, 0, 0, False) != 0:
raise OSError("no response from SD card")
csd = bytearray(16)
self.readinto(csd)
if csd[0] & 0xc0 == 0x40: # CSD version 2.0
self.sectors = ((csd[8] << 8 | csd[9]) + 1) * 1024
elif csd[0] & 0xc0 == 0x00: # CSD version 1.0 (old, <=2GB)
c_size = csd[6] & 0b11 | csd[7] << 2 | (csd[8] & 0b11000000) << 4
c_size_mult = ((csd[9] & 0b11) << 1) | csd[10] >> 7
self.sectors = (c_size + 1) * (2 ** (c_size_mult + 2))
else:
raise OSError("SD card CSD format not supported")
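        # Hedged worked example for the CSD v1 branch (assuming a 512-byte
        # READ_BL_LEN): C_SIZE = 4095 and C_SIZE_MULT = 7 give
        # (4095 + 1) * 2**(7 + 2) = 2,097,152 sectors, i.e. 1 GiB of 512-byte blocks.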
#print('sectors', self.sectors)
# CMD16: set block length to 512 bytes
if self.cmd(16, 512, 0) != 0:
raise OSError("can't set 512 block size")
# set to high data rate now that it's initialised
self.init_spi(1320000)
def init_card_v1(self):
for i in range(_CMD_TIMEOUT):
self.cmd(55, 0, 0)
if self.cmd(41, 0, 0) == 0:
self.cdv = 512
#print("[SDCard] v1 card")
return
raise OSError("timeout waiting for v1 card")
def init_card_v2(self):
for i in range(_CMD_TIMEOUT):
time.sleep_ms(50)
self.cmd(58, 0, 0, 4)
self.cmd(55, 0, 0)
if self.cmd(41, 0x40000000, 0) == 0:
self.cmd(58, 0, 0, 4)
self.cdv = 1
#print("[SDCard] v2 card")
return
raise OSError("timeout waiting for v2 card")
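    # cdv is the factor applied to block numbers in readblocks/writeblocks:
    # v1 cards are byte-addressed (block_num * 512), while v2 (SDHC/SDXC) cards
    # are block-addressed (block_num * 1), which is why init_card_v1 and
    # init_card_v2 set it differently.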
def cmd(self, cmd, arg, crc, final=0, release=True, skip1=False):
self.cs(0)
# create and send the command
buf = self.cmdbuf
buf[0] = 0x40 | cmd
buf[1] = arg >> 24
buf[2] = arg >> 16
buf[3] = arg >> 8
buf[4] = arg
buf[5] = crc
self.spi.write(buf)
if skip1:
self.spi.readinto(self.tokenbuf, 0xff)
# wait for the response (response[7] == 0)
for i in range(_CMD_TIMEOUT):
self.spi.readinto(self.tokenbuf, 0xff)
response = self.tokenbuf[0]
if not (response & 0x80):
# this could be a big-endian integer that we are getting here
for j in range(final):
self.spi.write(b'\xff')
if release:
self.cs(1)
self.spi.write(b'\xff')
return response
# timeout
self.cs(1)
self.spi.write(b'\xff')
return -1
def readinto(self, buf):
self.cs(0)
        # read until the start-of-data token (0xfe)
while True:
self.spi.readinto(self.tokenbuf, 0xff)
if self.tokenbuf[0] == _TOKEN_DATA:
break
# read data
mv = self.dummybuf_memoryview
if len(buf) != len(mv):
mv = mv[:len(buf)]
self.spi.write_readinto(mv, buf)
# read checksum
self.spi.write(b'\xff')
self.spi.write(b'\xff')
self.cs(1)
self.spi.write(b'\xff')
def write(self, token, buf):
self.cs(0)
# send: start of block, data, checksum
self.spi.read(1, token)
self.spi.write(buf)
self.spi.write(b'\xff')
self.spi.write(b'\xff')
# check the response
if (self.spi.read(1, 0xff)[0] & 0x1f) != 0x05:
self.cs(1)
self.spi.write(b'\xff')
return
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0:
pass
self.cs(1)
self.spi.write(b'\xff')
def write_token(self, token):
self.cs(0)
self.spi.read(1, token)
self.spi.write(b'\xff')
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0x00:
pass
self.cs(1)
self.spi.write(b'\xff')
def readblocks(self, block_num, buf):
nblocks = len(buf) // 512
assert nblocks and not len(buf) % 512, 'Buffer length is invalid'
if nblocks == 1:
# CMD17: set read address for single block
if self.cmd(17, block_num * self.cdv, 0, release=False) != 0:
# release the card
self.cs(1)
raise OSError(5) # EIO
# receive the data and release card
self.readinto(buf)
else:
# CMD18: set read address for multiple blocks
if self.cmd(18, block_num * self.cdv, 0, release=False) != 0:
# release the card
self.cs(1)
raise OSError(5) # EIO
offset = 0
mv = memoryview(buf)
while nblocks:
# receive the data and release card
self.readinto(mv[offset : offset + 512])
offset += 512
nblocks -= 1
if self.cmd(12, 0, 0xff, skip1=True):
raise OSError(5) # EIO
def writeblocks(self, block_num, buf):
nblocks, err = divmod(len(buf), 512)
assert nblocks and not err, 'Buffer length is invalid'
if nblocks == 1:
# CMD24: set write address for single block
if self.cmd(24, block_num * self.cdv, 0) != 0:
raise OSError(5) # EIO
# send the data
self.write(_TOKEN_DATA, buf)
else:
# CMD25: set write address for first block
if self.cmd(25, block_num * self.cdv, 0) != 0:
raise OSError(5) # EIO
# send the data
offset = 0
mv = memoryview(buf)
while nblocks:
self.write(_TOKEN_CMD25, mv[offset : offset + 512])
offset += 512
nblocks -= 1
self.write_token(_TOKEN_STOP_TRAN)
def ioctl(self, op, arg):
if op == 4: # get number of blocks
return self.sectors
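# Hypothetical raw-block usage, in addition to the mount example in the module
# docstring above (names assumed, not part of this file):
#   sd = SDCard(machine.SPI(1), machine.Pin(15))
#   buf = bytearray(512)
#   sd.readblocks(0, buf)    # read the first 512-byte block
#   sd.writeblocks(0, buf)   # write it back unchanged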
| 30.480287
| 77
| 0.52587
|
from micropython import const
import time
_CMD_TIMEOUT = const(100)
_R1_IDLE_STATE = const(1 << 0)
_R1_ILLEGAL_COMMAND = const(1 << 2)
_TOKEN_CMD25 = const(0xfc)
_TOKEN_STOP_TRAN = const(0xfd)
_TOKEN_DATA = const(0xfe)
class SDCard:
def __init__(self, spi, cs):
self.spi = spi
self.cs = cs
self.cmdbuf = bytearray(6)
self.dummybuf = bytearray(512)
self.tokenbuf = bytearray(1)
for i in range(512):
self.dummybuf[i] = 0xff
self.dummybuf_memoryview = memoryview(self.dummybuf)
self.init_card()
def init_spi(self, baudrate):
try:
master = self.spi.MASTER
except AttributeError:
self.spi.init(baudrate=baudrate, phase=0, polarity=0)
else:
self.spi.init(master, baudrate=baudrate, phase=0, polarity=0)
def init_card(self):
self.cs.init(self.cs.OUT, value=1)
self.init_spi(100000)
for i in range(16):
self.spi.write(b'\xff')
for _ in range(5):
if self.cmd(0, 0, 0x95) == _R1_IDLE_STATE:
break
else:
raise OSError("no SD card")
r = self.cmd(8, 0x01aa, 0x87, 4)
if r == _R1_IDLE_STATE:
self.init_card_v2()
elif r == (_R1_IDLE_STATE | _R1_ILLEGAL_COMMAND):
self.init_card_v1()
else:
raise OSError("couldn't determine SD card version")
# get the number of sectors
# CMD9: response R2 (R1 byte + 16-byte block read)
if self.cmd(9, 0, 0, 0, False) != 0:
raise OSError("no response from SD card")
csd = bytearray(16)
self.readinto(csd)
if csd[0] & 0xc0 == 0x40: # CSD version 2.0
self.sectors = ((csd[8] << 8 | csd[9]) + 1) * 1024
elif csd[0] & 0xc0 == 0x00: # CSD version 1.0 (old, <=2GB)
c_size = csd[6] & 0b11 | csd[7] << 2 | (csd[8] & 0b11000000) << 4
c_size_mult = ((csd[9] & 0b11) << 1) | csd[10] >> 7
self.sectors = (c_size + 1) * (2 ** (c_size_mult + 2))
else:
raise OSError("SD card CSD format not supported")
#print('sectors', self.sectors)
# CMD16: set block length to 512 bytes
if self.cmd(16, 512, 0) != 0:
raise OSError("can't set 512 block size")
self.init_spi(1320000)
def init_card_v1(self):
for i in range(_CMD_TIMEOUT):
self.cmd(55, 0, 0)
if self.cmd(41, 0, 0) == 0:
self.cdv = 512
#print("[SDCard] v1 card")
return
raise OSError("timeout waiting for v1 card")
def init_card_v2(self):
for i in range(_CMD_TIMEOUT):
time.sleep_ms(50)
self.cmd(58, 0, 0, 4)
self.cmd(55, 0, 0)
if self.cmd(41, 0x40000000, 0) == 0:
self.cmd(58, 0, 0, 4)
self.cdv = 1
#print("[SDCard] v2 card")
return
raise OSError("timeout waiting for v2 card")
def cmd(self, cmd, arg, crc, final=0, release=True, skip1=False):
self.cs(0)
# create and send the command
buf = self.cmdbuf
buf[0] = 0x40 | cmd
buf[1] = arg >> 24
buf[2] = arg >> 16
buf[3] = arg >> 8
buf[4] = arg
buf[5] = crc
self.spi.write(buf)
if skip1:
self.spi.readinto(self.tokenbuf, 0xff)
# wait for the response (response[7] == 0)
for i in range(_CMD_TIMEOUT):
self.spi.readinto(self.tokenbuf, 0xff)
response = self.tokenbuf[0]
if not (response & 0x80):
# this could be a big-endian integer that we are getting here
for j in range(final):
self.spi.write(b'\xff')
if release:
self.cs(1)
self.spi.write(b'\xff')
return response
# timeout
self.cs(1)
self.spi.write(b'\xff')
return -1
def readinto(self, buf):
self.cs(0)
# read until start byte (0xff)
while True:
self.spi.readinto(self.tokenbuf, 0xff)
if self.tokenbuf[0] == _TOKEN_DATA:
break
# read data
mv = self.dummybuf_memoryview
if len(buf) != len(mv):
mv = mv[:len(buf)]
self.spi.write_readinto(mv, buf)
# read checksum
self.spi.write(b'\xff')
self.spi.write(b'\xff')
self.cs(1)
self.spi.write(b'\xff')
def write(self, token, buf):
self.cs(0)
# send: start of block, data, checksum
self.spi.read(1, token)
self.spi.write(buf)
self.spi.write(b'\xff')
self.spi.write(b'\xff')
# check the response
if (self.spi.read(1, 0xff)[0] & 0x1f) != 0x05:
self.cs(1)
self.spi.write(b'\xff')
return
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0:
pass
self.cs(1)
self.spi.write(b'\xff')
def write_token(self, token):
self.cs(0)
self.spi.read(1, token)
self.spi.write(b'\xff')
# wait for write to finish
while self.spi.read(1, 0xff)[0] == 0x00:
pass
self.cs(1)
self.spi.write(b'\xff')
def readblocks(self, block_num, buf):
nblocks = len(buf) // 512
assert nblocks and not len(buf) % 512, 'Buffer length is invalid'
if nblocks == 1:
# CMD17: set read address for single block
if self.cmd(17, block_num * self.cdv, 0, release=False) != 0:
# release the card
self.cs(1)
raise OSError(5) # EIO
# receive the data and release card
self.readinto(buf)
else:
# CMD18: set read address for multiple blocks
if self.cmd(18, block_num * self.cdv, 0, release=False) != 0:
# release the card
self.cs(1)
raise OSError(5) # EIO
offset = 0
mv = memoryview(buf)
while nblocks:
# receive the data and release card
self.readinto(mv[offset : offset + 512])
offset += 512
nblocks -= 1
if self.cmd(12, 0, 0xff, skip1=True):
raise OSError(5) # EIO
def writeblocks(self, block_num, buf):
nblocks, err = divmod(len(buf), 512)
assert nblocks and not err, 'Buffer length is invalid'
if nblocks == 1:
# CMD24: set write address for single block
if self.cmd(24, block_num * self.cdv, 0) != 0:
raise OSError(5) # EIO
# send the data
self.write(_TOKEN_DATA, buf)
else:
# CMD25: set write address for first block
if self.cmd(25, block_num * self.cdv, 0) != 0:
raise OSError(5) # EIO
# send the data
offset = 0
mv = memoryview(buf)
while nblocks:
self.write(_TOKEN_CMD25, mv[offset : offset + 512])
offset += 512
nblocks -= 1
self.write_token(_TOKEN_STOP_TRAN)
def ioctl(self, op, arg):
if op == 4: # get number of blocks
return self.sectors
| true
| true
|
1c42dff8e71bfcd2130ff71be639cdd2ea134e7e
| 8,155
|
py
|
Python
|
metasv/extract_pairs.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 43
|
2015-01-12T20:58:24.000Z
|
2021-11-24T07:30:06.000Z
|
metasv/extract_pairs.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 80
|
2015-01-08T00:34:55.000Z
|
2022-02-16T08:30:34.000Z
|
metasv/extract_pairs.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 25
|
2015-04-30T06:30:28.000Z
|
2022-02-22T02:48:20.000Z
|
import argparse
import logging
import multiprocessing
import time
from functools import partial, update_wrapper
from defaults import EXTRACTION_MAX_READ_PAIRS, EXTRACTION_MAX_NM, EXTRACTION_MAX_INTERVAL_TRUNCATION, EXTRACTION_TRUNCATION_PAD
import pysam
compl_table = [chr(i) for i in xrange(256)]
compl_table[ord('A')] = 'T'
compl_table[ord('C')] = 'G'
compl_table[ord('G')] = 'C'
compl_table[ord('T')] = 'A'
def compl(seq):
return "".join([compl_table[ord(i)] for i in seq])
def get_sequence_quality(aln):
if not aln.is_reverse:
return aln.seq.upper(), aln.qual
return compl(aln.seq.upper())[::-1], aln.qual[::-1]
def write_read(fd, aln):
end_id = 1 if aln.is_read1 else 2
sequence, quality = get_sequence_quality(aln)
fd.write("@%s/%d\n%s\n+\n%s\n" % (aln.qname, end_id, sequence, quality))
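# Descriptive note: is_hq() below treats a read as usable for extraction when it
# is unmapped, has non-zero mapping quality, or maps outside the target interval;
# mapq == 0 reads pinned inside the interval fail the check, as they are likely
# ambiguous placements.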
def is_hq(aln, chr_tid, chr_start, chr_end):
return aln.is_unmapped or aln.mapq>0 or (not (aln.tid==chr_tid and chr_start<=aln.pos<=chr_end))
def all_pair(aln, mate, chr_tid, chr_start, chr_end):
return True
def all_pair_hq(aln, mate, chr_tid, chr_start, chr_end):
return is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def get_nm(aln):
nm_str = aln.opt("NM")
return int(nm_str) if nm_str else 0
def perfect_aln(aln):
return not aln.is_unmapped and aln.is_proper_pair and len(aln.cigar) == 1 and get_nm(aln) <= EXTRACTION_MAX_NM
def non_perfect(aln, mate, chr_tid, chr_start, chr_end):
return not (perfect_aln(aln) and perfect_aln(mate))
def non_perfect_hq(aln, mate, chr_tid, chr_start, chr_end):
return (not (perfect_aln(aln) and perfect_aln(mate))) and is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
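# Descriptive note: discordant() below flags a pair when no insert size is
# recorded (tlen == 0) or when the observed insert size falls outside the
# [isize_min, isize_max] window.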
def discordant(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
return not (isize_min <= abs(aln.tlen) <= isize_max)
def discordant_with_normal_orientation(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
if aln.is_reverse and mate.is_reverse or not aln.is_reverse and not mate.is_reverse: return False
return not (isize_min <= abs(aln.tlen) <= isize_max)
def get_mate(aln, bam_handles):
mate = None
for bam_handle in bam_handles:
try:
mate = bam_handle.mate(aln)
except ValueError:
pass
if mate is not None:
return mate
return mate
def extract_read_pairs(bam_handles, region, prefix, extract_fns, pad=0, max_read_pairs = EXTRACTION_MAX_READ_PAIRS,
truncation_pad_read_extract = EXTRACTION_TRUNCATION_PAD,
max_interval_len_truncation = EXTRACTION_MAX_INTERVAL_TRUNCATION, sv_type=''):
logger = logging.getLogger("%s-%s" % (extract_read_pairs.__name__, multiprocessing.current_process()))
extract_fn_names = [extract_fn.__name__ for extract_fn in extract_fns]
logger.info("Extracting reads for region %s with padding %d using functions %s" % (
region, pad, extract_fn_names))
chr_name = str(region.split(':')[0])
chr_start = int(region.split(':')[1].split("-")[0]) - pad
chr_end = int(region.split(':')[1].split('-')[1]) + pad
selected_pair_counts = [0] * len(extract_fn_names)
start_time = time.time()
if chr_start < 0:
regions_to_extract = []
logger.error("Skipping read extraction since interval too close to chromosome beginning")
else:
        # Read alignments from the interval into memory and build a dictionary so
        # mates can be looked up directly instead of calling bam_handle.mate() for every read
regions_to_extract = [(chr_name, chr_start, chr_end)]
if abs(chr_end-chr_start)>max_interval_len_truncation and sv_type in ["INV","DEL","DUP"]:
            # For large SVs, the middle of the interval has no effect on genotyping, so we only extract reads around the breakpoints to speed things up
truncate_start = chr_start + pad + truncation_pad_read_extract
truncate_end = chr_end - (pad + truncation_pad_read_extract)
logger.info("Truncate the reads in [%d-%d] for %s_%d_%d" % (truncate_start,truncate_end,chr_name,chr_start,chr_end))
regions_to_extract = [(chr_name, chr_start, truncate_start-1), (chr_name, truncate_end+1, chr_end)]
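            # Hedged example: for a 2 Mbp DEL, only two windows of roughly
            # (pad + truncation_pad_read_extract) bases around the breakpoints are
            # fetched instead of the whole interval.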
aln_list = [aln for (chr_, start_, end_) in regions_to_extract for bam_handle in bam_handles for aln in bam_handle.fetch(chr_, start=start_, end=end_) if not aln.is_secondary]
aln_dict = {}
for aln in aln_list:
if aln.qname not in aln_dict:
aln_dict[aln.qname] = [None, None]
aln_dict[aln.qname][0 if aln.is_read1 else 1] = aln
aln_pairs = []
if len(aln_dict) <= max_read_pairs:
logger.info("Building mate dictionary from %d reads" % len(aln_list))
for aln_pair in aln_dict.values():
missing_index = 0 if aln_pair[0] is None else (1 if aln_pair[1] is None else 2)
if missing_index < 2:
mate = get_mate(aln_pair[1 - missing_index], bam_handles)
if mate is not None:
aln_pair[missing_index] = mate
aln_pairs.append(aln_pair)
else:
aln_pairs.append(aln_pair)
else:
logger.info("Too many reads encountered for %s. Skipping read extraction. (%d >%d)"%(region, len(aln_dict),max_read_pairs))
ends = [(open("%s_%s_1.fq" % (prefix, name), "w"), open("%s_%s_2.fq" % (prefix, name), "w")) for name in
extract_fn_names]
chr_tid = bam_handles[0].gettid(chr_name) if bam_handles else -1
for first, second in aln_pairs:
for fn_index, extract_fn in enumerate(extract_fns):
if extract_fn(first, second,chr_tid,chr_start,chr_end):
write_read(ends[fn_index][0], first)
write_read(ends[fn_index][1], second)
selected_pair_counts[fn_index] += 1
for end1, end2 in ends:
end1.close()
end2.close()
logger.info("Examined %d pairs in %g seconds" % (len(aln_pairs), time.time() - start_time))
logger.info("Extraction counts %s" % (zip(extract_fn_names, selected_pair_counts)))
return zip([(end[0].name, end[1].name) for end in ends], selected_pair_counts)
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
parser = argparse.ArgumentParser(description="Extract reads and mates from a region for spades assembly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bams", nargs='+', help="BAM files to extract reads from", required=True, default=[])
parser.add_argument("--region", help="Samtools region string", required=True)
parser.add_argument("--prefix", help="Output FASTQ prefix", required=True)
parser.add_argument("--extract_fn", help="Extraction function", choices=["all_pair", "non_perfect", "discordant"],
default="all_pair")
parser.add_argument("--pad", help="Padding to apply on both sides of the interval", type=int, default=0)
parser.add_argument("--isize_min", help="Minimum insert size", default=200, type=int)
parser.add_argument("--isize_max", help="Maximum insert size", default=500, type=int)
parser.add_argument("--max_read_pairs", help="Maximum read pairs to extract for an interval",
default=EXTRACTION_MAX_READ_PAIRS, type=int)
args = parser.parse_args()
if args.extract_fn == 'all_pair':
extract_fn = all_pair
elif args.extract_fn == 'non_perfect':
extract_fn = non_perfect
else:
extract_fn = partial(discordant, isize_min=args.isize_min, isize_max=args.isize_max)
update_wrapper(extract_fn, discordant)
bam_handles = [pysam.Samfile(bam, "rb") for bam in args.bams]
extract_read_pairs(bam_handles, args.region, args.prefix, [extract_fn], pad=args.pad,
max_read_pairs=args.max_read_pairs)
for bam_handle in bam_handles:
bam_handle.close()
| 42.473958
| 179
| 0.675659
|
import argparse
import logging
import multiprocessing
import time
from functools import partial, update_wrapper
from defaults import EXTRACTION_MAX_READ_PAIRS, EXTRACTION_MAX_NM, EXTRACTION_MAX_INTERVAL_TRUNCATION, EXTRACTION_TRUNCATION_PAD
import pysam
compl_table = [chr(i) for i in xrange(256)]
compl_table[ord('A')] = 'T'
compl_table[ord('C')] = 'G'
compl_table[ord('G')] = 'C'
compl_table[ord('T')] = 'A'
def compl(seq):
return "".join([compl_table[ord(i)] for i in seq])
def get_sequence_quality(aln):
if not aln.is_reverse:
return aln.seq.upper(), aln.qual
return compl(aln.seq.upper())[::-1], aln.qual[::-1]
def write_read(fd, aln):
end_id = 1 if aln.is_read1 else 2
sequence, quality = get_sequence_quality(aln)
fd.write("@%s/%d\n%s\n+\n%s\n" % (aln.qname, end_id, sequence, quality))
def is_hq(aln, chr_tid, chr_start, chr_end):
return aln.is_unmapped or aln.mapq>0 or (not (aln.tid==chr_tid and chr_start<=aln.pos<=chr_end))
def all_pair(aln, mate, chr_tid, chr_start, chr_end):
return True
def all_pair_hq(aln, mate, chr_tid, chr_start, chr_end):
return is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def get_nm(aln):
nm_str = aln.opt("NM")
return int(nm_str) if nm_str else 0
def perfect_aln(aln):
return not aln.is_unmapped and aln.is_proper_pair and len(aln.cigar) == 1 and get_nm(aln) <= EXTRACTION_MAX_NM
def non_perfect(aln, mate, chr_tid, chr_start, chr_end):
return not (perfect_aln(aln) and perfect_aln(mate))
def non_perfect_hq(aln, mate, chr_tid, chr_start, chr_end):
return (not (perfect_aln(aln) and perfect_aln(mate))) and is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def discordant(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
return not (isize_min <= abs(aln.tlen) <= isize_max)
def discordant_with_normal_orientation(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
if aln.is_reverse and mate.is_reverse or not aln.is_reverse and not mate.is_reverse: return False
return not (isize_min <= abs(aln.tlen) <= isize_max)
def get_mate(aln, bam_handles):
mate = None
for bam_handle in bam_handles:
try:
mate = bam_handle.mate(aln)
except ValueError:
pass
if mate is not None:
return mate
return mate
def extract_read_pairs(bam_handles, region, prefix, extract_fns, pad=0, max_read_pairs = EXTRACTION_MAX_READ_PAIRS,
truncation_pad_read_extract = EXTRACTION_TRUNCATION_PAD,
max_interval_len_truncation = EXTRACTION_MAX_INTERVAL_TRUNCATION, sv_type=''):
logger = logging.getLogger("%s-%s" % (extract_read_pairs.__name__, multiprocessing.current_process()))
extract_fn_names = [extract_fn.__name__ for extract_fn in extract_fns]
logger.info("Extracting reads for region %s with padding %d using functions %s" % (
region, pad, extract_fn_names))
chr_name = str(region.split(':')[0])
chr_start = int(region.split(':')[1].split("-")[0]) - pad
chr_end = int(region.split(':')[1].split('-')[1]) + pad
selected_pair_counts = [0] * len(extract_fn_names)
start_time = time.time()
if chr_start < 0:
regions_to_extract = []
logger.error("Skipping read extraction since interval too close to chromosome beginning")
else:
regions_to_extract = [(chr_name, chr_start, chr_end)]
if abs(chr_end-chr_start)>max_interval_len_truncation and sv_type in ["INV","DEL","DUP"]:
truncate_start = chr_start + pad + truncation_pad_read_extract
truncate_end = chr_end - (pad + truncation_pad_read_extract)
logger.info("Truncate the reads in [%d-%d] for %s_%d_%d" % (truncate_start,truncate_end,chr_name,chr_start,chr_end))
regions_to_extract = [(chr_name, chr_start, truncate_start-1), (chr_name, truncate_end+1, chr_end)]
aln_list = [aln for (chr_, start_, end_) in regions_to_extract for bam_handle in bam_handles for aln in bam_handle.fetch(chr_, start=start_, end=end_) if not aln.is_secondary]
aln_dict = {}
for aln in aln_list:
if aln.qname not in aln_dict:
aln_dict[aln.qname] = [None, None]
aln_dict[aln.qname][0 if aln.is_read1 else 1] = aln
aln_pairs = []
if len(aln_dict) <= max_read_pairs:
logger.info("Building mate dictionary from %d reads" % len(aln_list))
for aln_pair in aln_dict.values():
missing_index = 0 if aln_pair[0] is None else (1 if aln_pair[1] is None else 2)
if missing_index < 2:
mate = get_mate(aln_pair[1 - missing_index], bam_handles)
if mate is not None:
aln_pair[missing_index] = mate
aln_pairs.append(aln_pair)
else:
aln_pairs.append(aln_pair)
else:
logger.info("Too many reads encountered for %s. Skipping read extraction. (%d >%d)"%(region, len(aln_dict),max_read_pairs))
ends = [(open("%s_%s_1.fq" % (prefix, name), "w"), open("%s_%s_2.fq" % (prefix, name), "w")) for name in
extract_fn_names]
chr_tid = bam_handles[0].gettid(chr_name) if bam_handles else -1
for first, second in aln_pairs:
for fn_index, extract_fn in enumerate(extract_fns):
if extract_fn(first, second,chr_tid,chr_start,chr_end):
write_read(ends[fn_index][0], first)
write_read(ends[fn_index][1], second)
selected_pair_counts[fn_index] += 1
for end1, end2 in ends:
end1.close()
end2.close()
logger.info("Examined %d pairs in %g seconds" % (len(aln_pairs), time.time() - start_time))
logger.info("Extraction counts %s" % (zip(extract_fn_names, selected_pair_counts)))
return zip([(end[0].name, end[1].name) for end in ends], selected_pair_counts)
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
parser = argparse.ArgumentParser(description="Extract reads and mates from a region for spades assembly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bams", nargs='+', help="BAM files to extract reads from", required=True, default=[])
parser.add_argument("--region", help="Samtools region string", required=True)
parser.add_argument("--prefix", help="Output FASTQ prefix", required=True)
parser.add_argument("--extract_fn", help="Extraction function", choices=["all_pair", "non_perfect", "discordant"],
default="all_pair")
parser.add_argument("--pad", help="Padding to apply on both sides of the interval", type=int, default=0)
parser.add_argument("--isize_min", help="Minimum insert size", default=200, type=int)
parser.add_argument("--isize_max", help="Maximum insert size", default=500, type=int)
parser.add_argument("--max_read_pairs", help="Maximum read pairs to extract for an interval",
default=EXTRACTION_MAX_READ_PAIRS, type=int)
args = parser.parse_args()
if args.extract_fn == 'all_pair':
extract_fn = all_pair
elif args.extract_fn == 'non_perfect':
extract_fn = non_perfect
else:
extract_fn = partial(discordant, isize_min=args.isize_min, isize_max=args.isize_max)
update_wrapper(extract_fn, discordant)
bam_handles = [pysam.Samfile(bam, "rb") for bam in args.bams]
extract_read_pairs(bam_handles, args.region, args.prefix, [extract_fn], pad=args.pad,
max_read_pairs=args.max_read_pairs)
for bam_handle in bam_handles:
bam_handle.close()
| true
| true
|
1c42e032f0792d13a5bee37f78155fb80de52228
| 29,706
|
py
|
Python
|
external-deps/spyder-kernels/spyder_kernels/console/kernel.py
|
fumitoh/spyder
|
12294fec88a2f61c756538ac38bd748d8e7b3f82
|
[
"MIT"
] | 1
|
2021-07-08T01:27:25.000Z
|
2021-07-08T01:27:25.000Z
|
external-deps/spyder-kernels/spyder_kernels/console/kernel.py
|
fumitoh/spyder
|
12294fec88a2f61c756538ac38bd748d8e7b3f82
|
[
"MIT"
] | null | null | null |
external-deps/spyder-kernels/spyder_kernels/console/kernel.py
|
fumitoh/spyder
|
12294fec88a2f61c756538ac38bd748d8e7b3f82
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Spyder kernel for Jupyter.
"""
# Standard library imports
from distutils.version import LooseVersion
import os
import sys
import threading
# Third-party imports
import ipykernel
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from traitlets.config.loader import LazyConfigValue
# Local imports
from spyder_kernels.py3compat import TEXT_TYPES, to_text_string
from spyder_kernels.comms.frontendcomm import FrontendComm
from spyder_kernels.py3compat import PY3, input
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.mpl import (
MPL_BACKENDS_FROM_SPYDER, MPL_BACKENDS_TO_SPYDER, INLINE_FIGURE_FORMATS)
from spyder_kernels.utils.nsview import get_remote_data, make_remote_view
# Excluded variables from the Variable Explorer (i.e. they are not
# shown at all there)
EXCLUDED_NAMES = ['In', 'Out', 'exit', 'get_ipython', 'quit']
class SpyderShell(ZMQInteractiveShell):
"""Spyder shell."""
def ask_exit(self):
"""Engage the exit actions."""
self.kernel.frontend_comm.close_thread()
return super(SpyderShell, self).ask_exit()
def get_local_scope(self, stack_depth):
"""Get local scope at given frame depth."""
frame = sys._getframe(stack_depth + 1)
if self.kernel._pdb_frame is frame:
# we also give the globals because they might not be in
# self.user_ns
namespace = frame.f_globals.copy()
namespace.update(self.kernel._pdb_locals)
return namespace
else:
return frame.f_locals
class SpyderKernel(IPythonKernel):
"""Spyder kernel for Jupyter."""
shell_class = SpyderShell
def __init__(self, *args, **kwargs):
super(SpyderKernel, self).__init__(*args, **kwargs)
self.comm_manager.get_comm = self._get_comm
self.frontend_comm = FrontendComm(self)
# All functions that can be called through the comm
handlers = {
'set_breakpoints': self.set_spyder_breakpoints,
'set_pdb_ignore_lib': self.set_pdb_ignore_lib,
'set_pdb_execute_events': self.set_pdb_execute_events,
'set_pdb_use_exclamation_mark': self.set_pdb_use_exclamation_mark,
'get_value': self.get_value,
'load_data': self.load_data,
'save_namespace': self.save_namespace,
'is_defined': self.is_defined,
'get_doc': self.get_doc,
'get_source': self.get_source,
'set_value': self.set_value,
'remove_value': self.remove_value,
'copy_value': self.copy_value,
'set_cwd': self.set_cwd,
'get_cwd': self.get_cwd,
'get_syspath': self.get_syspath,
'get_env': self.get_env,
'close_all_mpl_figures': self.close_all_mpl_figures,
'show_mpl_backend_errors': self.show_mpl_backend_errors,
'get_namespace_view': self.get_namespace_view,
'set_namespace_view_settings': self.set_namespace_view_settings,
'get_var_properties': self.get_var_properties,
'set_sympy_forecolor': self.set_sympy_forecolor,
'update_syspath': self.update_syspath,
'is_special_kernel_valid': self.is_special_kernel_valid,
'get_matplotlib_backend': self.get_matplotlib_backend,
'pdb_input_reply': self.pdb_input_reply,
'_interrupt_eventloop': self._interrupt_eventloop,
}
for call_id in handlers:
self.frontend_comm.register_call_handler(
call_id, handlers[call_id])
self.namespace_view_settings = {}
self._pdb_obj = None
self._pdb_step = None
self._do_publish_pdb_state = True
self._mpl_backend_error = None
self._running_namespace = None
self._pdb_input_line = None
# -- Public API -----------------------------------------------------------
def frontend_call(self, blocking=False, broadcast=True,
timeout=None, callback=None):
"""Call the frontend."""
# If not broadcast, send only to the calling comm
if broadcast:
comm_id = None
else:
comm_id = self.frontend_comm.calling_comm_id
return self.frontend_comm.remote_call(
blocking=blocking,
comm_id=comm_id,
callback=callback,
timeout=timeout)
# --- For the Variable Explorer
def set_namespace_view_settings(self, settings):
"""Set namespace_view_settings."""
self.namespace_view_settings = settings
def get_namespace_view(self):
"""
Return the namespace view
This is a dictionary with the following structure
{'a':
{
'type': 'str',
'size': 1,
'view': '1',
'python_type': 'int',
'numpy_type': 'Unknown'
}
}
Here:
* 'a' is the variable name.
* 'type' and 'size' are self-evident.
* 'view' is its value or its repr computed with
`value_to_display`.
* 'python_type' is its Python type computed with
`get_type_string`.
* 'numpy_type' is its Numpy type (if any) computed with
`get_numpy_type_string`.
"""
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = make_remote_view(ns, settings, EXCLUDED_NAMES)
return view
else:
return None
def get_var_properties(self):
"""
Get some properties of the variables in the current
namespace
"""
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'is_set': isinstance(value, set),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return properties
else:
return None
def get_value(self, name):
"""Get the value of a variable"""
ns = self._get_current_namespace()
self._do_publish_pdb_state = False
return ns[name]
def set_value(self, name, value):
"""Set the value of a variable"""
ns = self._get_reference_namespace(name)
ns[name] = value
self.log.debug(ns)
def remove_value(self, name):
"""Remove a variable"""
ns = self._get_reference_namespace(name)
ns.pop(name)
def copy_value(self, orig_name, new_name):
"""Copy a variable"""
ns = self._get_reference_namespace(orig_name)
ns[new_name] = ns[orig_name]
def load_data(self, filename, ext, overwrite=False):
"""
Load data from filename.
        Use 'overwrite' to determine whether conflicts between variable names
        need to be handled or not.
        For example, if a loaded variable is called 'var' and there is already
        a variable 'var' in the namespace, 'overwrite=True' will cause 'var'
        to be updated.
        On the other hand, with 'overwrite=False' a new variable will be
        created with a suffix starting with 000, i.e. 'var000' (default
        behavior).
"""
from spyder_kernels.utils.misc import fix_reference_name
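        # Hedged example: loading a file that defines `var` into a namespace that
        # already contains `var` renames the incoming copy (e.g. to 'var000') via
        # fix_reference_name unless overwrite=True, which replaces the existing
        # value instead.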
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
if not overwrite:
# We convert to list since we mutate this dictionary
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None
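# Illustrative sketch (added; not part of the original source): if the loaded
# file defines a variable 'var' and 'var' already exists in the namespace,
#     self.load_data(filename, ext, overwrite=True)
# replaces the existing value, while overwrite=False keeps both by renaming
# the loaded one through fix_reference_name (e.g. to 'var000').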
def save_namespace(self, filename):
"""Save namespace into filename"""
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename)
# --- For Pdb
def is_debugging(self):
"""
Check if we are currently debugging.
"""
return bool(self._pdb_frame)
def _do_complete(self, code, cursor_pos):
"""Call parent class do_complete"""
return super(SpyderKernel, self).do_complete(code, cursor_pos)
def do_complete(self, code, cursor_pos):
"""
Call Pdb complete if we are debugging.
Public method of ipykernel overwritten for debugging.
"""
if self.is_debugging():
return self._pdb_obj.do_complete(code, cursor_pos)
return self._do_complete(code, cursor_pos)
def publish_pdb_state(self):
"""
Publish Variable Explorer state and Pdb step through
send_spyder_msg.
"""
if self._pdb_obj and self._do_publish_pdb_state:
state = dict(namespace_view = self.get_namespace_view(),
var_properties = self.get_var_properties(),
step = self._pdb_step)
self.frontend_call(blocking=False).pdb_state(state)
self._do_publish_pdb_state = True
def set_spyder_breakpoints(self, breakpoints):
"""
Handle a message from the frontend
"""
if self._pdb_obj:
self._pdb_obj.set_spyder_breakpoints(breakpoints)
def set_pdb_ignore_lib(self, state):
"""
Change the "Ignore libraries while stepping" debugger setting.
"""
if self._pdb_obj:
self._pdb_obj.pdb_ignore_lib = state
def set_pdb_execute_events(self, state):
"""
Handle a message from the frontend
"""
if self._pdb_obj:
self._pdb_obj.pdb_execute_events = state
def set_pdb_use_exclamation_mark(self, state):
"""
Set an option on the current debugging session to decide whether
the Pdb commands need to be prefixed by '!'
"""
if self._pdb_obj:
self._pdb_obj.pdb_use_exclamation_mark = state
def pdb_input_reply(self, line, echo_stack_entry=True):
"""Get a pdb command from the frontend."""
if self._pdb_obj:
self._pdb_obj._disable_next_stack_entry = not echo_stack_entry
self._pdb_input_line = line
if self.eventloop:
# Interrupting the eventloop is only implemented when a message is
# received on the shell channel, but this message is queued and
# won't be processed because an `execute` message is being
# processed. Therefore we process the message here (comm channel)
# and request a dummy message to be sent on the shell channel to
# stop the eventloop. This will call back `_interrupt_eventloop`.
self.frontend_call().request_interrupt_eventloop()
def cmd_input(self, prompt=''):
"""
Special input function for commands.
Runs the eventloop while debugging.
"""
# Only works if the comm is open and this is a pdb prompt.
if not self.frontend_comm.is_open() or not self._pdb_frame:
return input(prompt)
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
self._pdb_input_line = None
self.frontend_call().pdb_input(prompt)
# Allow GUI event loop to update
if PY3:
is_main_thread = (
threading.current_thread() is threading.main_thread())
else:
is_main_thread = isinstance(
threading.current_thread(), threading._MainThread)
# Get input by running eventloop
if is_main_thread and self.eventloop:
while self._pdb_input_line is None:
eventloop = self.eventloop
if eventloop:
eventloop(self)
else:
break
# Get input by blocking
if self._pdb_input_line is None:
self.frontend_comm.wait_until(
lambda: self._pdb_input_line is not None)
return self._pdb_input_line
def _interrupt_eventloop(self):
"""Interrupts the eventloop."""
# Receiving the request is enough to stop the eventloop.
pass
# --- For the Help plugin
def is_defined(self, obj, force_import=False):
"""Return True if object is defined in current namespace"""
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns)
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
try:
import matplotlib
matplotlib.rcParams['docstring.hardcopy'] = True
except:
pass
from spyder_kernels.utils.dochelpers import getdoc
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj)
def get_source(self, objtxt):
"""Get object source"""
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj)
# -- For Matplotlib
def get_matplotlib_backend(self):
"""Get current matplotlib backend."""
try:
import matplotlib
return MPL_BACKENDS_TO_SPYDER[matplotlib.get_backend()]
except Exception:
return None
def set_matplotlib_backend(self, backend, pylab=False):
"""Set matplotlib backend given a Spyder backend option."""
mpl_backend = MPL_BACKENDS_FROM_SPYDER[to_text_string(backend)]
self._set_mpl_backend(mpl_backend, pylab=pylab)
def set_mpl_inline_figure_format(self, figure_format):
"""Set the inline figure format to use with matplotlib."""
mpl_figure_format = INLINE_FIGURE_FORMATS[figure_format]
self._set_config_option(
'InlineBackend.figure_format', mpl_figure_format)
def set_mpl_inline_resolution(self, resolution):
"""Set inline figure resolution."""
if LooseVersion(ipykernel.__version__) < LooseVersion('4.5'):
option = 'savefig.dpi'
else:
option = 'figure.dpi'
self._set_mpl_inline_rc_config(option, resolution)
def set_mpl_inline_figure_size(self, width, height):
"""Set inline figure size."""
value = (width, height)
self._set_mpl_inline_rc_config('figure.figsize', value)
def set_mpl_inline_bbox_inches(self, bbox_inches):
"""
Set inline print figure bbox inches.
The change is done by updating the 'print_figure_kwargs' config dict.
"""
from IPython.core.getipython import get_ipython
config = get_ipython().kernel.config
inline_config = (
config['InlineBackend'] if 'InlineBackend' in config else {})
print_figure_kwargs = (
inline_config['print_figure_kwargs']
if 'print_figure_kwargs' in inline_config else {})
bbox_inches_dict = {
'bbox_inches': 'tight' if bbox_inches else None}
print_figure_kwargs.update(bbox_inches_dict)
# This seems to be necessary for newer versions of Traitlets because
# print_figure_kwargs doesn't return a dict.
if isinstance(print_figure_kwargs, LazyConfigValue):
figure_kwargs_dict = print_figure_kwargs.to_dict().get('update')
if figure_kwargs_dict:
print_figure_kwargs = figure_kwargs_dict
self._set_config_option(
'InlineBackend.print_figure_kwargs', print_figure_kwargs)
# -- For completions
def set_jedi_completer(self, use_jedi):
"""Enable/Disable jedi as the completer for the kernel."""
self._set_config_option('IPCompleter.use_jedi', use_jedi)
def set_greedy_completer(self, use_greedy):
"""Enable/Disable greedy completer for the kernel."""
self._set_config_option('IPCompleter.greedy', use_greedy)
def set_autocall(self, autocall):
"""Enable/Disable autocall funtionality."""
self._set_config_option('ZMQInteractiveShell.autocall', autocall)
# --- Additional methods
def set_cwd(self, dirname):
"""Set current working directory."""
os.chdir(dirname)
def get_cwd(self):
"""Get current working directory."""
try:
return os.getcwd()
except (IOError, OSError):
pass
def get_syspath(self):
"""Return sys.path contents."""
return sys.path[:]
def get_env(self):
"""Get environment variables."""
return os.environ.copy()
def close_all_mpl_figures(self):
"""Close all Matplotlib figures."""
try:
import matplotlib.pyplot as plt
plt.close('all')
del plt
except:
pass
def is_special_kernel_valid(self):
"""
Check if optional dependencies are available for special consoles.
"""
try:
if os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True':
import matplotlib
elif os.environ.get('SPY_SYMPY_O') == 'True':
import sympy
elif os.environ.get('SPY_RUN_CYTHON') == 'True':
import cython
except Exception:
# Use Exception instead of ImportError here because modules can
# fail to be imported due to a lot of issues.
if os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True':
return u'matplotlib'
elif os.environ.get('SPY_SYMPY_O') == 'True':
return u'sympy'
elif os.environ.get('SPY_RUN_CYTHON') == 'True':
return u'cython'
return None
def update_syspath(self, path_dict, new_path_dict):
"""
Update the PYTHONPATH of the kernel.
`path_dict` and `new_path_dict` have the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
`path_dict` corresponds to the previous state of the PYTHONPATH.
`new_path_dict` corresponds to the new state of the PYTHONPATH.
"""
# Remove old paths
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
# Add new paths
# We do this in reverse order as we use `sys.path.insert(1, path)`.
# This ensures the end result has the correct path order.
for path, active in reversed(new_path_dict.items()):
if active:
sys.path.insert(1, path)
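# Illustrative sketch (added; not part of the original source): with
#     path_dict     = {'/old/project': True}
#     new_path_dict = {'/new/project': True, '/disabled/path': False}
# update_syspath removes '/old/project' from sys.path and inserts
# '/new/project' at position 1; '/disabled/path' is skipped because its
# state is False.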
# -- Private API ---------------------------------------------------
# --- For the Variable Explorer
def _get_current_namespace(self, with_magics=False):
"""
Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging
"""
ns = {}
if self._running_namespace is None:
ns.update(self._mglobals())
else:
running_globals, running_locals = self._running_namespace
ns.update(running_globals)
if running_locals is not None:
ns.update(running_locals)
if self._pdb_frame is not None:
ns.update(self._pdb_locals)
# Add magics to ns so we can show help about them on the Help
# plugin
if with_magics:
line_magics = self.shell.magics_manager.magics['line']
cell_magics = self.shell.magics_manager.magics['cell']
ns.update(line_magics)
ns.update(cell_magics)
return ns
def _get_reference_namespace(self, name):
"""
Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined
"""
glbs = self._mglobals()
if self._pdb_frame is None:
return glbs
else:
lcls = self._pdb_locals
if name in lcls:
return lcls
else:
return glbs
def _mglobals(self):
"""Return current globals -- handles Pdb frames"""
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns
def _get_len(self, var):
"""Return sequence length"""
try:
return len(var)
except:
return None
def _is_array(self, var):
"""Return True if variable is a NumPy array"""
try:
import numpy
return isinstance(var, numpy.ndarray)
except:
return False
def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False
def _is_data_frame(self, var):
"""Return True if variable is a DataFrame"""
try:
from pandas import DataFrame
return isinstance(var, DataFrame)
except:
return False
def _is_series(self, var):
"""Return True if variable is a Series"""
try:
from pandas import Series
return isinstance(var, Series)
except:
return False
def _get_array_shape(self, var):
"""Return array's shape"""
try:
if self._is_array(var):
return var.shape
else:
return None
except:
return None
def _get_array_ndim(self, var):
"""Return array's ndim"""
try:
if self._is_array(var):
return var.ndim
else:
return None
except:
return None
# --- For Pdb
def _register_pdb_session(self, pdb_obj):
"""Register Pdb session to use it later"""
self._pdb_obj = pdb_obj
@property
def _pdb_frame(self):
"""Return current Pdb frame if there is any"""
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe
@property
def _pdb_locals(self):
"""
Return current Pdb frame locals if available. Otherwise
return an empty dictionary
"""
if self._pdb_frame:
return self._pdb_obj.curframe_locals
else:
return {}
# --- For the Help plugin
def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False
# --- For Matplotlib
def _set_mpl_backend(self, backend, pylab=False):
"""
Set a backend for Matplotlib.
backend: A parameter that can be passed to %matplotlib
(e.g. 'inline' or 'tk').
pylab: Whether the pylab magic should be used to populate the
namespace from numpy and matplotlib.
"""
import traceback
from IPython.core.getipython import get_ipython
generic_error = (
"\n" + "="*73 + "\n"
"NOTE: The following error appeared when setting "
"your Matplotlib backend!!\n" + "="*73 + "\n\n"
"{0}"
)
magic = 'pylab' if pylab else 'matplotlib'
error = None
try:
get_ipython().run_line_magic(magic, backend)
except RuntimeError as err:
# This catches errors generated by ipykernel when
# trying to set a backend. See issue 5541
if "GUI eventloops" in str(err):
import matplotlib
previous_backend = matplotlib.get_backend()
if not backend in previous_backend.lower():
# Only inform about an error if the user selected backend
# and the one set by Matplotlib are different. Else this
# message is very confusing.
error = (
"\n"
"NOTE: Spyder *can't* set your selected Matplotlib "
"backend because there is a previous backend already "
"in use.\n\n"
"Your backend will be {0}".format(previous_backend)
)
del matplotlib
# This covers other RuntimeErrors
else:
error = generic_error.format(traceback.format_exc())
except Exception:
error = generic_error.format(traceback.format_exc())
self._mpl_backend_error = error
def _set_config_option(self, option, value):
"""
Set config options using the %config magic.
As parameters:
option: config option, for example 'InlineBackend.figure_format'.
value: value of the option, for example 'SVG', 'Retina', etc.
"""
from IPython.core.getipython import get_ipython
try:
base_config = "{option} = "
value_line = (
"'{value}'" if isinstance(value, TEXT_TYPES) else "{value}")
config_line = base_config + value_line
get_ipython().run_line_magic(
'config',
config_line.format(option=option, value=value))
except Exception:
pass
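# Illustrative sketch (added; not part of the original source): a call such as
#     self._set_config_option('InlineBackend.figure_format', 'svg')
# builds the line "InlineBackend.figure_format = 'svg'" and runs it through
# the %config magic via get_ipython().run_line_magic('config', ...).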
def _set_mpl_inline_rc_config(self, option, value):
"""
Update any of the Matplotlib rcParams given an option and value.
"""
try:
from matplotlib import rcParams
rcParams[option] = value
except Exception:
# Needed in case matplotlib isn't installed
pass
def show_mpl_backend_errors(self):
"""Show Matplotlib backend errors after the prompt is ready."""
if self._mpl_backend_error is not None:
print(self._mpl_backend_error) # spyder: test-skip
def set_sympy_forecolor(self, background_color='dark'):
"""Set SymPy forecolor depending on console background."""
if os.environ.get('SPY_SYMPY_O') == 'True':
try:
from sympy import init_printing
from IPython.core.getipython import get_ipython
if background_color == 'dark':
init_printing(forecolor='White', ip=get_ipython())
elif background_color == 'light':
init_printing(forecolor='Black', ip=get_ipython())
except Exception:
pass
# --- Others
def _load_autoreload_magic(self):
"""Load %autoreload magic."""
from IPython.core.getipython import get_ipython
try:
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
except Exception:
pass
def _load_wurlitzer(self):
"""Load wurlitzer extension."""
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
from IPython.core.getipython import get_ipython
# Enclose this in a try/except because if it fails the
# console will be totally unusable.
# Fixes spyder-ide/spyder#8668
try:
get_ipython().run_line_magic('reload_ext', 'wurlitzer')
except Exception:
pass
def _get_comm(self, comm_id):
"""
We need to redefine this method from ipykernel.comm_manager to
avoid showing a warning when the comm corresponding to comm_id
is not present.
Fixes spyder-ide/spyder#15498
"""
try:
return self.comm_manager.comms[comm_id]
except KeyError:
pass
| 34.948235
| 79
| 0.592709
|
from distutils.version import LooseVersion
import os
import sys
import threading
import ipykernel
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from traitlets.config.loader import LazyConfigValue
from spyder_kernels.py3compat import TEXT_TYPES, to_text_string
from spyder_kernels.comms.frontendcomm import FrontendComm
from spyder_kernels.py3compat import PY3, input
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.mpl import (
MPL_BACKENDS_FROM_SPYDER, MPL_BACKENDS_TO_SPYDER, INLINE_FIGURE_FORMATS)
from spyder_kernels.utils.nsview import get_remote_data, make_remote_view
EXCLUDED_NAMES = ['In', 'Out', 'exit', 'get_ipython', 'quit']
class SpyderShell(ZMQInteractiveShell):
def ask_exit(self):
self.kernel.frontend_comm.close_thread()
return super(SpyderShell, self).ask_exit()
def get_local_scope(self, stack_depth):
frame = sys._getframe(stack_depth + 1)
if self.kernel._pdb_frame is frame:
namespace = frame.f_globals.copy()
namespace.update(self.kernel._pdb_locals)
return namespace
else:
return frame.f_locals
class SpyderKernel(IPythonKernel):
shell_class = SpyderShell
def __init__(self, *args, **kwargs):
super(SpyderKernel, self).__init__(*args, **kwargs)
self.comm_manager.get_comm = self._get_comm
self.frontend_comm = FrontendComm(self)
handlers = {
'set_breakpoints': self.set_spyder_breakpoints,
'set_pdb_ignore_lib': self.set_pdb_ignore_lib,
'set_pdb_execute_events': self.set_pdb_execute_events,
'set_pdb_use_exclamation_mark': self.set_pdb_use_exclamation_mark,
'get_value': self.get_value,
'load_data': self.load_data,
'save_namespace': self.save_namespace,
'is_defined': self.is_defined,
'get_doc': self.get_doc,
'get_source': self.get_source,
'set_value': self.set_value,
'remove_value': self.remove_value,
'copy_value': self.copy_value,
'set_cwd': self.set_cwd,
'get_cwd': self.get_cwd,
'get_syspath': self.get_syspath,
'get_env': self.get_env,
'close_all_mpl_figures': self.close_all_mpl_figures,
'show_mpl_backend_errors': self.show_mpl_backend_errors,
'get_namespace_view': self.get_namespace_view,
'set_namespace_view_settings': self.set_namespace_view_settings,
'get_var_properties': self.get_var_properties,
'set_sympy_forecolor': self.set_sympy_forecolor,
'update_syspath': self.update_syspath,
'is_special_kernel_valid': self.is_special_kernel_valid,
'get_matplotlib_backend': self.get_matplotlib_backend,
'pdb_input_reply': self.pdb_input_reply,
'_interrupt_eventloop': self._interrupt_eventloop,
}
for call_id in handlers:
self.frontend_comm.register_call_handler(
call_id, handlers[call_id])
self.namespace_view_settings = {}
self._pdb_obj = None
self._pdb_step = None
self._do_publish_pdb_state = True
self._mpl_backend_error = None
self._running_namespace = None
self._pdb_input_line = None
def frontend_call(self, blocking=False, broadcast=True,
timeout=None, callback=None):
if broadcast:
comm_id = None
else:
comm_id = self.frontend_comm.calling_comm_id
return self.frontend_comm.remote_call(
blocking=blocking,
comm_id=comm_id,
callback=callback,
timeout=timeout)
def set_namespace_view_settings(self, settings):
self.namespace_view_settings = settings
def get_namespace_view(self):
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = make_remote_view(ns, settings, EXCLUDED_NAMES)
return view
else:
return None
def get_var_properties(self):
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'is_set': isinstance(value, set),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return properties
else:
return None
def get_value(self, name):
ns = self._get_current_namespace()
self._do_publish_pdb_state = False
return ns[name]
def set_value(self, name, value):
ns = self._get_reference_namespace(name)
ns[name] = value
self.log.debug(ns)
def remove_value(self, name):
ns = self._get_reference_namespace(name)
ns.pop(name)
def copy_value(self, orig_name, new_name):
ns = self._get_reference_namespace(orig_name)
ns[new_name] = ns[orig_name]
def load_data(self, filename, ext, overwrite=False):
from spyder_kernels.utils.misc import fix_reference_name
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
if not overwrite:
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None
def save_namespace(self, filename):
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename)
def is_debugging(self):
return bool(self._pdb_frame)
def _do_complete(self, code, cursor_pos):
return super(SpyderKernel, self).do_complete(code, cursor_pos)
def do_complete(self, code, cursor_pos):
if self.is_debugging():
return self._pdb_obj.do_complete(code, cursor_pos)
return self._do_complete(code, cursor_pos)
def publish_pdb_state(self):
if self._pdb_obj and self._do_publish_pdb_state:
state = dict(namespace_view = self.get_namespace_view(),
var_properties = self.get_var_properties(),
step = self._pdb_step)
self.frontend_call(blocking=False).pdb_state(state)
self._do_publish_pdb_state = True
def set_spyder_breakpoints(self, breakpoints):
if self._pdb_obj:
self._pdb_obj.set_spyder_breakpoints(breakpoints)
def set_pdb_ignore_lib(self, state):
if self._pdb_obj:
self._pdb_obj.pdb_ignore_lib = state
def set_pdb_execute_events(self, state):
if self._pdb_obj:
self._pdb_obj.pdb_execute_events = state
def set_pdb_use_exclamation_mark(self, state):
if self._pdb_obj:
self._pdb_obj.pdb_use_exclamation_mark = state
def pdb_input_reply(self, line, echo_stack_entry=True):
if self._pdb_obj:
self._pdb_obj._disable_next_stack_entry = not echo_stack_entry
self._pdb_input_line = line
if self.eventloop:
# processed. Therefore we process the message here (comm channel)
# and request a dummy message to be sent on the shell channel to
# stop the eventloop. This will call back `_interrupt_eventloop`.
self.frontend_call().request_interrupt_eventloop()
def cmd_input(self, prompt=''):
# Only works if the comm is open and this is a pdb prompt.
if not self.frontend_comm.is_open() or not self._pdb_frame:
return input(prompt)
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
self._pdb_input_line = None
self.frontend_call().pdb_input(prompt)
# Allow GUI event loop to update
if PY3:
is_main_thread = (
threading.current_thread() is threading.main_thread())
else:
is_main_thread = isinstance(
threading.current_thread(), threading._MainThread)
# Get input by running eventloop
if is_main_thread and self.eventloop:
while self._pdb_input_line is None:
eventloop = self.eventloop
if eventloop:
eventloop(self)
else:
break
# Get input by blocking
if self._pdb_input_line is None:
self.frontend_comm.wait_until(
lambda: self._pdb_input_line is not None)
return self._pdb_input_line
def _interrupt_eventloop(self):
# Receiving the request is enough to stop the eventloop.
pass
# --- For the Help plugin
def is_defined(self, obj, force_import=False):
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns)
def get_doc(self, objtxt):
try:
import matplotlib
matplotlib.rcParams['docstring.hardcopy'] = True
except:
pass
from spyder_kernels.utils.dochelpers import getdoc
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj)
def get_source(self, objtxt):
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj)
# -- For Matplotlib
def get_matplotlib_backend(self):
try:
import matplotlib
return MPL_BACKENDS_TO_SPYDER[matplotlib.get_backend()]
except Exception:
return None
def set_matplotlib_backend(self, backend, pylab=False):
mpl_backend = MPL_BACKENDS_FROM_SPYDER[to_text_string(backend)]
self._set_mpl_backend(mpl_backend, pylab=pylab)
def set_mpl_inline_figure_format(self, figure_format):
mpl_figure_format = INLINE_FIGURE_FORMATS[figure_format]
self._set_config_option(
'InlineBackend.figure_format', mpl_figure_format)
def set_mpl_inline_resolution(self, resolution):
if LooseVersion(ipykernel.__version__) < LooseVersion('4.5'):
option = 'savefig.dpi'
else:
option = 'figure.dpi'
self._set_mpl_inline_rc_config(option, resolution)
def set_mpl_inline_figure_size(self, width, height):
value = (width, height)
self._set_mpl_inline_rc_config('figure.figsize', value)
def set_mpl_inline_bbox_inches(self, bbox_inches):
from IPython.core.getipython import get_ipython
config = get_ipython().kernel.config
inline_config = (
config['InlineBackend'] if 'InlineBackend' in config else {})
print_figure_kwargs = (
inline_config['print_figure_kwargs']
if 'print_figure_kwargs' in inline_config else {})
bbox_inches_dict = {
'bbox_inches': 'tight' if bbox_inches else None}
print_figure_kwargs.update(bbox_inches_dict)
# This seems to be necessary for newer versions of Traitlets because
# print_figure_kwargs doesn't return a dict.
if isinstance(print_figure_kwargs, LazyConfigValue):
figure_kwargs_dict = print_figure_kwargs.to_dict().get('update')
if figure_kwargs_dict:
print_figure_kwargs = figure_kwargs_dict
self._set_config_option(
'InlineBackend.print_figure_kwargs', print_figure_kwargs)
def set_jedi_completer(self, use_jedi):
self._set_config_option('IPCompleter.use_jedi', use_jedi)
def set_greedy_completer(self, use_greedy):
self._set_config_option('IPCompleter.greedy', use_greedy)
def set_autocall(self, autocall):
self._set_config_option('ZMQInteractiveShell.autocall', autocall)
def set_cwd(self, dirname):
os.chdir(dirname)
def get_cwd(self):
try:
return os.getcwd()
except (IOError, OSError):
pass
def get_syspath(self):
return sys.path[:]
def get_env(self):
return os.environ.copy()
def close_all_mpl_figures(self):
try:
import matplotlib.pyplot as plt
plt.close('all')
del plt
except:
pass
def is_special_kernel_valid(self):
try:
if os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True':
import matplotlib
elif os.environ.get('SPY_SYMPY_O') == 'True':
import sympy
elif os.environ.get('SPY_RUN_CYTHON') == 'True':
import cython
except Exception:
if os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True':
return u'matplotlib'
elif os.environ.get('SPY_SYMPY_O') == 'True':
return u'sympy'
elif os.environ.get('SPY_RUN_CYTHON') == 'True':
return u'cython'
return None
def update_syspath(self, path_dict, new_path_dict):
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict.items()):
if active:
sys.path.insert(1, path)
def _get_current_namespace(self, with_magics=False):
ns = {}
if self._running_namespace is None:
ns.update(self._mglobals())
else:
running_globals, running_locals = self._running_namespace
ns.update(running_globals)
if running_locals is not None:
ns.update(running_locals)
if self._pdb_frame is not None:
ns.update(self._pdb_locals)
if with_magics:
line_magics = self.shell.magics_manager.magics['line']
cell_magics = self.shell.magics_manager.magics['cell']
ns.update(line_magics)
ns.update(cell_magics)
return ns
def _get_reference_namespace(self, name):
glbs = self._mglobals()
if self._pdb_frame is None:
return glbs
else:
lcls = self._pdb_locals
if name in lcls:
return lcls
else:
return glbs
def _mglobals(self):
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns
def _get_len(self, var):
try:
return len(var)
except:
return None
def _is_array(self, var):
try:
import numpy
return isinstance(var, numpy.ndarray)
except:
return False
def _is_image(self, var):
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False
def _is_data_frame(self, var):
try:
from pandas import DataFrame
return isinstance(var, DataFrame)
except:
return False
def _is_series(self, var):
try:
from pandas import Series
return isinstance(var, Series)
except:
return False
def _get_array_shape(self, var):
try:
if self._is_array(var):
return var.shape
else:
return None
except:
return None
def _get_array_ndim(self, var):
try:
if self._is_array(var):
return var.ndim
else:
return None
except:
return None
def _register_pdb_session(self, pdb_obj):
self._pdb_obj = pdb_obj
@property
def _pdb_frame(self):
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe
@property
def _pdb_locals(self):
if self._pdb_frame:
return self._pdb_obj.curframe_locals
else:
return {}
def _eval(self, text):
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False
def _set_mpl_backend(self, backend, pylab=False):
import traceback
from IPython.core.getipython import get_ipython
generic_error = (
"\n" + "="*73 + "\n"
"NOTE: The following error appeared when setting "
"your Matplotlib backend!!\n" + "="*73 + "\n\n"
"{0}"
)
magic = 'pylab' if pylab else 'matplotlib'
error = None
try:
get_ipython().run_line_magic(magic, backend)
except RuntimeError as err:
if "GUI eventloops" in str(err):
import matplotlib
previous_backend = matplotlib.get_backend()
if not backend in previous_backend.lower():
error = (
"\n"
"NOTE: Spyder *can't* set your selected Matplotlib "
"backend because there is a previous backend already "
"in use.\n\n"
"Your backend will be {0}".format(previous_backend)
)
del matplotlib
# This covers other RuntimeErrors
else:
error = generic_error.format(traceback.format_exc())
except Exception:
error = generic_error.format(traceback.format_exc())
self._mpl_backend_error = error
def _set_config_option(self, option, value):
from IPython.core.getipython import get_ipython
try:
base_config = "{option} = "
value_line = (
"'{value}'" if isinstance(value, TEXT_TYPES) else "{value}")
config_line = base_config + value_line
get_ipython().run_line_magic(
'config',
config_line.format(option=option, value=value))
except Exception:
pass
def _set_mpl_inline_rc_config(self, option, value):
try:
from matplotlib import rcParams
rcParams[option] = value
except Exception:
pass
def show_mpl_backend_errors(self):
if self._mpl_backend_error is not None:
print(self._mpl_backend_error) # spyder: test-skip
def set_sympy_forecolor(self, background_color='dark'):
if os.environ.get('SPY_SYMPY_O') == 'True':
try:
from sympy import init_printing
from IPython.core.getipython import get_ipython
if background_color == 'dark':
init_printing(forecolor='White', ip=get_ipython())
elif background_color == 'light':
init_printing(forecolor='Black', ip=get_ipython())
except Exception:
pass
# --- Others
def _load_autoreload_magic(self):
from IPython.core.getipython import get_ipython
try:
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
except Exception:
pass
def _load_wurlitzer(self):
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
from IPython.core.getipython import get_ipython
# Enclose this in a try/except because if it fails the
# console will be totally unusable.
# Fixes spyder-ide/spyder#8668
try:
get_ipython().run_line_magic('reload_ext', 'wurlitzer')
except Exception:
pass
def _get_comm(self, comm_id):
try:
return self.comm_manager.comms[comm_id]
except KeyError:
pass
| true
| true
|
1c42e0fb6c6a8804f139a55a6b4ef4187901c5b6
| 11,946
|
py
|
Python
|
rangeslicetools/utils.py
|
KOLANICH/rangeslicetools
|
3111219b6ee52556483e5e6e260ba769b14e818b
|
[
"Unlicense"
] | null | null | null |
rangeslicetools/utils.py
|
KOLANICH/rangeslicetools
|
3111219b6ee52556483e5e6e260ba769b14e818b
|
[
"Unlicense"
] | null | null | null |
rangeslicetools/utils.py
|
KOLANICH/rangeslicetools
|
3111219b6ee52556483e5e6e260ba769b14e818b
|
[
"Unlicense"
] | null | null | null |
import heapq
import itertools
import typing
from collections.abc import Sequence
from functools import wraps
__all__ = ("SliceRangeT", "SliceRangeTypeT", "SliceRangeSeqT", "SliceRangeListT", "sAny2Type", "range2slice", "slice2range", "slen", "sdir", "svec", "srev", "sdirect", "snormalize", "ssplit_1_", "ssplit_1", "ssplit_", "ssplit", "schunks_", "schunks", "soffset_split_", "soffset_split", "sjoin_", "swithin", "soverlaps", "teeSliceSequences", "salign_", "sPointIn", "ssegments_", "ssegments", "shull")
isInstArg = (range, slice)
SliceRangeT = typing.Union[isInstArg]
SliceRangeTypeT = typing.Union[tuple(typing.Type[el] for el in isInstArg)]
SliceRangeSeqT = typing.Iterable[SliceRangeT]
SliceRangeListT = typing.Sequence[SliceRangeT]
SliceRangeOptListT = typing.Union[SliceRangeT, SliceRangeListT]
def _getStepForComputation(slc: SliceRangeT) -> int:
"""Returns a `step` that is a number"""
if slc.step is not None:
return slc.step
if slc.start <= slc.stop:
return 1
raise ValueError("start < end, so if step is not explicitly defined, it is undefined! Setup the step explicitly (you would likely need -1)!")
def sign(n: int) -> int:
"""Signum func FOR OUR PURPOSES"""
if n is None or n >= 0:
return 1
return -1
def _scollapse(slc: SliceRangeOptListT) -> SliceRangeOptListT:
"""Collapses a sequence of ranges into a range, if it contains only a 1 range"""
if not isinstance(slc, isInstArg) and len(slc) == 1:
return slc[0]
return slc
def sAny2Type(rng: SliceRangeT, tp: SliceRangeTypeT) -> SliceRangeT:
"""Creates a new /range/slice with needed type"""
return tp(rng.start, rng.stop, _getStepForComputation(rng))
def range2slice(rng: SliceRangeT) -> slice:
"""Clones into a slice."""
return sAny2Type(rng, slice)
def slice2range(slc: SliceRangeT) -> range:
"""Clones into a range."""
return sAny2Type(slc, range)
def _slen(slc: SliceRangeT) -> int:
return len(slice2range(slc))
def slen(slcs: SliceRangeSeqT) -> int:
"""Returns length of a range/slice."""
if isinstance(slcs, isInstArg):
return _slen(slcs)
total = 0
for s in slcs:
total += _slen(s)
return total
def sdir(slc: SliceRangeT) -> int:
"""Returns director of a range/slice."""
return sign(slc.stop - slc.start)
def svec(slc: SliceRangeT) -> int:
return sdir(slc) * slen(slc)
def srev(slc: SliceRangeT) -> SliceRangeT:
"""Reverses direction of a range/slice."""
step = _getStepForComputation(slc)
newStep = -1 * step
assert isinstance(slc, range) or newStep >= -1, "Negative-directed slices with `step`s other than -1 don't work!"
return slc.__class__(slc.stop - step, slc.start - step, newStep)
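# Illustrative example (added; not part of the original source), based on the
# implementation above:
#     srev(range(0, 5)) == range(4, -1, -1)   # the points 4, 3, 2, 1, 0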
def _isNegative(slcs: SliceRangeListT) -> typing.Iterable[bool]:
return slcs.__class__(el.stop < el.start for el in slcs)
def _sdirect(donorNegative: bool, acceptor: SliceRangeOptListT) -> SliceRangeOptListT:
if not isinstance(acceptor, isInstArg):
if not isinstance(donorNegative, bool):
return acceptor.__class__(_sdirect(*el) for el in zip(donorNegative, acceptor))
return acceptor.__class__(_sdirect(donorNegative, el) for el in acceptor)
if donorNegative != (acceptor.stop < acceptor.start):
return srev(acceptor)
return acceptor
def sPointIn(s: SliceRangeT, pt: int) -> bool:
#return (((s.step is None or s.step > 0) and s.start <= pt < s.stop) or (s.start >= pt > s.stop))
return pt in slice2range(s)
def snormalize(slc: SliceRangeOptListT) -> SliceRangeOptListT:
"""Returns range/slice that points forward. If the range is positive-directed with the step 1, removes the step."""
res = _sdirect(False, slc)
if isinstance(res, isInstArg):
return sAny2Type(res, slc.__class__)
return res.__class__(sAny2Type(el, el.__class__) for el in res)
def sdirect(donor: SliceRangeT, acceptor: SliceRangeT) -> SliceRangeT:
"""Makes direction of an `acceptor` the same as a direction of a `donor."""
return _sdirect(donor.stop < donor.start, acceptor)
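# Illustrative examples (added; not part of the original source), based on the
# implementations above:
#     snormalize(range(4, -1, -1)) == range(0, 5)                 # points forward again
#     sdirect(range(3, 0, -1), range(0, 5)) == range(4, -1, -1)   # copies the negative direction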
class InBandSignal:
__slots__ = ()
newMacroGroup = InBandSignal()
def _createWrappedWithnewMacroGroup(f: typing.Callable) -> typing.Callable:
@wraps(f)
def f1(*args, **kwargs):
bigRes = []
res = []
secCtor = kwargs.get("_secCtor", tuple)
def genericAppend():
nonlocal res
if len(res) == 1:
res = res[0]
else:
res = secCtor(res)
bigRes.append(res)
res = []
for el in f(*args, **kwargs):
#ic(el)
if el is not newMacroGroup:
res.append(el)
else:
genericAppend()
if res:
genericAppend()
bigRes = secCtor(bigRes)
return bigRes
f1.__annotations__["return"] = typing.Iterable[SliceRangeOptListT]
return f1
def ssplit_1_(slc: SliceRangeT, splitPts: typing.Union[int, typing.Iterable[int]]) -> SliceRangeSeqT:
"""Splits the slices by split points, which are ABSOLUTE POSITIONS OF POINTS on axis."""
tp = slc.__class__
if isinstance(splitPts, int):
splitPts = (splitPts,)
for p in splitPts:
if p != slc.start:
yield tp(slc.start, p, slc.step)
yield newMacroGroup
slc = tp(p, slc.stop, slc.step)
yield slc
ssplit_1 = _createWrappedWithnewMacroGroup(ssplit_1_)
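# Illustrative example (added; not part of the original source), based on the
# implementation above:
#     ssplit_1(range(0, 10), 4) == (range(0, 4), range(4, 10))   # split at point 4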
def ssplit_(slc: SliceRangeSeqT, splitPts: typing.Iterable[int]) -> SliceRangeSeqT:
"""Splits the slices by split points, which are ABSOLUTE POSITIONS OF POINTS on axis."""
if isinstance(slc, isInstArg):
slc = (slc,)
if isinstance(splitPts, int):
splitPts = (splitPts,)
splitPts = iter(splitPts)
try:
pt = next(splitPts)
except StopIteration:
yield from slc
return
pts2split = []
for s in slc:
while pt is not None and sPointIn(s, pt):
pts2split.append(pt)
try:
pt = next(splitPts)
except StopIteration:
pt = None
if pts2split:
yield from ssplit_1_(s, pts2split)
pts2split = []
else:
yield s
ssplit = _createWrappedWithnewMacroGroup(ssplit_)
def schunks_(slc: SliceRangeT, chunkLen: int) -> SliceRangeSeqT:
"""Splits the slice into slices of length `chunkLen` (which is in `slc.step`s!!!)"""
cl = chunkLen * _getStepForComputation(slc)
return ssplit_(slc, range(slc.start + cl, slc.stop, cl))
schunks = _createWrappedWithnewMacroGroup(schunks_)
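# Illustrative example (added; not part of the original source), based on the
# implementation above:
#     schunks(range(0, 10), 3) == (range(0, 3), range(3, 6), range(6, 9), range(9, 10))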
def soffset_split_(slc: SliceRangeSeqT, splitPts: typing.Iterable[int]) -> SliceRangeSeqT:
"""Splits the slices by split points, which are OFFSETS FROM RANGE BEGINNING."""
if isinstance(slc, isInstArg):
slc = (slc,)
if isinstance(splitPts, int):
splitPts = (splitPts,)
splitPts = iter(splitPts)
try:
pt = next(splitPts)
except StopIteration:
yield from slc
return
cumLen = 0
cumLenPrev = None
pts2split = []
for s in slc:
cumLenPrev = cumLen
cumLen += slen(s)
while pt is not None and cumLenPrev <= pt < cumLen:
pts2split.append(s.start + (pt - cumLenPrev) * _getStepForComputation(s))
try:
pt = next(splitPts)
except StopIteration:
pt = None
if pts2split:
yield from ssplit_1_(s, pts2split)
pts2split = []
else:
yield s
soffset_split = _createWrappedWithnewMacroGroup(soffset_split_)
def _posHull(first: SliceRangeT, slcs: SliceRangeSeqT) -> SliceRangeT:
mi, ma = first.start, first.stop
for slc in slcs:
mi = min(mi, slc.start)
ma = max(slc.stop, ma)
return first.__class__(mi, ma, first.step)
def _negHull(first: SliceRangeT, slcs: SliceRangeSeqT) -> SliceRangeT:
ma, mi = first.start, first.stop
for slc in slcs:
mi = min(mi, slc.stop)
ma = max(slc.start, ma)
return first.__class__(ma, mi, first.step)
def shull(slcs: SliceRangeSeqT) -> SliceRangeT:
"""Returns the range covering all the ranges provided. Every item must be of the same direction!
See also `sunion`, which is slower, but takes into account direction.
"""
slcs = iter(slcs)
first = next(slcs)
if first.start < first.stop:
return _posHull(first, slcs)
return _negHull(first, slcs)
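# Illustrative example (added; not part of the original source), based on the
# implementation above:
#     shull([range(2, 5), range(0, 3), range(4, 9)]) == range(0, 9)   # covering range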
def sjoin_(slcs: SliceRangeSeqT) -> SliceRangeSeqT:
"""Merges adjacent or overlapped ranges. All the ranges must be of the same direction. If the direction is negative, the sequence MUST be reversed! The sequence MUST be sorted. The type is taken from the type of the first range in the input."""
slcs = iter(slcs)
wholeDir = 0
while not wholeDir:
try:
prevSlc = next(slcs)
except StopIteration:
return
wholeDir = sdir(prevSlc)
wholeDir = wholeDir > 0 # type: bool
tp = prevSlc.__class__
for s in slcs:
#assert (prevSlc.start <= prevSlc.stop) == (s.start <= s.stop)
if prevSlc.step == s.step:
if s.start == prevSlc.stop:
prevSlc = tp(prevSlc.start, s.stop, prevSlc.step)
else:
curDir = prevSlc.start <= s.start
if (swithin(prevSlc, s) or swithin(s, prevSlc)) or curDir == wholeDir and soverlaps(prevSlc, s):
prevSlc = shull((prevSlc, s))
else:
yield prevSlc
prevSlc = s
else:
yield prevSlc
prevSlc = s
yield prevSlc
def swithin(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
"""Answers if needle is fully within haystack (including boundaries)."""
hsn = snormalize(haystack)
nn = snormalize(needle)
return _swithin(hsn, nn)
def soverlaps(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
"""Answers if needle is at least partially overlaps haystack (including boundaries)."""
hsn = snormalize(haystack)
nn = snormalize(needle)
return _soverlaps(hsn, nn)
_normalizationSkippedWarning = " Normalization is skipped."
def _swithin(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
res = needle.start >= haystack.start and needle.stop <= haystack.stop
#ic("_swithin", haystack, needle, needle.start >= haystack.start, needle.stop < haystack.stop, res)
return res
_swithin.__doc__ = swithin.__doc__ + _normalizationSkippedWarning
def _soverlaps(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
#ic("_soverlaps", haystack, needle, needle.start <= haystack.start < needle.stop, needle.start < haystack.stop < needle.stop)
return _swithin(haystack, needle) or needle.start <= haystack.start < needle.stop or needle.start < haystack.stop < needle.stop
_soverlaps.__doc__ = soverlaps.__doc__ + _normalizationSkippedWarning
def _teeSliceSequences(sliceSequences: typing.Iterable[SliceRangeSeqT], count: int = 2) -> typing.Iterator[typing.Tuple[itertools._tee, itertools._tee]]:
for s in sliceSequences:
if isinstance(s, isInstArg):
s = (s,)
yield itertools.tee(s, count)
def teeSliceSequences(sliceSequences: typing.Iterable[SliceRangeSeqT], count: int = 2) -> zip:
return zip(*(_teeSliceSequences(sliceSequences, count)))
def _integrator(chunkLens: typing.Iterable[int]) -> typing.Iterable[int]:
cumLen = 0
for s in chunkLens:
cumLen += s
yield cumLen
def _uniq(it: typing.Iterable[typing.Any]) -> typing.Iterable[typing.Any]:
it = iter(it)
try:
prev = next(it)
yield prev
except StopIteration:
return
for el in it:
if prev == el:
continue
prev = el
yield el
def _mergeAndDedup(intSeqs: typing.Iterable[typing.Iterable[int]]) -> typing.Iterable[int]:
return _uniq(sorted(heapq.merge(*intSeqs)))
def _deduplicatedIntegrator(*chunksLens: typing.Iterable[typing.Iterable[int]]) -> typing.Iterable[int]:
return _mergeAndDedup(map(_integrator, chunksLens))
def ssegments_(slc: SliceRangeT, chunkLens: typing.Iterable[int]) -> SliceRangeSeqT:
"""Splits the slice into slices of lengths `chunkLen` (which is in `slc.step`s!!!)"""
return soffset_split_(slc, _deduplicatedIntegrator(chunkLens)) # pylint: disable=undefined-variable
ssegments = _createWrappedWithnewMacroGroup(ssegments_)
def salign_(sliceSequences: typing.Iterable[SliceRangeSeqT]) -> SliceRangeSeqT:
""""Aligns" seqs of ranges/slices OF THE SAME TOTAL LENGTH, returning ones with additional split points, so that all the sequences have segments of equal lengths between split points with the same indexes. See the test for more insight on what it does."""
slcsPoints, slcsSplit = teeSliceSequences(sliceSequences, 2)
splitPoints = tuple(_deduplicatedIntegrator(*(map(_slen, ss) for ss in slcsPoints)))
for ss in slcsSplit:
yield soffset_split(ss, splitPoints) # pylint: disable=undefined-variable
| 28.375297
| 399
| 0.720827
|
import heapq
import itertools
import typing
from collections.abc import Sequence
from functools import wraps
__all__ = ("SliceRangeT", "SliceRangeTypeT", "SliceRangeSeqT", "SliceRangeListT", "sAny2Type", "range2slice", "slice2range", "slen", "sdir", "svec", "srev", "sdirect", "snormalize", "ssplit_1_", "ssplit_1", "ssplit_", "ssplit", "schunks_", "schunks", "soffset_split_", "soffset_split", "sjoin_", "swithin", "soverlaps", "teeSliceSequences", "salign_", "sPointIn", "ssegments_", "ssegments", "shull")
isInstArg = (range, slice)
SliceRangeT = typing.Union[isInstArg]
SliceRangeTypeT = typing.Union[tuple(typing.Type[el] for el in isInstArg)]
SliceRangeSeqT = typing.Iterable[SliceRangeT]
SliceRangeListT = typing.Sequence[SliceRangeT]
SliceRangeOptListT = typing.Union[SliceRangeT, SliceRangeListT]
def _getStepForComputation(slc: SliceRangeT) -> int:
if slc.step is not None:
return slc.step
if slc.start <= slc.stop:
return 1
raise ValueError("start < end, so if step is not explicitly defined, it is undefined! Setup the step explicitly (you would likely need -1)!")
def sign(n: int) -> int:
if n is None or n >= 0:
return 1
return -1
def _scollapse(slc: SliceRangeOptListT) -> SliceRangeOptListT:
if not isinstance(slc, isInstArg) and len(slc) == 1:
return slc[0]
return slc
def sAny2Type(rng: SliceRangeT, tp: SliceRangeTypeT) -> SliceRangeT:
return tp(rng.start, rng.stop, _getStepForComputation(rng))
def range2slice(rng: SliceRangeT) -> slice:
return sAny2Type(rng, slice)
def slice2range(slc: SliceRangeT) -> range:
return sAny2Type(slc, range)
def _slen(slc: SliceRangeT) -> int:
return len(slice2range(slc))
def slen(slcs: SliceRangeSeqT) -> int:
if isinstance(slcs, isInstArg):
return _slen(slcs)
total = 0
for s in slcs:
total += _slen(s)
return total
def sdir(slc: SliceRangeT) -> int:
return sign(slc.stop - slc.start)
def svec(slc: SliceRangeT) -> int:
return sdir(slc) * slen(slc)
def srev(slc: SliceRangeT) -> SliceRangeT:
step = _getStepForComputation(slc)
newStep = -1 * step
assert isinstance(slc, range) or newStep >= -1, "Negative-directed slices with `step`s other than -1 don't work!"
return slc.__class__(slc.stop - step, slc.start - step, newStep)
def _isNegative(slcs: SliceRangeListT) -> typing.Iterable[bool]:
return slcs.__class__(el.stop < el.start for el in slcs)
def _sdirect(donorNegative: bool, acceptor: SliceRangeOptListT) -> SliceRangeOptListT:
if not isinstance(acceptor, isInstArg):
if not isinstance(donorNegative, bool):
return acceptor.__class__(_sdirect(*el) for el in zip(donorNegative, acceptor))
return acceptor.__class__(_sdirect(donorNegative, el) for el in acceptor)
if donorNegative != (acceptor.stop < acceptor.start):
return srev(acceptor)
return acceptor
def sPointIn(s: SliceRangeT, pt: int) -> bool:
#return (((s.step is None or s.step > 0) and s.start <= pt < s.stop) or (s.start >= pt > s.stop))
return pt in slice2range(s)
def snormalize(slc: SliceRangeOptListT) -> SliceRangeOptListT:
res = _sdirect(False, slc)
if isinstance(res, isInstArg):
return sAny2Type(res, slc.__class__)
return res.__class__(sAny2Type(el, el.__class__) for el in res)
def sdirect(donor: SliceRangeT, acceptor: SliceRangeT) -> SliceRangeT:
return _sdirect(donor.stop < donor.start, acceptor)
class InBandSignal:
__slots__ = ()
newMacroGroup = InBandSignal()
def _createWrappedWithnewMacroGroup(f: typing.Callable) -> typing.Callable:
@wraps(f)
def f1(*args, **kwargs):
bigRes = []
res = []
secCtor = kwargs.get("_secCtor", tuple)
def genericAppend():
nonlocal res
if len(res) == 1:
res = res[0]
else:
res = secCtor(res)
bigRes.append(res)
res = []
for el in f(*args, **kwargs):
#ic(el)
if el is not newMacroGroup:
res.append(el)
else:
genericAppend()
if res:
genericAppend()
bigRes = secCtor(bigRes)
return bigRes
f1.__annotations__["return"] = typing.Iterable[SliceRangeOptListT]
return f1
def ssplit_1_(slc: SliceRangeT, splitPts: typing.Union[int, typing.Iterable[int]]) -> SliceRangeSeqT:
tp = slc.__class__
if isinstance(splitPts, int):
splitPts = (splitPts,)
for p in splitPts:
if p != slc.start:
yield tp(slc.start, p, slc.step)
yield newMacroGroup
slc = tp(p, slc.stop, slc.step)
yield slc
ssplit_1 = _createWrappedWithnewMacroGroup(ssplit_1_)
def ssplit_(slc: SliceRangeSeqT, splitPts: typing.Iterable[int]) -> SliceRangeSeqT:
if isinstance(slc, isInstArg):
slc = (slc,)
if isinstance(splitPts, int):
splitPts = (splitPts,)
splitPts = iter(splitPts)
try:
pt = next(splitPts)
except StopIteration:
yield from slc
return
pts2split = []
for s in slc:
while pt is not None and sPointIn(s, pt):
pts2split.append(pt)
try:
pt = next(splitPts)
except StopIteration:
pt = None
if pts2split:
yield from ssplit_1_(s, pts2split)
pts2split = []
else:
yield s
ssplit = _createWrappedWithnewMacroGroup(ssplit_)
def schunks_(slc: SliceRangeT, chunkLen: int) -> SliceRangeSeqT:
cl = chunkLen * _getStepForComputation(slc)
return ssplit_(slc, range(slc.start + cl, slc.stop, cl))
schunks = _createWrappedWithnewMacroGroup(schunks_)
def soffset_split_(slc: SliceRangeSeqT, splitPts: typing.Iterable[int]) -> SliceRangeSeqT:
if isinstance(slc, isInstArg):
slc = (slc,)
if isinstance(splitPts, int):
splitPts = (splitPts,)
splitPts = iter(splitPts)
try:
pt = next(splitPts)
except StopIteration:
yield from slc
return
cumLen = 0
cumLenPrev = None
pts2split = []
for s in slc:
cumLenPrev = cumLen
cumLen += slen(s)
while pt is not None and cumLenPrev <= pt < cumLen:
pts2split.append(s.start + (pt - cumLenPrev) * _getStepForComputation(s))
try:
pt = next(splitPts)
except StopIteration:
pt = None
if pts2split:
yield from ssplit_1_(s, pts2split)
pts2split = []
else:
yield s
soffset_split = _createWrappedWithnewMacroGroup(soffset_split_)
def _posHull(first: SliceRangeT, slcs: SliceRangeSeqT) -> SliceRangeT:
mi, ma = first.start, first.stop
for slc in slcs:
mi = min(mi, slc.start)
ma = max(slc.stop, ma)
return first.__class__(mi, ma, first.step)
def _negHull(first: SliceRangeT, slcs: SliceRangeSeqT) -> SliceRangeT:
ma, mi = first.start, first.stop
for slc in slcs:
mi = min(mi, slc.stop)
ma = max(slc.start, ma)
return first.__class__(ma, mi, first.step)
def shull(slcs: SliceRangeSeqT) -> SliceRangeT:
slcs = iter(slcs)
first = next(slcs)
if first.start < first.stop:
return _posHull(first, slcs)
return _negHull(first, slcs)
def sjoin_(slcs: SliceRangeSeqT) -> SliceRangeSeqT:
slcs = iter(slcs)
wholeDir = 0
while not wholeDir:
try:
prevSlc = next(slcs)
except StopIteration:
return
wholeDir = sdir(prevSlc)
wholeDir = wholeDir > 0 # type: bool
tp = prevSlc.__class__
for s in slcs:
#assert (prevSlc.start <= prevSlc.stop) == (s.start <= s.stop)
if prevSlc.step == s.step:
if s.start == prevSlc.stop:
prevSlc = tp(prevSlc.start, s.stop, prevSlc.step)
else:
curDir = prevSlc.start <= s.start
if (swithin(prevSlc, s) or swithin(s, prevSlc)) or curDir == wholeDir and soverlaps(prevSlc, s):
prevSlc = shull((prevSlc, s))
else:
yield prevSlc
prevSlc = s
else:
yield prevSlc
prevSlc = s
yield prevSlc
def swithin(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
hsn = snormalize(haystack)
nn = snormalize(needle)
return _swithin(hsn, nn)
def soverlaps(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
hsn = snormalize(haystack)
nn = snormalize(needle)
return _soverlaps(hsn, nn)
_normalizationSkippedWarning = " Normalization is skipped."
def _swithin(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
res = needle.start >= haystack.start and needle.stop <= haystack.stop
#ic("_swithin", haystack, needle, needle.start >= haystack.start, needle.stop < haystack.stop, res)
return res
_swithin.__doc__ = swithin.__doc__ + _normalizationSkippedWarning
def _soverlaps(haystack: SliceRangeT, needle: SliceRangeT) -> bool:
#ic("_soverlaps", haystack, needle, needle.start <= haystack.start < needle.stop, needle.start < haystack.stop < needle.stop)
return _swithin(haystack, needle) or needle.start <= haystack.start < needle.stop or needle.start < haystack.stop < needle.stop
_soverlaps.__doc__ = soverlaps.__doc__ + _normalizationSkippedWarning
def _teeSliceSequences(sliceSequences: typing.Iterable[SliceRangeSeqT], count: int = 2) -> typing.Iterator[typing.Tuple[itertools._tee, itertools._tee]]:
for s in sliceSequences:
if isinstance(s, isInstArg):
s = (s,)
yield itertools.tee(s, count)
def teeSliceSequences(sliceSequences: typing.Iterable[SliceRangeSeqT], count: int = 2) -> zip:
return zip(*(_teeSliceSequences(sliceSequences, count)))
def _integrator(chunkLens: typing.Iterable[int]) -> typing.Iterable[int]:
cumLen = 0
for s in chunkLens:
cumLen += s
yield cumLen
def _uniq(it: typing.Iterable[typing.Any]) -> typing.Iterable[typing.Any]:
it = iter(it)
try:
prev = next(it)
yield prev
except StopIteration:
return
for el in it:
if prev == el:
continue
prev = el
yield el
def _mergeAndDedup(intSeqs: typing.Iterable[typing.Iterable[int]]) -> typing.Iterable[int]:
return _uniq(sorted(heapq.merge(*intSeqs)))
def _deduplicatedIntegrator(*chunksLens: typing.Iterable[typing.Iterable[int]]) -> typing.Iterable[int]:
return _mergeAndDedup(map(_integrator, chunksLens))
def ssegments_(slc: SliceRangeT, chunkLens: typing.Iterable[int]) -> SliceRangeSeqT:
return soffset_split_(slc, _deduplicatedIntegrator(chunkLens)) # pylint: disable=undefined-variable
ssegments = _createWrappedWithnewMacroGroup(ssegments_)
def salign_(sliceSequences: typing.Iterable[SliceRangeSeqT]) -> SliceRangeSeqT:
slcsPoints, slcsSplit = teeSliceSequences(sliceSequences, 2)
splitPoints = tuple(_deduplicatedIntegrator(*(map(_slen, ss) for ss in slcsPoints)))
for ss in slcsSplit:
yield soffset_split(ss, splitPoints) # pylint: disable=undefined-variable
| true
| true
|
1c42e161b3810277a30977eea2901c24884b60c8
| 357
|
py
|
Python
|
source/appModules/skype.py
|
marlon-sousa/nvda
|
83738d7d9150fb379083eb3918e9c78c78610489
|
[
"bzip2-1.0.6"
] | 1,592
|
2015-11-10T12:05:44.000Z
|
2022-03-31T11:50:40.000Z
|
source/appModules/skype.py
|
marlon-sousa/nvda
|
83738d7d9150fb379083eb3918e9c78c78610489
|
[
"bzip2-1.0.6"
] | 9,479
|
2015-11-10T20:56:48.000Z
|
2022-03-31T23:51:30.000Z
|
source/appModules/skype.py
|
marlon-sousa/nvda
|
83738d7d9150fb379083eb3918e9c78c78610489
|
[
"bzip2-1.0.6"
] | 682
|
2015-11-10T11:19:23.000Z
|
2022-03-31T07:51:29.000Z
|
# -*- coding: UTF-8 -*-
#appModules/skype.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2019 Peter Vágner, NV Access Limited, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
class AppModule(appModuleHandler.AppModule):
disableBrowseModeByDefault = True
| 29.75
| 65
| 0.773109
|
import appModuleHandler
class AppModule(appModuleHandler.AppModule):
disableBrowseModeByDefault = True
| true
| true
|
1c42e2702c5774cffa7414e952498a588522c4de
| 30,790
|
bzl
|
Python
|
packages/bazel/src/ng_module.bzl
|
jameskirsch/angular
|
168abc6d6f52713383411b14980e104c99bfeef5
|
[
"MIT"
] | 1
|
2019-11-29T04:18:04.000Z
|
2019-11-29T04:18:04.000Z
|
packages/bazel/src/ng_module.bzl
|
resuta566/angular
|
5de7960f019701e4e26dc6a7809c244ef94b5e30
|
[
"MIT"
] | null | null | null |
packages/bazel/src/ng_module.bzl
|
resuta566/angular
|
5de7960f019701e4e26dc6a7809c244ef94b5e30
|
[
"MIT"
] | null | null | null |
# Copyright Google Inc. All Rights Reserved.
#
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file at https://angular.io/license
"""Run Angular's AOT template compiler
"""
load(
":external.bzl",
"COMMON_ATTRIBUTES",
"COMMON_OUTPUTS",
"DEFAULT_API_EXTRACTOR",
"DEFAULT_NG_COMPILER",
"DEFAULT_NG_XI18N",
"DEPS_ASPECTS",
"NpmPackageInfo",
"TsConfigInfo",
"compile_ts",
"js_ecma_script_module_info",
"js_named_module_info",
"node_modules_aspect",
"ts_providers_dict_to_struct",
"tsc_wrapped_tsconfig",
)
_FLAT_DTS_FILE_SUFFIX = ".bundle.d.ts"
_R3_SYMBOLS_DTS_FILE = "src/r3_symbols.d.ts"
def is_ivy_enabled(ctx):
"""Determine if the ivy compiler should be used to by the ng_module.
Args:
ctx: skylark rule execution context
Returns:
Boolean, Whether the ivy compiler should be used.
"""
# TODO(josephperrott): Remove configuration via compile=aot define flag.
if ctx.var.get("compile", None) == "aot":
return True
if ctx.var.get("angular_ivy_enabled", None) == "True":
return True
# Enable Angular targets extracted by Kythe Angular indexer to be compiled with the Ivy compiler architecture.
# TODO(ayazhafiz): remove once Ivy has landed as the default in g3.
if ctx.var.get("GROK_ELLIPSIS_BUILD", None) != None:
return True
# Return false to default to ViewEngine compiler
return False
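# Illustrative note (added; not part of the original source): the ctx.var
# lookups above read Bazel --define values, so passing e.g.
# --define=angular_ivy_enabled=True on the command line is expected to make
# is_ivy_enabled return True.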
def _compiler_name(ctx):
"""Selects a user-visible name depending on the current compilation strategy.
Args:
ctx: skylark rule execution context
Returns:
The name of the current compiler to be displayed in build output
"""
return "Ivy" if is_ivy_enabled(ctx) else "ViewEngine"
def _is_view_engine_enabled(ctx):
"""Determines whether Angular outputs will be produced by the current compilation strategy.
Args:
ctx: skylark rule execution context
Returns:
true iff the current compilation strategy will produce View Engine compilation outputs (such as
factory files), false otherwise
"""
return not is_ivy_enabled(ctx)
def _basename_of(ctx, file):
ext_len = len(".ts")
if file.short_path.endswith(".ng.html"):
ext_len = len(".ng.html")
elif file.short_path.endswith(".html"):
ext_len = len(".html")
return file.short_path[len(ctx.label.package) + 1:-ext_len]
# Return true if run with bazel (the open-sourced version of blaze), false if
# run with blaze.
def _is_bazel():
return not hasattr(native, "genmpm")
def _flat_module_out_file(ctx):
"""Provide a default for the flat_module_out_file attribute.
We cannot use the default="" parameter of ctx.attr because the value is calculated
from other attributes (name)
Args:
ctx: skylark rule execution context
Returns:
a basename used for the flat module out (no extension)
"""
if getattr(ctx.attr, "flat_module_out_file", False):
return ctx.attr.flat_module_out_file
return "%s_public_index" % ctx.label.name
def _should_produce_dts_bundle(ctx):
"""Should we produce dts bundles.
    We only produce flattened dts outs when we expect the ng_module to be published,
based on the value of the bundle_dts attribute.
Args:
ctx: skylark rule execution context
Returns:
true when we should produce bundled dts.
"""
# At the moment we cannot use this with ngtsc compiler since it emits
# import * as ___ from local modules which is not supported
# see: https://github.com/Microsoft/web-build-tools/issues/1029
return _is_view_engine_enabled(ctx) and getattr(ctx.attr, "bundle_dts", False)
def _should_produce_r3_symbols_bundle(ctx):
"""Should we produce r3_symbols bundle.
    NGCC relies on having an r3_symbols file. This file is located in @angular/core
    and should only be included when bundling core in legacy mode.
Args:
ctx: skylark rule execution context
Returns:
true when we should produce r3_symbols dts.
"""
    # if we are compiling @angular/core with ngc we should add this additional dts bundle
# because ngcc relies on having this file.
# see: https://github.com/angular/angular/blob/84406e4d6d93b28b23efbb1701bc5ae1084da67b/packages/compiler-cli/src/ngcc/src/packages/entry_point_bundle.ts#L56
# todo: alan-agius4: remove when ngcc doesn't need this anymore
return _is_view_engine_enabled(ctx) and ctx.attr.module_name == "@angular/core"
def _should_produce_flat_module_outs(ctx):
"""Should we produce flat module outputs.
    We only produce flat module outs when we expect the ng_module to be published,
based on the presence of the module_name attribute.
Args:
ctx: skylark rule execution context
Returns:
true iff we should run the bundle_index_host to produce flat module metadata and bundle index
"""
return _is_bazel() and ctx.attr.module_name
# Calculate the expected output of the template compiler for every source
# in the library. Most of these will be produced as empty files but it is
# unknown, without parsing, which will be empty.
def _expected_outs(ctx):
is_legacy_ngc = _is_view_engine_enabled(ctx)
devmode_js_files = []
closure_js_files = []
declaration_files = []
summary_files = []
metadata_files = []
factory_basename_set = depset([_basename_of(ctx, src) for src in ctx.files.factories])
for src in ctx.files.srcs + ctx.files.assets:
package_prefix = ctx.label.package + "/" if ctx.label.package else ""
# Strip external repository name from path if src is from external repository
        # If src is from external repository, its short_path will be ../<external_repo_name>/...
short_path = src.short_path if src.short_path[0:2] != ".." else "/".join(src.short_path.split("/")[2:])
if short_path.endswith(".ts") and not short_path.endswith(".d.ts"):
basename = short_path[len(package_prefix):-len(".ts")]
if (len(factory_basename_set.to_list()) == 0 or basename in factory_basename_set.to_list()):
if _generate_ve_shims(ctx):
devmode_js = [
".ngfactory.js",
".ngsummary.js",
".js",
]
else:
devmode_js = [".js"]
# Only ngc produces .json files, they're not needed in Ivy.
if is_legacy_ngc:
summaries = [".ngsummary.json"]
metadata = [".metadata.json"]
else:
summaries = []
metadata = []
else:
devmode_js = [".js"]
if not _is_bazel():
devmode_js += [".ngfactory.js"]
summaries = []
metadata = []
elif is_legacy_ngc and short_path.endswith(".css"):
basename = short_path[len(package_prefix):-len(".css")]
devmode_js = [
".css.shim.ngstyle.js",
".css.ngstyle.js",
]
summaries = []
metadata = []
else:
continue
filter_summaries = ctx.attr.filter_summaries
closure_js = [f.replace(".js", ".mjs") for f in devmode_js if not filter_summaries or not f.endswith(".ngsummary.js")]
declarations = [f.replace(".js", ".d.ts") for f in devmode_js]
devmode_js_files += [ctx.actions.declare_file(basename + ext) for ext in devmode_js]
closure_js_files += [ctx.actions.declare_file(basename + ext) for ext in closure_js]
declaration_files += [ctx.actions.declare_file(basename + ext) for ext in declarations]
summary_files += [ctx.actions.declare_file(basename + ext) for ext in summaries]
if not _is_bazel():
metadata_files += [ctx.actions.declare_file(basename + ext) for ext in metadata]
dts_bundles = None
if _should_produce_dts_bundle(ctx):
# We need to add a suffix to bundle as it might collide with the flat module dts.
# The flat module dts out contains several other exports
# https://github.com/angular/angular/blob/84406e4d6d93b28b23efbb1701bc5ae1084da67b/packages/compiler-cli/src/metadata/index_writer.ts#L18
# the file name will be like 'core.bundle.d.ts'
dts_bundles = [ctx.actions.declare_file(ctx.label.name + _FLAT_DTS_FILE_SUFFIX)]
if _should_produce_r3_symbols_bundle(ctx):
dts_bundles.append(ctx.actions.declare_file(_R3_SYMBOLS_DTS_FILE.replace(".d.ts", _FLAT_DTS_FILE_SUFFIX)))
# We do this just when producing a flat module index for a publishable ng_module
if _should_produce_flat_module_outs(ctx):
flat_module_out = _flat_module_out_file(ctx)
devmode_js_files.append(ctx.actions.declare_file("%s.js" % flat_module_out))
closure_js_files.append(ctx.actions.declare_file("%s.mjs" % flat_module_out))
bundle_index_typings = ctx.actions.declare_file("%s.d.ts" % flat_module_out)
declaration_files.append(bundle_index_typings)
if is_legacy_ngc:
metadata_files.append(ctx.actions.declare_file("%s.metadata.json" % flat_module_out))
else:
bundle_index_typings = None
# TODO(alxhub): i18n is only produced by the legacy compiler currently. This should be re-enabled
# when ngtsc can extract messages
if is_legacy_ngc and _is_bazel():
i18n_messages_files = [ctx.actions.declare_file(ctx.label.name + "_ngc_messages.xmb")]
elif is_legacy_ngc:
# write the xmb file to blaze-genfiles since that path appears in the translation console keys
i18n_messages_files = [ctx.new_file(ctx.genfiles_dir, ctx.label.name + "_ngc_messages.xmb")]
else:
i18n_messages_files = []
return struct(
closure_js = closure_js_files,
devmode_js = devmode_js_files,
declarations = declaration_files,
summaries = summary_files,
metadata = metadata_files,
dts_bundles = dts_bundles,
bundle_index_typings = bundle_index_typings,
i18n_messages = i18n_messages_files,
)
# Determines if we need to generate View Engine shims (.ngfactory and .ngsummary files)
def _generate_ve_shims(ctx):
# we are checking the workspace name here, because otherwise this would be a breaking change
# (the shims used to be on by default)
# we can remove this check once angular/components and angular/angular-cli repos no longer depend
# on the presence of shims, or if they explicitly opt-in to their generation via ng_modules' generate_ve_shims attr
return _is_bazel() and _is_view_engine_enabled(ctx) or (
getattr(ctx.attr, "generate_ve_shims", False) == True or ctx.workspace_name != "angular"
)
def _ngc_tsconfig(ctx, files, srcs, **kwargs):
generate_ve_shims = _generate_ve_shims(ctx)
outs = _expected_outs(ctx)
is_legacy_ngc = _is_view_engine_enabled(ctx)
if "devmode_manifest" in kwargs:
expected_outs = outs.devmode_js + outs.declarations + outs.summaries + outs.metadata
else:
expected_outs = outs.closure_js
angular_compiler_options = {
"enableResourceInlining": ctx.attr.inline_resources,
"generateCodeForLibraries": False,
"allowEmptyCodegenFiles": True,
"generateNgFactoryShims": True if generate_ve_shims else False,
"generateNgSummaryShims": True if generate_ve_shims else False,
# Summaries are only enabled if Angular outputs are to be produced.
"enableSummariesForJit": is_legacy_ngc,
"enableIvy": is_ivy_enabled(ctx),
"fullTemplateTypeCheck": ctx.attr.type_check,
# TODO(alxhub/arick): template type-checking for Ivy needs to be tested in g3 before it can
# be enabled here.
"ivyTemplateTypeCheck": False,
# In Google3 we still want to use the symbol factory re-exports in order to
# not break existing apps inside Google. Unlike Bazel, Google3 does not only
# enforce strict dependencies of source files, but also for generated files
# (such as the factory files). Therefore in order to avoid that generated files
# introduce new module dependencies (which aren't explicitly declared), we need
# to enable external symbol re-exports by default when running with Blaze.
"createExternalSymbolFactoryReexports": (not _is_bazel()),
# FIXME: wrong place to de-dupe
"expectedOut": depset([o.path for o in expected_outs]).to_list(),
"_useHostForImportGeneration": (not _is_bazel()),
}
if _should_produce_flat_module_outs(ctx):
angular_compiler_options["flatModuleId"] = ctx.attr.module_name
angular_compiler_options["flatModuleOutFile"] = _flat_module_out_file(ctx)
angular_compiler_options["flatModulePrivateSymbolPrefix"] = "_".join(
[ctx.workspace_name] + ctx.label.package.split("/") + [ctx.label.name, ""],
)
return dict(tsc_wrapped_tsconfig(ctx, files, srcs, **kwargs), **{
"angularCompilerOptions": angular_compiler_options,
})
def _collect_summaries_aspect_impl(target, ctx):
results = depset(target.angular.summaries if hasattr(target, "angular") else [])
# If we are visiting empty-srcs ts_library, this is a re-export
srcs = ctx.rule.attr.srcs if hasattr(ctx.rule.attr, "srcs") else []
# "re-export" rules should expose all the files of their deps
if not srcs and hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
if (hasattr(dep, "angular")):
results = depset(dep.angular.summaries, transitive = [results])
return struct(collect_summaries_aspect_result = results)
_collect_summaries_aspect = aspect(
implementation = _collect_summaries_aspect_impl,
attr_aspects = ["deps"],
)
# Extra options passed to Node when running ngc.
_EXTRA_NODE_OPTIONS_FLAGS = [
# Expose the v8 garbage collection API to JS.
"--node_options=--expose-gc",
# Show ~full stack traces, instead of cutting off after 10 items.
"--node_options=--stack-trace-limit=100",
# Give 4 GB RAM to node to allow bigger google3 modules to compile.
"--node_options=--max-old-space-size=4096",
]
def ngc_compile_action(
ctx,
label,
inputs,
outputs,
messages_out,
tsconfig_file,
node_opts,
locale = None,
i18n_args = [],
dts_bundles_out = None,
compile_mode = "prodmode"):
"""Helper function to create the ngc action.
This is exposed for google3 to wire up i18n replay rules, and is not intended
as part of the public API.
Args:
ctx: skylark context
label: the label of the ng_module being compiled
inputs: passed to the ngc action's inputs
outputs: passed to the ngc action's outputs
messages_out: produced xmb files
tsconfig_file: tsconfig file with settings used for the compilation
node_opts: list of strings, extra nodejs options.
locale: i18n locale, or None
i18n_args: additional command-line arguments to ngc
dts_bundles_out: produced flattened dts file
Returns:
the parameters of the compilation which will be used to replay the ngc action for i18N.
"""
is_legacy_ngc = _is_view_engine_enabled(ctx)
mnemonic = "AngularTemplateCompile"
progress_message = "Compiling Angular templates (%s - %s) %s" % (_compiler_name(ctx), compile_mode, label)
if locale:
mnemonic = "AngularI18NMerging"
supports_workers = "0"
progress_message = ("Recompiling Angular templates (ngc - %s) %s for locale %s" %
(compile_mode, label, locale))
else:
supports_workers = str(int(ctx.attr._supports_workers))
arguments = (list(_EXTRA_NODE_OPTIONS_FLAGS) +
["--node_options=%s" % opt for opt in node_opts])
# One at-sign makes this a params-file, enabling the worker strategy.
    # Two at-signs escape the argument so it's passed through to ngc
    # rather than the contents getting expanded.
if supports_workers == "1":
arguments += ["@@" + tsconfig_file.path]
else:
arguments += ["-p", tsconfig_file.path]
arguments += i18n_args
ctx.actions.run(
progress_message = progress_message,
mnemonic = mnemonic,
inputs = inputs,
outputs = outputs,
arguments = arguments,
executable = ctx.executable.compiler,
execution_requirements = {
"supports-workers": supports_workers,
},
)
if is_legacy_ngc and messages_out != None:
# The base path is bin_dir because of the way the ngc
# compiler host is configured. Under Blaze, we need to explicitly
# point to genfiles/ to redirect the output.
# See _expected_outs above, where the output path for the message file
# is conditional on whether we are in Bazel.
message_file_path = messages_out[0].short_path if _is_bazel() else "../genfiles/" + messages_out[0].short_path
ctx.actions.run(
inputs = inputs,
outputs = messages_out,
executable = ctx.executable.ng_xi18n,
arguments = (_EXTRA_NODE_OPTIONS_FLAGS +
[tsconfig_file.path] +
[message_file_path]),
progress_message = "Extracting Angular 2 messages (ng_xi18n)",
mnemonic = "Angular2MessageExtractor",
)
if dts_bundles_out != None:
# combine the inputs and outputs and filter .d.ts and json files
filter_inputs = [f for f in inputs.to_list() + outputs if f.path.endswith(".d.ts") or f.path.endswith(".json")]
if _should_produce_flat_module_outs(ctx):
dts_entry_points = ["%s.d.ts" % _flat_module_out_file(ctx)]
else:
dts_entry_points = [ctx.attr.entry_point.label.name.replace(".ts", ".d.ts")]
if _should_produce_r3_symbols_bundle(ctx):
dts_entry_points.append(_R3_SYMBOLS_DTS_FILE)
ctx.actions.run(
progress_message = "Bundling DTS (%s) %s" % (compile_mode, str(ctx.label)),
mnemonic = "APIExtractor",
executable = ctx.executable.api_extractor,
inputs = filter_inputs,
outputs = dts_bundles_out,
arguments = [
tsconfig_file.path,
",".join(["/".join([ctx.bin_dir.path, ctx.label.package, f]) for f in dts_entry_points]),
",".join([f.path for f in dts_bundles_out]),
],
)
if not locale and not ctx.attr.no_i18n:
return struct(
label = label,
tsconfig = tsconfig_file,
inputs = inputs,
outputs = outputs,
compiler = ctx.executable.compiler,
)
return None
def _filter_ts_inputs(all_inputs):
# The compiler only needs to see TypeScript sources from the npm dependencies,
# but may need to look at package.json and ngsummary.json files as well.
return [
f
for f in all_inputs
if f.path.endswith(".js") or f.path.endswith(".ts") or f.path.endswith(".json")
]
def _compile_action(
ctx,
inputs,
outputs,
dts_bundles_out,
messages_out,
tsconfig_file,
node_opts,
compile_mode):
# Give the Angular compiler all the user-listed assets
file_inputs = list(ctx.files.assets)
if (type(inputs) == type([])):
file_inputs.extend(inputs)
else:
# inputs ought to be a list, but allow depset as well
# so that this can change independently of rules_typescript
# TODO(alexeagle): remove this case after update (July 2019)
file_inputs.extend(inputs.to_list())
if hasattr(ctx.attr, "node_modules"):
file_inputs.extend(_filter_ts_inputs(ctx.files.node_modules))
# If the user supplies a tsconfig.json file, the Angular compiler needs to read it
if hasattr(ctx.attr, "tsconfig") and ctx.file.tsconfig:
file_inputs.append(ctx.file.tsconfig)
if TsConfigInfo in ctx.attr.tsconfig:
file_inputs += ctx.attr.tsconfig[TsConfigInfo].deps
# Also include files from npm fine grained deps as action_inputs.
# These deps are identified by the NpmPackageInfo provider.
for d in ctx.attr.deps:
if NpmPackageInfo in d:
# Note: we can't avoid calling .to_list() on sources
file_inputs.extend(_filter_ts_inputs(d[NpmPackageInfo].sources.to_list()))
# Collect the inputs and summary files from our deps
action_inputs = depset(
file_inputs,
transitive = [
dep.collect_summaries_aspect_result
for dep in ctx.attr.deps
if hasattr(dep, "collect_summaries_aspect_result")
],
)
return ngc_compile_action(ctx, ctx.label, action_inputs, outputs, messages_out, tsconfig_file, node_opts, None, [], dts_bundles_out, compile_mode)
def _prodmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
return _compile_action(ctx, inputs, outputs + outs.closure_js, None, outs.i18n_messages, tsconfig_file, node_opts, "prodmode")
def _devmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
compile_action_outputs = outputs + outs.devmode_js + outs.declarations + outs.summaries + outs.metadata
_compile_action(ctx, inputs, compile_action_outputs, outs.dts_bundles, None, tsconfig_file, node_opts, "devmode")
def _ts_expected_outs(ctx, label, srcs_files = []):
# rules_typescript expects a function with two or more arguments, but our
    # implementation doesn't use the label (and **kwargs).
_ignored = [label, srcs_files]
return _expected_outs(ctx)
def ng_module_impl(ctx, ts_compile_actions):
"""Implementation function for the ng_module rule.
This is exposed so that google3 can have its own entry point that re-uses this
and is not meant as a public API.
Args:
ctx: the skylark rule context
ts_compile_actions: generates all the actions to run an ngc compilation
Returns:
the result of the ng_module rule as a dict, suitable for
conversion by ts_providers_dict_to_struct
"""
is_legacy_ngc = _is_view_engine_enabled(ctx)
providers = ts_compile_actions(
ctx,
is_library = True,
deps = ctx.attr.deps,
compile_action = _prodmode_compile_action,
devmode_compile_action = _devmode_compile_action,
tsc_wrapped_tsconfig = _ngc_tsconfig,
outputs = _ts_expected_outs,
)
outs = _expected_outs(ctx)
if is_legacy_ngc:
providers["angular"] = {
"summaries": outs.summaries,
"metadata": outs.metadata,
}
providers["ngc_messages"] = outs.i18n_messages
if is_legacy_ngc and _should_produce_flat_module_outs(ctx):
if len(outs.metadata) > 1:
fail("expecting exactly one metadata output for " + str(ctx.label))
providers["angular"]["flat_module_metadata"] = struct(
module_name = ctx.attr.module_name,
metadata_file = outs.metadata[0],
typings_file = outs.bundle_index_typings,
flat_module_out_file = _flat_module_out_file(ctx),
)
if outs.dts_bundles != None:
providers["dts_bundles"] = outs.dts_bundles
return providers
def _ng_module_impl(ctx):
ts_providers = ng_module_impl(ctx, compile_ts)
# Add in new JS providers
# See design doc https://docs.google.com/document/d/1ggkY5RqUkVL4aQLYm7esRW978LgX3GUCnQirrk5E1C0/edit#
# and issue https://github.com/bazelbuild/rules_nodejs/issues/57 for more details.
ts_providers["providers"].extend([
js_named_module_info(
sources = ts_providers["typescript"]["es5_sources"],
deps = ctx.attr.deps,
),
js_ecma_script_module_info(
sources = ts_providers["typescript"]["es6_sources"],
deps = ctx.attr.deps,
),
# TODO: Add remaining shared JS providers from design doc
# (JSModuleInfo) and remove legacy "typescript" provider
# once it is no longer needed.
])
return ts_providers_dict_to_struct(ts_providers)
local_deps_aspects = [node_modules_aspect, _collect_summaries_aspect]
# Workaround skydoc bug which assumes DEPS_ASPECTS is a str type
[local_deps_aspects.append(a) for a in DEPS_ASPECTS]
NG_MODULE_ATTRIBUTES = {
"srcs": attr.label_list(allow_files = [".ts"]),
# Note: DEPS_ASPECTS is already a list, we add the cast to workaround
# https://github.com/bazelbuild/skydoc/issues/21
"deps": attr.label_list(
doc = "Targets that are imported by this target",
aspects = local_deps_aspects,
),
"assets": attr.label_list(
doc = ".html and .css files needed by the Angular compiler",
allow_files = [
".css",
# TODO(alexeagle): change this to ".ng.html" when usages updated
".html",
],
),
"factories": attr.label_list(
allow_files = [".ts", ".html"],
mandatory = False,
),
"filter_summaries": attr.bool(default = False),
"type_check": attr.bool(default = True),
"inline_resources": attr.bool(default = True),
"no_i18n": attr.bool(default = False),
"compiler": attr.label(
doc = """Sets a different ngc compiler binary to use for this library.
The default ngc compiler depends on the `@npm//@angular/bazel`
target which is setup for projects that use bazel managed npm deps that
fetch the @angular/bazel npm package. It is recommended that you use
the workspace name `@npm` for bazel managed deps so the default
compiler works out of the box. Otherwise, you'll have to override
the compiler attribute manually.
""",
default = Label(DEFAULT_NG_COMPILER),
executable = True,
cfg = "host",
),
"ng_xi18n": attr.label(
default = Label(DEFAULT_NG_XI18N),
executable = True,
cfg = "host",
),
"_supports_workers": attr.bool(default = True),
}
NG_MODULE_RULE_ATTRS = dict(dict(COMMON_ATTRIBUTES, **NG_MODULE_ATTRIBUTES), **{
"tsconfig": attr.label(allow_single_file = True),
"node_modules": attr.label(
doc = """The npm packages which should be available during the compile.
The default value of `@npm//typescript:typescript__typings` is
for projects that use bazel managed npm deps. It is recommended
that you use the workspace name `@npm` for bazel managed deps so the
default value works out of the box. Otherwise, you'll have to
override the node_modules attribute manually. This default is in place
since code compiled by ng_module will always depend on at least the
typescript default libs which are provided by
`@npm//typescript:typescript__typings`.
This attribute is DEPRECATED. As of version 0.18.0 the recommended
approach to npm dependencies is to use fine grained npm dependencies
which are setup with the `yarn_install` or `npm_install` rules.
For example, in targets that used a `//:node_modules` filegroup,
```
ng_module(
name = "my_lib",
...
node_modules = "//:node_modules",
)
```
which specifies all files within the `//:node_modules` filegroup
to be inputs to the `my_lib`. Using fine grained npm dependencies,
`my_lib` is defined with only the npm dependencies that are
needed:
```
ng_module(
name = "my_lib",
...
deps = [
"@npm//@types/foo",
"@npm//@types/bar",
"@npm//foo",
"@npm//bar",
...
],
)
```
In this case, only the listed npm packages and their
        transitive deps are included as inputs to the `my_lib` target
which reduces the time required to setup the runfiles for this
target (see https://github.com/bazelbuild/bazel/issues/5153).
The default typescript libs are also available via the node_modules
default in this case.
The @npm external repository and the fine grained npm package
targets are setup using the `yarn_install` or `npm_install` rule
in your WORKSPACE file:
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
""",
default = Label("@npm//typescript:typescript__typings"),
),
"entry_point": attr.label(allow_single_file = True),
# Default is %{name}_public_index
# The suffix points to the generated "bundle index" files that users import from
# The default is intended to avoid collisions with the users input files.
# Later packaging rules will point to these generated files as the entry point
# into the package.
# See the flatModuleOutFile documentation in
# https://github.com/angular/angular/blob/master/packages/compiler-cli/src/transformers/api.ts
"flat_module_out_file": attr.string(),
"bundle_dts": attr.bool(default = False),
"api_extractor": attr.label(
default = Label(DEFAULT_API_EXTRACTOR),
executable = True,
cfg = "host",
),
# Should the rule generate ngfactory and ngsummary shim files?
"generate_ve_shims": attr.bool(default = False),
})
ng_module = rule(
implementation = _ng_module_impl,
attrs = NG_MODULE_RULE_ATTRS,
outputs = COMMON_OUTPUTS,
)
"""
Run the Angular AOT template compiler.
This rule extends the [ts_library] rule.
[ts_library]: http://tsetse.info/api/build_defs.html#ts_library
"""
def ng_module_macro(tsconfig = None, **kwargs):
"""Wraps `ng_module` to set the default for the `tsconfig` attribute.
This must be a macro so that the string is converted to a label in the context of the
workspace that declares the `ng_module` target, rather than the workspace that defines
`ng_module`, or the workspace where the build is taking place.
This macro is re-exported as `ng_module` in the public API.
Args:
tsconfig: the label pointing to a tsconfig.json file
**kwargs: remaining args to pass to the ng_module rule
"""
if not tsconfig:
tsconfig = "//:tsconfig.json"
ng_module(tsconfig = tsconfig, **kwargs)
| 38.778338
| 161
| 0.66317
|
load(
":external.bzl",
"COMMON_ATTRIBUTES",
"COMMON_OUTPUTS",
"DEFAULT_API_EXTRACTOR",
"DEFAULT_NG_COMPILER",
"DEFAULT_NG_XI18N",
"DEPS_ASPECTS",
"NpmPackageInfo",
"TsConfigInfo",
"compile_ts",
"js_ecma_script_module_info",
"js_named_module_info",
"node_modules_aspect",
"ts_providers_dict_to_struct",
"tsc_wrapped_tsconfig",
)
_FLAT_DTS_FILE_SUFFIX = ".bundle.d.ts"
_R3_SYMBOLS_DTS_FILE = "src/r3_symbols.d.ts"
def is_ivy_enabled(ctx):
if ctx.var.get("compile", None) == "aot":
return True
if ctx.var.get("angular_ivy_enabled", None) == "True":
return True
if ctx.var.get("GROK_ELLIPSIS_BUILD", None) != None:
return True
return False
def _compiler_name(ctx):
return "Ivy" if is_ivy_enabled(ctx) else "ViewEngine"
def _is_view_engine_enabled(ctx):
return not is_ivy_enabled(ctx)
def _basename_of(ctx, file):
ext_len = len(".ts")
if file.short_path.endswith(".ng.html"):
ext_len = len(".ng.html")
elif file.short_path.endswith(".html"):
ext_len = len(".html")
return file.short_path[len(ctx.label.package) + 1:-ext_len]
def _is_bazel():
return not hasattr(native, "genmpm")
def _flat_module_out_file(ctx):
if getattr(ctx.attr, "flat_module_out_file", False):
return ctx.attr.flat_module_out_file
return "%s_public_index" % ctx.label.name
def _should_produce_dts_bundle(ctx):
return _is_view_engine_enabled(ctx) and getattr(ctx.attr, "bundle_dts", False)
def _should_produce_r3_symbols_bundle(ctx):
return _is_view_engine_enabled(ctx) and ctx.attr.module_name == "@angular/core"
def _should_produce_flat_module_outs(ctx):
return _is_bazel() and ctx.attr.module_name
# Calculate the expected output of the template compiler for every source
# in the library. Most of these will be produced as empty files but it is
# unknown, without parsing, which will be empty.
def _expected_outs(ctx):
is_legacy_ngc = _is_view_engine_enabled(ctx)
devmode_js_files = []
closure_js_files = []
declaration_files = []
summary_files = []
metadata_files = []
factory_basename_set = depset([_basename_of(ctx, src) for src in ctx.files.factories])
for src in ctx.files.srcs + ctx.files.assets:
package_prefix = ctx.label.package + "/" if ctx.label.package else ""
# Strip external repository name from path if src is from external repository
        # If src is from external repository, its short_path will be ../<external_repo_name>/...
short_path = src.short_path if src.short_path[0:2] != ".." else "/".join(src.short_path.split("/")[2:])
if short_path.endswith(".ts") and not short_path.endswith(".d.ts"):
basename = short_path[len(package_prefix):-len(".ts")]
if (len(factory_basename_set.to_list()) == 0 or basename in factory_basename_set.to_list()):
if _generate_ve_shims(ctx):
devmode_js = [
".ngfactory.js",
".ngsummary.js",
".js",
]
else:
devmode_js = [".js"]
if is_legacy_ngc:
summaries = [".ngsummary.json"]
metadata = [".metadata.json"]
else:
summaries = []
metadata = []
else:
devmode_js = [".js"]
if not _is_bazel():
devmode_js += [".ngfactory.js"]
summaries = []
metadata = []
elif is_legacy_ngc and short_path.endswith(".css"):
basename = short_path[len(package_prefix):-len(".css")]
devmode_js = [
".css.shim.ngstyle.js",
".css.ngstyle.js",
]
summaries = []
metadata = []
else:
continue
filter_summaries = ctx.attr.filter_summaries
closure_js = [f.replace(".js", ".mjs") for f in devmode_js if not filter_summaries or not f.endswith(".ngsummary.js")]
declarations = [f.replace(".js", ".d.ts") for f in devmode_js]
devmode_js_files += [ctx.actions.declare_file(basename + ext) for ext in devmode_js]
closure_js_files += [ctx.actions.declare_file(basename + ext) for ext in closure_js]
declaration_files += [ctx.actions.declare_file(basename + ext) for ext in declarations]
summary_files += [ctx.actions.declare_file(basename + ext) for ext in summaries]
if not _is_bazel():
metadata_files += [ctx.actions.declare_file(basename + ext) for ext in metadata]
dts_bundles = None
if _should_produce_dts_bundle(ctx):
# We need to add a suffix to bundle as it might collide with the flat module dts.
# The flat module dts out contains several other exports
# https://github.com/angular/angular/blob/84406e4d6d93b28b23efbb1701bc5ae1084da67b/packages/compiler-cli/src/metadata/index_writer.ts#L18
# the file name will be like 'core.bundle.d.ts'
dts_bundles = [ctx.actions.declare_file(ctx.label.name + _FLAT_DTS_FILE_SUFFIX)]
if _should_produce_r3_symbols_bundle(ctx):
dts_bundles.append(ctx.actions.declare_file(_R3_SYMBOLS_DTS_FILE.replace(".d.ts", _FLAT_DTS_FILE_SUFFIX)))
# We do this just when producing a flat module index for a publishable ng_module
if _should_produce_flat_module_outs(ctx):
flat_module_out = _flat_module_out_file(ctx)
devmode_js_files.append(ctx.actions.declare_file("%s.js" % flat_module_out))
closure_js_files.append(ctx.actions.declare_file("%s.mjs" % flat_module_out))
bundle_index_typings = ctx.actions.declare_file("%s.d.ts" % flat_module_out)
declaration_files.append(bundle_index_typings)
if is_legacy_ngc:
metadata_files.append(ctx.actions.declare_file("%s.metadata.json" % flat_module_out))
else:
bundle_index_typings = None
# TODO(alxhub): i18n is only produced by the legacy compiler currently. This should be re-enabled
# when ngtsc can extract messages
if is_legacy_ngc and _is_bazel():
i18n_messages_files = [ctx.actions.declare_file(ctx.label.name + "_ngc_messages.xmb")]
elif is_legacy_ngc:
# write the xmb file to blaze-genfiles since that path appears in the translation console keys
i18n_messages_files = [ctx.new_file(ctx.genfiles_dir, ctx.label.name + "_ngc_messages.xmb")]
else:
i18n_messages_files = []
return struct(
closure_js = closure_js_files,
devmode_js = devmode_js_files,
declarations = declaration_files,
summaries = summary_files,
metadata = metadata_files,
dts_bundles = dts_bundles,
bundle_index_typings = bundle_index_typings,
i18n_messages = i18n_messages_files,
)
# Determines if we need to generate View Engine shims (.ngfactory and .ngsummary files)
def _generate_ve_shims(ctx):
# we are checking the workspace name here, because otherwise this would be a breaking change
# (the shims used to be on by default)
# we can remove this check once angular/components and angular/angular-cli repos no longer depend
# on the presence of shims, or if they explicitly opt-in to their generation via ng_modules' generate_ve_shims attr
return _is_bazel() and _is_view_engine_enabled(ctx) or (
getattr(ctx.attr, "generate_ve_shims", False) == True or ctx.workspace_name != "angular"
)
def _ngc_tsconfig(ctx, files, srcs, **kwargs):
generate_ve_shims = _generate_ve_shims(ctx)
outs = _expected_outs(ctx)
is_legacy_ngc = _is_view_engine_enabled(ctx)
if "devmode_manifest" in kwargs:
expected_outs = outs.devmode_js + outs.declarations + outs.summaries + outs.metadata
else:
expected_outs = outs.closure_js
angular_compiler_options = {
"enableResourceInlining": ctx.attr.inline_resources,
"generateCodeForLibraries": False,
"allowEmptyCodegenFiles": True,
"generateNgFactoryShims": True if generate_ve_shims else False,
"generateNgSummaryShims": True if generate_ve_shims else False,
"enableSummariesForJit": is_legacy_ngc,
"enableIvy": is_ivy_enabled(ctx),
"fullTemplateTypeCheck": ctx.attr.type_check,
"ivyTemplateTypeCheck": False,
# to enable external symbol re-exports by default when running with Blaze.
"createExternalSymbolFactoryReexports": (not _is_bazel()),
# FIXME: wrong place to de-dupe
"expectedOut": depset([o.path for o in expected_outs]).to_list(),
"_useHostForImportGeneration": (not _is_bazel()),
}
if _should_produce_flat_module_outs(ctx):
angular_compiler_options["flatModuleId"] = ctx.attr.module_name
angular_compiler_options["flatModuleOutFile"] = _flat_module_out_file(ctx)
angular_compiler_options["flatModulePrivateSymbolPrefix"] = "_".join(
[ctx.workspace_name] + ctx.label.package.split("/") + [ctx.label.name, ""],
)
return dict(tsc_wrapped_tsconfig(ctx, files, srcs, **kwargs), **{
"angularCompilerOptions": angular_compiler_options,
})
def _collect_summaries_aspect_impl(target, ctx):
results = depset(target.angular.summaries if hasattr(target, "angular") else [])
# If we are visiting empty-srcs ts_library, this is a re-export
srcs = ctx.rule.attr.srcs if hasattr(ctx.rule.attr, "srcs") else []
# "re-export" rules should expose all the files of their deps
if not srcs and hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
if (hasattr(dep, "angular")):
results = depset(dep.angular.summaries, transitive = [results])
return struct(collect_summaries_aspect_result = results)
_collect_summaries_aspect = aspect(
implementation = _collect_summaries_aspect_impl,
attr_aspects = ["deps"],
)
# Extra options passed to Node when running ngc.
_EXTRA_NODE_OPTIONS_FLAGS = [
# Expose the v8 garbage collection API to JS.
"--node_options=--expose-gc",
# Show ~full stack traces, instead of cutting off after 10 items.
"--node_options=--stack-trace-limit=100",
# Give 4 GB RAM to node to allow bigger google3 modules to compile.
"--node_options=--max-old-space-size=4096",
]
def ngc_compile_action(
ctx,
label,
inputs,
outputs,
messages_out,
tsconfig_file,
node_opts,
locale = None,
i18n_args = [],
dts_bundles_out = None,
compile_mode = "prodmode"):
is_legacy_ngc = _is_view_engine_enabled(ctx)
mnemonic = "AngularTemplateCompile"
progress_message = "Compiling Angular templates (%s - %s) %s" % (_compiler_name(ctx), compile_mode, label)
if locale:
mnemonic = "AngularI18NMerging"
supports_workers = "0"
progress_message = ("Recompiling Angular templates (ngc - %s) %s for locale %s" %
(compile_mode, label, locale))
else:
supports_workers = str(int(ctx.attr._supports_workers))
arguments = (list(_EXTRA_NODE_OPTIONS_FLAGS) +
["--node_options=%s" % opt for opt in node_opts])
# One at-sign makes this a params-file, enabling the worker strategy.
    # Two at-signs escape the argument so it's passed through to ngc
if supports_workers == "1":
arguments += ["@@" + tsconfig_file.path]
else:
arguments += ["-p", tsconfig_file.path]
arguments += i18n_args
ctx.actions.run(
progress_message = progress_message,
mnemonic = mnemonic,
inputs = inputs,
outputs = outputs,
arguments = arguments,
executable = ctx.executable.compiler,
execution_requirements = {
"supports-workers": supports_workers,
},
)
if is_legacy_ngc and messages_out != None:
message_file_path = messages_out[0].short_path if _is_bazel() else "../genfiles/" + messages_out[0].short_path
ctx.actions.run(
inputs = inputs,
outputs = messages_out,
executable = ctx.executable.ng_xi18n,
arguments = (_EXTRA_NODE_OPTIONS_FLAGS +
[tsconfig_file.path] +
[message_file_path]),
progress_message = "Extracting Angular 2 messages (ng_xi18n)",
mnemonic = "Angular2MessageExtractor",
)
if dts_bundles_out != None:
filter_inputs = [f for f in inputs.to_list() + outputs if f.path.endswith(".d.ts") or f.path.endswith(".json")]
if _should_produce_flat_module_outs(ctx):
dts_entry_points = ["%s.d.ts" % _flat_module_out_file(ctx)]
else:
dts_entry_points = [ctx.attr.entry_point.label.name.replace(".ts", ".d.ts")]
if _should_produce_r3_symbols_bundle(ctx):
dts_entry_points.append(_R3_SYMBOLS_DTS_FILE)
ctx.actions.run(
progress_message = "Bundling DTS (%s) %s" % (compile_mode, str(ctx.label)),
mnemonic = "APIExtractor",
executable = ctx.executable.api_extractor,
inputs = filter_inputs,
outputs = dts_bundles_out,
arguments = [
tsconfig_file.path,
",".join(["/".join([ctx.bin_dir.path, ctx.label.package, f]) for f in dts_entry_points]),
",".join([f.path for f in dts_bundles_out]),
],
)
if not locale and not ctx.attr.no_i18n:
return struct(
label = label,
tsconfig = tsconfig_file,
inputs = inputs,
outputs = outputs,
compiler = ctx.executable.compiler,
)
return None
def _filter_ts_inputs(all_inputs):
return [
f
for f in all_inputs
if f.path.endswith(".js") or f.path.endswith(".ts") or f.path.endswith(".json")
]
def _compile_action(
ctx,
inputs,
outputs,
dts_bundles_out,
messages_out,
tsconfig_file,
node_opts,
compile_mode):
file_inputs = list(ctx.files.assets)
if (type(inputs) == type([])):
file_inputs.extend(inputs)
else:
file_inputs.extend(inputs.to_list())
if hasattr(ctx.attr, "node_modules"):
file_inputs.extend(_filter_ts_inputs(ctx.files.node_modules))
if hasattr(ctx.attr, "tsconfig") and ctx.file.tsconfig:
file_inputs.append(ctx.file.tsconfig)
if TsConfigInfo in ctx.attr.tsconfig:
file_inputs += ctx.attr.tsconfig[TsConfigInfo].deps
for d in ctx.attr.deps:
if NpmPackageInfo in d:
file_inputs.extend(_filter_ts_inputs(d[NpmPackageInfo].sources.to_list()))
# Collect the inputs and summary files from our deps
action_inputs = depset(
file_inputs,
transitive = [
dep.collect_summaries_aspect_result
for dep in ctx.attr.deps
if hasattr(dep, "collect_summaries_aspect_result")
],
)
return ngc_compile_action(ctx, ctx.label, action_inputs, outputs, messages_out, tsconfig_file, node_opts, None, [], dts_bundles_out, compile_mode)
def _prodmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
return _compile_action(ctx, inputs, outputs + outs.closure_js, None, outs.i18n_messages, tsconfig_file, node_opts, "prodmode")
def _devmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
compile_action_outputs = outputs + outs.devmode_js + outs.declarations + outs.summaries + outs.metadata
_compile_action(ctx, inputs, compile_action_outputs, outs.dts_bundles, None, tsconfig_file, node_opts, "devmode")
def _ts_expected_outs(ctx, label, srcs_files = []):
# rules_typescript expects a function with two or more arguments, but our
    # implementation doesn't use the label (and **kwargs).
_ignored = [label, srcs_files]
return _expected_outs(ctx)
def ng_module_impl(ctx, ts_compile_actions):
is_legacy_ngc = _is_view_engine_enabled(ctx)
providers = ts_compile_actions(
ctx,
is_library = True,
deps = ctx.attr.deps,
compile_action = _prodmode_compile_action,
devmode_compile_action = _devmode_compile_action,
tsc_wrapped_tsconfig = _ngc_tsconfig,
outputs = _ts_expected_outs,
)
outs = _expected_outs(ctx)
if is_legacy_ngc:
providers["angular"] = {
"summaries": outs.summaries,
"metadata": outs.metadata,
}
providers["ngc_messages"] = outs.i18n_messages
if is_legacy_ngc and _should_produce_flat_module_outs(ctx):
if len(outs.metadata) > 1:
fail("expecting exactly one metadata output for " + str(ctx.label))
providers["angular"]["flat_module_metadata"] = struct(
module_name = ctx.attr.module_name,
metadata_file = outs.metadata[0],
typings_file = outs.bundle_index_typings,
flat_module_out_file = _flat_module_out_file(ctx),
)
if outs.dts_bundles != None:
providers["dts_bundles"] = outs.dts_bundles
return providers
def _ng_module_impl(ctx):
ts_providers = ng_module_impl(ctx, compile_ts)
ts_providers["providers"].extend([
js_named_module_info(
sources = ts_providers["typescript"]["es5_sources"],
deps = ctx.attr.deps,
),
js_ecma_script_module_info(
sources = ts_providers["typescript"]["es6_sources"],
deps = ctx.attr.deps,
),
])
return ts_providers_dict_to_struct(ts_providers)
local_deps_aspects = [node_modules_aspect, _collect_summaries_aspect]
[local_deps_aspects.append(a) for a in DEPS_ASPECTS]
NG_MODULE_ATTRIBUTES = {
"srcs": attr.label_list(allow_files = [".ts"]),
"deps": attr.label_list(
doc = "Targets that are imported by this target",
aspects = local_deps_aspects,
),
"assets": attr.label_list(
doc = ".html and .css files needed by the Angular compiler",
allow_files = [
".css",
".html",
],
),
"factories": attr.label_list(
allow_files = [".ts", ".html"],
mandatory = False,
),
"filter_summaries": attr.bool(default = False),
"type_check": attr.bool(default = True),
"inline_resources": attr.bool(default = True),
"no_i18n": attr.bool(default = False),
"compiler": attr.label(
doc = """Sets a different ngc compiler binary to use for this library.
The default ngc compiler depends on the `@npm//@angular/bazel`
target which is setup for projects that use bazel managed npm deps that
fetch the @angular/bazel npm package. It is recommended that you use
the workspace name `@npm` for bazel managed deps so the default
compiler works out of the box. Otherwise, you'll have to override
the compiler attribute manually.
""",
default = Label(DEFAULT_NG_COMPILER),
executable = True,
cfg = "host",
),
"ng_xi18n": attr.label(
default = Label(DEFAULT_NG_XI18N),
executable = True,
cfg = "host",
),
"_supports_workers": attr.bool(default = True),
}
NG_MODULE_RULE_ATTRS = dict(dict(COMMON_ATTRIBUTES, **NG_MODULE_ATTRIBUTES), **{
"tsconfig": attr.label(allow_single_file = True),
"node_modules": attr.label(
doc = """The npm packages which should be available during the compile.
The default value of `@npm//typescript:typescript__typings` is
for projects that use bazel managed npm deps. It is recommended
that you use the workspace name `@npm` for bazel managed deps so the
default value works out of the box. Otherwise, you'll have to
override the node_modules attribute manually. This default is in place
since code compiled by ng_module will always depend on at least the
typescript default libs which are provided by
`@npm//typescript:typescript__typings`.
This attribute is DEPRECATED. As of version 0.18.0 the recommended
approach to npm dependencies is to use fine grained npm dependencies
which are setup with the `yarn_install` or `npm_install` rules.
For example, in targets that used a `//:node_modules` filegroup,
```
ng_module(
name = "my_lib",
...
node_modules = "//:node_modules",
)
```
which specifies all files within the `//:node_modules` filegroup
to be inputs to the `my_lib`. Using fine grained npm dependencies,
`my_lib` is defined with only the npm dependencies that are
needed:
```
ng_module(
name = "my_lib",
...
deps = [
"@npm//@types/foo",
"@npm//@types/bar",
"@npm//foo",
"@npm//bar",
...
],
)
```
In this case, only the listed npm packages and their
        transitive deps are included as inputs to the `my_lib` target
which reduces the time required to setup the runfiles for this
target (see https://github.com/bazelbuild/bazel/issues/5153).
The default typescript libs are also available via the node_modules
default in this case.
The @npm external repository and the fine grained npm package
targets are setup using the `yarn_install` or `npm_install` rule
in your WORKSPACE file:
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
""",
default = Label("@npm//typescript:typescript__typings"),
),
"entry_point": attr.label(allow_single_file = True),
"flat_module_out_file": attr.string(),
"bundle_dts": attr.bool(default = False),
"api_extractor": attr.label(
default = Label(DEFAULT_API_EXTRACTOR),
executable = True,
cfg = "host",
),
"generate_ve_shims": attr.bool(default = False),
})
ng_module = rule(
implementation = _ng_module_impl,
attrs = NG_MODULE_RULE_ATTRS,
outputs = COMMON_OUTPUTS,
)
def ng_module_macro(tsconfig = None, **kwargs):
if not tsconfig:
tsconfig = "//:tsconfig.json"
ng_module(tsconfig = tsconfig, **kwargs)
| true
| true
|
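The ng_module.bzl record above keys most of its behavior on whether the Ivy compiler is enabled, checking the `compile`, `angular_ivy_enabled`, and `GROK_ELLIPSIS_BUILD` build variables in that order. A minimal Python sketch of the same decision logic, with a plain dict standing in for Bazel's `ctx.var` (the dict argument and the asserts are illustrative, not part of the rule's API):
```
# Minimal sketch of the is_ivy_enabled() checks from the record above,
# with an ordinary dict standing in for Bazel's ctx.var.
def is_ivy_enabled(build_vars):
    # --define compile=aot forces the Ivy (ngtsc) compiler.
    if build_vars.get("compile") == "aot":
        return True
    # --define angular_ivy_enabled=True opts in explicitly.
    if build_vars.get("angular_ivy_enabled") == "True":
        return True
    # Kythe indexer builds set GROK_ELLIPSIS_BUILD and are compiled with Ivy.
    if build_vars.get("GROK_ELLIPSIS_BUILD") is not None:
        return True
    # Otherwise default to the ViewEngine compiler.
    return False

assert is_ivy_enabled({"compile": "aot"}) is True
assert is_ivy_enabled({}) is False
```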
1c42e27db63ee9fece775495e0835c19ee035ba6
| 684
|
py
|
Python
|
dashboard/migrations/0026_auto_20200903_1538.py
|
HERA-Team/heranow
|
1bc827459a7a92f600cefbd0c8a08f629a211cda
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/migrations/0026_auto_20200903_1538.py
|
HERA-Team/heranow
|
1bc827459a7a92f600cefbd0c8a08f629a211cda
|
[
"BSD-3-Clause"
] | 6
|
2020-09-10T05:33:17.000Z
|
2021-03-16T20:36:47.000Z
|
dashboard/migrations/0026_auto_20200903_1538.py
|
HERA-Team/heranow
|
1bc827459a7a92f600cefbd0c8a08f629a211cda
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-09-03 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("dashboard", "0025_auto_20200902_2049"),
]
operations = [
migrations.AlterModelOptions(
name="snaptoant", options={"ordering": ["node", "snap"]},
),
migrations.AlterField(
model_name="snaptoant",
name="node",
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="snaptoant",
name="snap",
field=models.IntegerField(blank=True, null=True),
),
]
| 25.333333
| 69
| 0.573099
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("dashboard", "0025_auto_20200902_2049"),
]
operations = [
migrations.AlterModelOptions(
name="snaptoant", options={"ordering": ["node", "snap"]},
),
migrations.AlterField(
model_name="snaptoant",
name="node",
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="snaptoant",
name="snap",
field=models.IntegerField(blank=True, null=True),
),
]
| true
| true
|
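The migration above makes `node` and `snap` on `SnapToAnt` nullable integers and adds a default ordering. A hedged sketch of the model state the migration moves toward (model and field names come from the migration itself; everything else about the model is assumed):
```
# Sketch of the post-migration model state; only the fields and Meta
# options touched by this migration are shown.
from django.db import models

class SnapToAnt(models.Model):
    node = models.IntegerField(blank=True, null=True)
    snap = models.IntegerField(blank=True, null=True)

    class Meta:
        ordering = ["node", "snap"]
```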
1c42e35407c661a4aa593a0690413cb4d041d6eb
| 1,639
|
py
|
Python
|
tetrad_cms/config/local.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
tetrad_cms/config/local.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
tetrad_cms/config/local.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
from .base import *
# GENERAL
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', ' ').split(' ')
# REST FRAMEWORK
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny', ),
'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication', ),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
'rest_framework.parsers.JSONParser',
)
}
# CORS
# ------------------------------------------------------------------------------
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_DB'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.dummy',
# }
# }
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
SITE_ID = 1
| 28.754386
| 94
| 0.568029
|
from .base import *
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', ' ').split(' ')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny', ),
'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication', ),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
'rest_framework.parsers.JSONParser',
)
}
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_DB'),
'USER': os.environ.get('POSTGRES_USER'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'),
}
}
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
SITE_ID = 1
| true
| true
|
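One detail in the settings record above is worth noting: `os.environ.get('ALLOWED_HOSTS', ' ').split(' ')` returns `['', '']` when the variable is unset, so the list holds empty host entries. A hedged sketch of a more defensive parse (the helper name and behavior are illustrative, not part of the project):
```
# Sketch: split ALLOWED_HOSTS on whitespace and drop empty entries, so an
# unset variable yields [] instead of ['', ''].
import os

def parse_allowed_hosts(default=""):
    raw = os.environ.get("ALLOWED_HOSTS", default)
    return [host for host in raw.split() if host]

os.environ["ALLOWED_HOSTS"] = "example.com api.example.com"
assert parse_allowed_hosts() == ["example.com", "api.example.com"]
```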
1c42e373479814ddbda206be88d0715f6ab20dc6
| 4,817
|
py
|
Python
|
scripts/github-actions/filter_sarif.py
|
aerkiaga/avogadrolibs
|
f0a64061f521dce156e67e07118db546da6b9f1b
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/github-actions/filter_sarif.py
|
aerkiaga/avogadrolibs
|
f0a64061f521dce156e67e07118db546da6b9f1b
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/github-actions/filter_sarif.py
|
aerkiaga/avogadrolibs
|
f0a64061f521dce156e67e07118db546da6b9f1b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# From https://github.com/zbazztian/filter-sarif/blob/master/filter_sarif.py
# Some modifications by Geoffrey Hutchison
import argparse
import json
import re
from globber import match
def match_path_and_rule(path, rule, patterns):
result = True
for sign, file_pattern, rule_pattern in patterns:
if match(rule_pattern, rule) and match(file_pattern, path):
result = sign
return result
def parse_pattern(line):
sep_char = ":"
esc_char = "\\"
file_pattern = ""
rule_pattern = ""
seen_separator = False
sign = True
# inclusion or exclusion pattern?
uline = line
if line:
if line[0] == "-":
sign = False
uline = line[1:]
elif line[0] == "+":
uline = line[1:]
i = 0
while i < len(uline):
char = uline[i]
i = i + 1
if char == sep_char:
if seen_separator:
raise Exception(
'Invalid pattern: "' + line + '" Contains more than one separator!'
)
seen_separator = True
continue
if char == esc_char:
next_char = uline[i] if (i < len(uline)) else None
if next_char in ["+", "-", esc_char, sep_char]:
i = i + 1
char = next_char
if seen_separator:
rule_pattern = rule_pattern + char
else:
file_pattern = file_pattern + char
if not rule_pattern:
rule_pattern = "**"
return sign, file_pattern, rule_pattern
def filter_sarif(args):
if args.split_lines:
tmp = []
for pattern in args.patterns:
tmp = tmp + re.split("\r?\n", pattern)
args.patterns = tmp
args.patterns = [parse_pattern(pattern) for pattern in args.patterns if pattern]
print("Given patterns:")
for sign, file_pattern, rule_pattern in args.patterns:
sign_text = "positive" if sign else "negative"
print(f"files: {file_pattern} rules: {rule_pattern} ({sign_text})")
with open(args.input, "r", encoding="UTF-8") as file:
sarif = json.load(file)
for run in sarif.get("runs", []):
if run.get("results", []):
new_results = []
for result in run["results"]:
if result.get("locations", []):
new_locations = []
for location in result["locations"]:
# TODO: The uri field is optional. We might have to fetch the
# actual uri from "artifacts" via "index"
# (https://github.com/microsoft/sarif-tutorials/blob/main/docs/2-Basics.md)
uri = (
location.get("physicalLocation", {})
.get("artifactLocation", {})
.get("uri", None)
)
# TODO: The ruleId field is optional and potentially ambiguous.
# We might have to fetch the actual ruleId from the rule metadata
# via the ruleIndex field.
# (https://github.com/microsoft/sarif-tutorials/blob/main/docs/2-Basics.md)
rule_id = result["ruleId"]
if uri is None or match_path_and_rule(
uri, rule_id, args.patterns
):
new_locations.append(location)
result["locations"] = new_locations
if new_locations:
new_results.append(result)
else:
# locations array doesn't exist or is empty, so we can't match on anything
# therefore, we include the result in the output
new_results.append(result)
run["results"] = new_results
with open(args.output, "w", encoding="UTF-8") as file:
json.dump(sarif, file, indent=args.indent)
def main():
parser = argparse.ArgumentParser(prog="filter-sarif")
parser.add_argument("--input", help="Input SARIF file", required=True)
parser.add_argument("--output", help="Output SARIF file", required=True)
parser.add_argument(
"--split-lines",
default=False,
action="store_true",
help="Split given patterns on newlines.",
)
parser.add_argument(
"--indent", default=None, type=int, help="Indentation level for JSON output."
)
parser.add_argument("patterns", help="Inclusion and exclusion patterns.", nargs="+")
def print_usage():
print(parser.format_usage())
args = parser.parse_args()
filter_sarif(args)
if __name__ == "__main__":
main()
| 33.451389
| 99
| 0.542246
|
import argparse
import json
import re
from globber import match
def match_path_and_rule(path, rule, patterns):
result = True
for sign, file_pattern, rule_pattern in patterns:
if match(rule_pattern, rule) and match(file_pattern, path):
result = sign
return result
def parse_pattern(line):
sep_char = ":"
esc_char = "\\"
file_pattern = ""
rule_pattern = ""
seen_separator = False
sign = True
uline = line
if line:
if line[0] == "-":
sign = False
uline = line[1:]
elif line[0] == "+":
uline = line[1:]
i = 0
while i < len(uline):
char = uline[i]
i = i + 1
if char == sep_char:
if seen_separator:
raise Exception(
'Invalid pattern: "' + line + '" Contains more than one separator!'
)
seen_separator = True
continue
if char == esc_char:
next_char = uline[i] if (i < len(uline)) else None
if next_char in ["+", "-", esc_char, sep_char]:
i = i + 1
char = next_char
if seen_separator:
rule_pattern = rule_pattern + char
else:
file_pattern = file_pattern + char
if not rule_pattern:
rule_pattern = "**"
return sign, file_pattern, rule_pattern
def filter_sarif(args):
if args.split_lines:
tmp = []
for pattern in args.patterns:
tmp = tmp + re.split("\r?\n", pattern)
args.patterns = tmp
args.patterns = [parse_pattern(pattern) for pattern in args.patterns if pattern]
print("Given patterns:")
for sign, file_pattern, rule_pattern in args.patterns:
sign_text = "positive" if sign else "negative"
print(f"files: {file_pattern} rules: {rule_pattern} ({sign_text})")
with open(args.input, "r", encoding="UTF-8") as file:
sarif = json.load(file)
for run in sarif.get("runs", []):
if run.get("results", []):
new_results = []
for result in run["results"]:
if result.get("locations", []):
new_locations = []
for location in result["locations"]:
uri = (
location.get("physicalLocation", {})
.get("artifactLocation", {})
.get("uri", None)
)
rule_id = result["ruleId"]
if uri is None or match_path_and_rule(
uri, rule_id, args.patterns
):
new_locations.append(location)
result["locations"] = new_locations
if new_locations:
new_results.append(result)
else:
new_results.append(result)
run["results"] = new_results
with open(args.output, "w", encoding="UTF-8") as file:
json.dump(sarif, file, indent=args.indent)
def main():
parser = argparse.ArgumentParser(prog="filter-sarif")
parser.add_argument("--input", help="Input SARIF file", required=True)
parser.add_argument("--output", help="Output SARIF file", required=True)
parser.add_argument(
"--split-lines",
default=False,
action="store_true",
help="Split given patterns on newlines.",
)
parser.add_argument(
"--indent", default=None, type=int, help="Indentation level for JSON output."
)
parser.add_argument("patterns", help="Inclusion and exclusion patterns.", nargs="+")
def print_usage():
print(parser.format_usage())
args = parser.parse_args()
filter_sarif(args)
if __name__ == "__main__":
main()
| true
| true
|
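`parse_pattern` in the record above turns a `file:rule` pattern into a `(sign, file_pattern, rule_pattern)` triple: a leading `-` marks an exclusion, `\` escapes the special characters, and a missing rule part defaults to `**`. A hedged usage sketch (the import path is assumed; the expected values were derived by tracing the function by hand, not by running the upstream project):
```
# Usage sketch for parse_pattern; assumes the module is importable as shown.
from filter_sarif import parse_pattern  # assumed import path

# Plain inclusion: text before ':' is the file glob, after it the rule glob.
assert parse_pattern("src/**:py/unused-import") == (True, "src/**", "py/unused-import")

# A leading '-' flips the sign, turning the pattern into an exclusion.
assert parse_pattern("-generated/**:**") == (False, "generated/**", "**")

# Without ':' the rule part defaults to '**' (match every rule).
assert parse_pattern("+vendor/**") == (True, "vendor/**", "**")

# Backslash escaping allows a literal ':' inside the file part.
assert parse_pattern(r"c\:/temp/**:**") == (True, "c:/temp/**", "**")
```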
1c42e427a00c05ad31c186816aafb28c013df29c
| 7,088
|
py
|
Python
|
homeassistant/components/lock/wink.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
homeassistant/components/lock/wink.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 34
|
2018-05-22T07:19:40.000Z
|
2022-03-11T23:21:03.000Z
|
homeassistant/components/lock/wink.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""
Support for Wink locks.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/lock.wink/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.lock import LockDevice
from homeassistant.components.wink import DOMAIN, WinkDevice
from homeassistant.const import (
ATTR_CODE, ATTR_ENTITY_ID, ATTR_NAME, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['wink']
_LOGGER = logging.getLogger(__name__)
SERVICE_SET_VACATION_MODE = 'wink_set_lock_vacation_mode'
SERVICE_SET_ALARM_MODE = 'wink_set_lock_alarm_mode'
SERVICE_SET_ALARM_SENSITIVITY = 'wink_set_lock_alarm_sensitivity'
SERVICE_SET_ALARM_STATE = 'wink_set_lock_alarm_state'
SERVICE_SET_BEEPER_STATE = 'wink_set_lock_beeper_state'
SERVICE_ADD_KEY = 'wink_add_new_lock_key_code'
ATTR_ENABLED = 'enabled'
ATTR_SENSITIVITY = 'sensitivity'
ATTR_MODE = 'mode'
ALARM_SENSITIVITY_MAP = {
'low': 0.2,
'medium_low': 0.4,
'medium': 0.6,
'medium_high': 0.8,
'high': 1.0,
}
ALARM_MODES_MAP = {
'activity': 'alert',
'forced_entry': 'forced_entry',
'tamper': 'tamper',
}
SET_ENABLED_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_ENABLED): cv.string,
})
SET_SENSITIVITY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_SENSITIVITY): vol.In(ALARM_SENSITIVITY_MAP)
})
SET_ALARM_MODES_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_MODE): vol.In(ALARM_MODES_MAP)
})
ADD_KEY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_CODE): cv.positive_int,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Wink platform."""
import pywink
for lock in pywink.get_locks():
_id = lock.object_id() + lock.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkLockDevice(lock, hass)])
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get('entity_id')
all_locks = hass.data[DOMAIN]['entities']['lock']
locks_to_set = []
if entity_ids is None:
locks_to_set = all_locks
else:
for lock in all_locks:
if lock.entity_id in entity_ids:
locks_to_set.append(lock)
for lock in locks_to_set:
if service.service == SERVICE_SET_VACATION_MODE:
lock.set_vacation_mode(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_STATE:
lock.set_alarm_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_BEEPER_STATE:
lock.set_beeper_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_MODE:
lock.set_alarm_mode(service.data.get(ATTR_MODE))
elif service.service == SERVICE_SET_ALARM_SENSITIVITY:
lock.set_alarm_sensitivity(service.data.get(ATTR_SENSITIVITY))
elif service.service == SERVICE_ADD_KEY:
name = service.data.get(ATTR_NAME)
code = service.data.get(ATTR_CODE)
lock.add_new_key(code, name)
hass.services.register(DOMAIN, SERVICE_SET_VACATION_MODE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_BEEPER_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_MODE,
service_handle,
schema=SET_ALARM_MODES_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_SENSITIVITY,
service_handle,
schema=SET_SENSITIVITY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_KEY,
service_handle,
schema=ADD_KEY_SCHEMA)
class WinkLockDevice(WinkDevice, LockDevice):
"""Representation of a Wink lock."""
@asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['lock'].append(self)
@property
def is_locked(self):
"""Return true if device is locked."""
return self.wink.state()
def lock(self, **kwargs):
"""Lock the device."""
self.wink.set_state(True)
def unlock(self, **kwargs):
"""Unlock the device."""
self.wink.set_state(False)
def set_alarm_state(self, enabled):
"""Set lock's alarm state."""
self.wink.set_alarm_state(enabled)
def set_vacation_mode(self, enabled):
"""Set lock's vacation mode."""
self.wink.set_vacation_mode(enabled)
def set_beeper_state(self, enabled):
"""Set lock's beeper mode."""
self.wink.set_beeper_mode(enabled)
def add_new_key(self, code, name):
"""Add a new user key code."""
self.wink.add_new_key(code, name)
def set_alarm_sensitivity(self, sensitivity):
"""
Set lock's alarm sensitivity.
Valid sensitivities:
0.2, 0.4, 0.6, 0.8, 1.0
"""
self.wink.set_alarm_sensitivity(sensitivity)
def set_alarm_mode(self, mode):
"""
Set lock's alarm mode.
Valid modes:
alert - Beep when lock is locked or unlocked
tamper - 15 sec alarm when lock is disturbed when locked
forced_entry - 3 min alarm when significant force applied
to door when locked.
"""
self.wink.set_alarm_mode(mode)
@property
def device_state_attributes(self):
"""Return the state attributes."""
super_attrs = super().device_state_attributes
sensitivity = dict_value_to_key(ALARM_SENSITIVITY_MAP,
self.wink.alarm_sensitivity())
super_attrs['alarm_sensitivity'] = sensitivity
super_attrs['vacation_mode'] = self.wink.vacation_mode_enabled()
super_attrs['beeper_mode'] = self.wink.beeper_enabled()
super_attrs['auto_lock'] = self.wink.auto_lock_enabled()
alarm_mode = dict_value_to_key(ALARM_MODES_MAP,
self.wink.alarm_mode())
super_attrs['alarm_mode'] = alarm_mode
super_attrs['alarm_enabled'] = self.wink.alarm_enabled()
return super_attrs
def dict_value_to_key(dict_map, comp_value):
"""Return the key that has the provided value."""
for key, value in dict_map.items():
if value == comp_value:
return key
return STATE_UNKNOWN
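A minimal sketch, assuming Home Assistant and voluptuous are importable and a hypothetical lock.front_door entity, of how the schemas defined above validate service payloads:

# Hypothetical service data; entity ids depend on the installation.
vacation_data = SET_ENABLED_SCHEMA({
    'entity_id': 'lock.front_door',
    'enabled': 'true',
})
sensitivity_data = SET_SENSITIVITY_SCHEMA({
    'sensitivity': 'medium',   # must be a key of ALARM_SENSITIVITY_MAP
})
# dict_value_to_key maps a stored value back to its friendly name.
assert dict_value_to_key(ALARM_SENSITIVITY_MAP, 0.6) == 'medium'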
| 33.433962
| 78
| 0.643905
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.lock import LockDevice
from homeassistant.components.wink import DOMAIN, WinkDevice
from homeassistant.const import (
ATTR_CODE, ATTR_ENTITY_ID, ATTR_NAME, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['wink']
_LOGGER = logging.getLogger(__name__)
SERVICE_SET_VACATION_MODE = 'wink_set_lock_vacation_mode'
SERVICE_SET_ALARM_MODE = 'wink_set_lock_alarm_mode'
SERVICE_SET_ALARM_SENSITIVITY = 'wink_set_lock_alarm_sensitivity'
SERVICE_SET_ALARM_STATE = 'wink_set_lock_alarm_state'
SERVICE_SET_BEEPER_STATE = 'wink_set_lock_beeper_state'
SERVICE_ADD_KEY = 'wink_add_new_lock_key_code'
ATTR_ENABLED = 'enabled'
ATTR_SENSITIVITY = 'sensitivity'
ATTR_MODE = 'mode'
ALARM_SENSITIVITY_MAP = {
'low': 0.2,
'medium_low': 0.4,
'medium': 0.6,
'medium_high': 0.8,
'high': 1.0,
}
ALARM_MODES_MAP = {
'activity': 'alert',
'forced_entry': 'forced_entry',
'tamper': 'tamper',
}
SET_ENABLED_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_ENABLED): cv.string,
})
SET_SENSITIVITY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_SENSITIVITY): vol.In(ALARM_SENSITIVITY_MAP)
})
SET_ALARM_MODES_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_MODE): vol.In(ALARM_MODES_MAP)
})
ADD_KEY_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_CODE): cv.positive_int,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
import pywink
for lock in pywink.get_locks():
_id = lock.object_id() + lock.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkLockDevice(lock, hass)])
def service_handle(service):
entity_ids = service.data.get('entity_id')
all_locks = hass.data[DOMAIN]['entities']['lock']
locks_to_set = []
if entity_ids is None:
locks_to_set = all_locks
else:
for lock in all_locks:
if lock.entity_id in entity_ids:
locks_to_set.append(lock)
for lock in locks_to_set:
if service.service == SERVICE_SET_VACATION_MODE:
lock.set_vacation_mode(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_STATE:
lock.set_alarm_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_BEEPER_STATE:
lock.set_beeper_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_ALARM_MODE:
lock.set_alarm_mode(service.data.get(ATTR_MODE))
elif service.service == SERVICE_SET_ALARM_SENSITIVITY:
lock.set_alarm_sensitivity(service.data.get(ATTR_SENSITIVITY))
elif service.service == SERVICE_ADD_KEY:
name = service.data.get(ATTR_NAME)
code = service.data.get(ATTR_CODE)
lock.add_new_key(code, name)
hass.services.register(DOMAIN, SERVICE_SET_VACATION_MODE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_BEEPER_STATE,
service_handle,
schema=SET_ENABLED_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_MODE,
service_handle,
schema=SET_ALARM_MODES_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_ALARM_SENSITIVITY,
service_handle,
schema=SET_SENSITIVITY_SCHEMA)
hass.services.register(DOMAIN, SERVICE_ADD_KEY,
service_handle,
schema=ADD_KEY_SCHEMA)
class WinkLockDevice(WinkDevice, LockDevice):
@asyncio.coroutine
def async_added_to_hass(self):
self.hass.data[DOMAIN]['entities']['lock'].append(self)
@property
def is_locked(self):
return self.wink.state()
def lock(self, **kwargs):
self.wink.set_state(True)
def unlock(self, **kwargs):
self.wink.set_state(False)
def set_alarm_state(self, enabled):
self.wink.set_alarm_state(enabled)
def set_vacation_mode(self, enabled):
self.wink.set_vacation_mode(enabled)
def set_beeper_state(self, enabled):
self.wink.set_beeper_mode(enabled)
def add_new_key(self, code, name):
self.wink.add_new_key(code, name)
def set_alarm_sensitivity(self, sensitivity):
self.wink.set_alarm_sensitivity(sensitivity)
def set_alarm_mode(self, mode):
self.wink.set_alarm_mode(mode)
@property
def device_state_attributes(self):
super_attrs = super().device_state_attributes
sensitivity = dict_value_to_key(ALARM_SENSITIVITY_MAP,
self.wink.alarm_sensitivity())
super_attrs['alarm_sensitivity'] = sensitivity
super_attrs['vacation_mode'] = self.wink.vacation_mode_enabled()
super_attrs['beeper_mode'] = self.wink.beeper_enabled()
super_attrs['auto_lock'] = self.wink.auto_lock_enabled()
alarm_mode = dict_value_to_key(ALARM_MODES_MAP,
self.wink.alarm_mode())
super_attrs['alarm_mode'] = alarm_mode
super_attrs['alarm_enabled'] = self.wink.alarm_enabled()
return super_attrs
def dict_value_to_key(dict_map, comp_value):
for key, value in dict_map.items():
if value == comp_value:
return key
return STATE_UNKNOWN
| true
| true
|
1c42e5438572fb044589dd487d08721a41242e32
| 2,155
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/interactive/__init__.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 3,287
|
2016-07-26T17:34:33.000Z
|
2022-03-31T09:52:13.000Z
|
src/azure-cli/azure/cli/command_modules/interactive/__init__.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 19,206
|
2016-07-26T07:04:42.000Z
|
2022-03-31T23:57:09.000Z
|
src/azure-cli/azure/cli/command_modules/interactive/__init__.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 2,575
|
2016-07-26T06:44:40.000Z
|
2022-03-31T22:56:06.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
from azure.cli.core import AzCommandsLoader
helps['interactive'] = """
type: command
short-summary: Start interactive mode. Installs the Interactive extension if not installed already.
long-summary: >
For more information on interactive mode, see: https://azure.microsoft.com/blog/welcome-to-azure-cli-shell/
"""
class InteractiveCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core import ModExtensionSuppress
super(InteractiveCommandsLoader, self).__init__(
cli_ctx=cli_ctx,
suppress_extension=ModExtensionSuppress(
__name__, 'alias', '0.5.1',
reason='Your version of the extension is not compatible with this version of the CLI.',
recommend_update=True))
def load_command_table(self, _):
with self.command_group('', operations_tmpl='azure.cli.command_modules.interactive.custom#{}') as g:
g.command('interactive', 'start_shell', is_preview=True)
return self.command_table
def load_arguments(self, _):
with self.argument_context('interactive') as c:
style_options = ['quiet', 'purple', 'default', 'none', 'contrast', 'pastel',
'halloween', 'grey', 'br', 'bg', 'primary', 'neon']
c.argument('style', options_list=['--style', '-s'], help='The colors of the shell.',
choices=style_options)
c.argument('update', help='Update the Interactive extension to the latest available.',
action='store_true')
c.ignore('_subscription') # hide global subscription param
COMMAND_LOADER_CLS = InteractiveCommandsLoader
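A standalone sketch of how the operations_tmpl above resolves: the '{}' placeholder is filled with the operation name registered for the command ('start_shell' here), and the '#' separates the module path from the attribute to load.

operations_tmpl = 'azure.cli.command_modules.interactive.custom#{}'
module_path, attr_name = operations_tmpl.format('start_shell').split('#')
print(module_path)  # azure.cli.command_modules.interactive.custom
print(attr_name)    # start_shell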
| 43.979592
| 123
| 0.589327
|
from knack.help_files import helps
from azure.cli.core import AzCommandsLoader
helps['interactive'] = """
type: command
short-summary: Start interactive mode. Installs the Interactive extension if not installed already.
long-summary: >
For more information on interactive mode, see: https://azure.microsoft.com/blog/welcome-to-azure-cli-shell/
"""
class InteractiveCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core import ModExtensionSuppress
super(InteractiveCommandsLoader, self).__init__(
cli_ctx=cli_ctx,
suppress_extension=ModExtensionSuppress(
__name__, 'alias', '0.5.1',
reason='Your version of the extension is not compatible with this version of the CLI.',
recommend_update=True))
def load_command_table(self, _):
with self.command_group('', operations_tmpl='azure.cli.command_modules.interactive.custom#{}') as g:
g.command('interactive', 'start_shell', is_preview=True)
return self.command_table
def load_arguments(self, _):
with self.argument_context('interactive') as c:
style_options = ['quiet', 'purple', 'default', 'none', 'contrast', 'pastel',
'halloween', 'grey', 'br', 'bg', 'primary', 'neon']
c.argument('style', options_list=['--style', '-s'], help='The colors of the shell.',
choices=style_options)
c.argument('update', help='Update the Interactive extension to the latest available.',
action='store_true')
c.ignore('_subscription')
COMMAND_LOADER_CLS = InteractiveCommandsLoader
| true
| true
|
1c42e633061f9e6448acc5fe0b62e464a3e38089
| 645
|
py
|
Python
|
models.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | 3
|
2022-03-19T21:30:10.000Z
|
2022-03-30T08:20:48.000Z
|
models.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | null | null | null |
from layers import *
def create_tabular_model(n_models, n_features, hidden_sizes=[512]*2, act='relu', **config):
layer_sizes = [n_features] + hidden_sizes + [1]
layers = []
for in_features, out_features in zip(layer_sizes[:-2], layer_sizes[1:-1]):
layers.append(ParallelLinearLayer(n_models, in_features, out_features, **config))
layers.append(get_parallel_act_layer(act))
layers.append(ParallelLinearLayer(n_models, layer_sizes[-2], layer_sizes[-1],
weight_init_mode='zero' if config.get('use_llz', False) else 'normal', **config))
return ParallelSequential(*layers)
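A minimal usage sketch, assuming the parallel layer classes from layers are importable and tolerate the extra config keyword arguments forwarded to them:

# An ensemble of 8 two-hidden-layer MLPs over 20 input features.
model = create_tabular_model(n_models=8, n_features=20,
                             hidden_sizes=[256, 256], act='relu')

# use_llz=True zero-initialises the last layer via the config kwargs.
llz_model = create_tabular_model(n_models=8, n_features=20, use_llz=True)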
| 46.071429
| 119
| 0.68062
|
from layers import *
def create_tabular_model(n_models, n_features, hidden_sizes=[512]*2, act='relu', **config):
layer_sizes = [n_features] + hidden_sizes + [1]
layers = []
for in_features, out_features in zip(layer_sizes[:-2], layer_sizes[1:-1]):
layers.append(ParallelLinearLayer(n_models, in_features, out_features, **config))
layers.append(get_parallel_act_layer(act))
layers.append(ParallelLinearLayer(n_models, layer_sizes[-2], layer_sizes[-1],
weight_init_mode='zero' if config.get('use_llz', False) else 'normal', **config))
return ParallelSequential(*layers)
| true
| true
|
1c42e886e3522fe82b8d2736f2b6fc0f4b73f2cb
| 1,052
|
py
|
Python
|
swing_open_loop.py
|
HuiminHe/PyDy
|
0834605bc2eed8d2768b50f55162bd6ac09cc694
|
[
"MIT"
] | null | null | null |
swing_open_loop.py
|
HuiminHe/PyDy
|
0834605bc2eed8d2768b50f55162bd6ac09cc694
|
[
"MIT"
] | null | null | null |
swing_open_loop.py
|
HuiminHe/PyDy
|
0834605bc2eed8d2768b50f55162bd6ac09cc694
|
[
"MIT"
] | null | null | null |
from scipy.integrate import odeint
from swing_config import *
f = cloudpickle.load(open('./swing_open_loop_dynamic.dll', 'rb'))
def fv_gen(amp, ome, phi, q_max):
return lambda t, y: amp * np.sin(ome * t + phi) / (1 + np.exp((np.abs(y[1:3])-q_max) / 0.01) * np.logical_or(np.abs(y[1:3]) < q_max, y[1:3] * y[4:] > 0))
def open_loop_test(amp, ome, phi):
amp = np.ones(2) * amp_max *amp
ome = np.ones(2) * ome_max * ome
phi = np.ones(2) * phi_max * phi
fv = fv_gen(amp, ome, phi, q_max)
q0 = np.array([np.pi/6, 0, 0])
a0 = np.array([0, 0])
v0 = fv(t0, np.r_[q0, np.zeros(3)])
y0 = np.r_[q0, 0, v0]
sol = odeint(f, y0, t, args=(param0, con0, a_max, fv, dt))
return Solution(t, sol, param0)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from swing_plot import swing_plot
from swing_anim import swing_animation
amp = 0
ome = 0
phi = 0
sol = open_loop_test(amp, ome, phi)
#fig = swing_plot(sol)
#plt.show(fig)
anim = swing_animation(sol)
plt.show(anim)
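A minimal numeric sketch of the forcing function returned by fv_gen, with the module above imported; the parameter values are hypothetical, and the state layout follows the indexing in the lambda (angles in y[1:3], matching velocities in y[4:]):

import numpy as np

amp = np.ones(2) * 0.5        # hypothetical amplitude
ome = np.ones(2) * 2.0        # hypothetical angular frequency
phi = np.zeros(2)             # hypothetical phase
fv = fv_gen(amp, ome, phi, q_max=np.pi / 2)

y = np.zeros(6)               # 6-element state, as built by open_loop_test
print(fv(0.0, y))             # [0. 0.] -- sin(0) forcing, joint limits inactive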
| 30.057143
| 157
| 0.606464
|
from scipy.integrate import odeint
from swing_config import *
f = cloudpickle.load(open('./swing_open_loop_dynamic.dll', 'rb'))
def fv_gen(amp, ome, phi, q_max):
return lambda t, y: amp * np.sin(ome * t + phi) / (1 + np.exp((np.abs(y[1:3])-q_max) / 0.01) * np.logical_or(np.abs(y[1:3]) < q_max, y[1:3] * y[4:] > 0))
def open_loop_test(amp, ome, phi):
amp = np.ones(2) * amp_max *amp
ome = np.ones(2) * ome_max * ome
phi = np.ones(2) * phi_max * phi
fv = fv_gen(amp, ome, phi, q_max)
q0 = np.array([np.pi/6, 0, 0])
a0 = np.array([0, 0])
v0 = fv(t0, np.r_[q0, np.zeros(3)])
y0 = np.r_[q0, 0, v0]
sol = odeint(f, y0, t, args=(param0, con0, a_max, fv, dt))
return Solution(t, sol, param0)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from swing_plot import swing_plot
from swing_anim import swing_animation
amp = 0
ome = 0
phi = 0
sol = open_loop_test(amp, ome, phi)
anim = swing_animation(sol)
plt.show(anim)
| true
| true
|
1c42e97e5faa6e0e8e85b2c77b113058f3869d5c
| 23,589
|
py
|
Python
|
aries_cloudagent/protocols/issue_credential/v2_0/formats/ld_proof/handler.py
|
mduffin95/aries-cloudagent-python
|
cb102bcb796dd6e7aec95eb4fe753b10f0c612b3
|
[
"Apache-2.0"
] | 247
|
2019-07-02T21:10:21.000Z
|
2022-03-30T13:55:33.000Z
|
aries_cloudagent/protocols/issue_credential/v2_0/formats/ld_proof/handler.py
|
estrehle/aries-cloudagent-python
|
1460b2d32c933944b4677cf25a78c4ace07346c8
|
[
"Apache-2.0"
] | 1,462
|
2019-07-02T20:57:30.000Z
|
2022-03-31T23:13:35.000Z
|
aries_cloudagent/protocols/issue_credential/v2_0/formats/ld_proof/handler.py
|
estrehle/aries-cloudagent-python
|
1460b2d32c933944b4677cf25a78c4ace07346c8
|
[
"Apache-2.0"
] | 377
|
2019-06-20T21:01:31.000Z
|
2022-03-30T08:27:53.000Z
|
"""V2.0 issue-credential linked data proof credential format handler."""
from ......vc.ld_proofs.error import LinkedDataProofException
from ......vc.ld_proofs.check import get_properties_without_context
import logging
from typing import Mapping
from marshmallow import EXCLUDE, INCLUDE
from pyld import jsonld
from pyld.jsonld import JsonLdProcessor
from ......did.did_key import DIDKey
from ......messaging.decorators.attach_decorator import AttachDecorator
from ......storage.vc_holder.base import VCHolder
from ......storage.vc_holder.vc_record import VCRecord
from ......vc.vc_ld import (
issue,
verify_credential,
VerifiableCredentialSchema,
LDProof,
VerifiableCredential,
)
from ......vc.ld_proofs import (
AuthenticationProofPurpose,
BbsBlsSignature2020,
CredentialIssuancePurpose,
DocumentLoader,
Ed25519Signature2018,
LinkedDataProof,
ProofPurpose,
WalletKeyPair,
)
from ......vc.ld_proofs.constants import SECURITY_CONTEXT_BBS_URL
from ......wallet.base import BaseWallet, DIDInfo
from ......wallet.error import WalletNotFoundError
from ......wallet.key_type import KeyType
from ...message_types import (
ATTACHMENT_FORMAT,
CRED_20_ISSUE,
CRED_20_OFFER,
CRED_20_PROPOSAL,
CRED_20_REQUEST,
)
from ...messages.cred_format import V20CredFormat
from ...messages.cred_issue import V20CredIssue
from ...messages.cred_offer import V20CredOffer
from ...messages.cred_proposal import V20CredProposal
from ...messages.cred_request import V20CredRequest
from ...models.cred_ex_record import V20CredExRecord
from ...models.detail.ld_proof import V20CredExRecordLDProof
from ..handler import CredFormatAttachment, V20CredFormatError, V20CredFormatHandler
from .models.cred_detail import LDProofVCDetailSchema
from .models.cred_detail import LDProofVCDetail
LOGGER = logging.getLogger(__name__)
SUPPORTED_ISSUANCE_PROOF_PURPOSES = {
CredentialIssuancePurpose.term,
AuthenticationProofPurpose.term,
}
SUPPORTED_ISSUANCE_SUITES = {Ed25519Signature2018}
SIGNATURE_SUITE_KEY_TYPE_MAPPING = {Ed25519Signature2018: KeyType.ED25519}
# We only want to add bbs suites to supported if the module is installed
if BbsBlsSignature2020.BBS_SUPPORTED:
SUPPORTED_ISSUANCE_SUITES.add(BbsBlsSignature2020)
SIGNATURE_SUITE_KEY_TYPE_MAPPING[BbsBlsSignature2020] = KeyType.BLS12381G2
PROOF_TYPE_SIGNATURE_SUITE_MAPPING = {
suite.signature_type: suite
for suite, key_type in SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
KEY_TYPE_SIGNATURE_SUITE_MAPPING = {
key_type: suite for suite, key_type in SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
class LDProofCredFormatHandler(V20CredFormatHandler):
"""Linked data proof credential format handler."""
format = V20CredFormat.Format.LD_PROOF
@classmethod
def validate_fields(cls, message_type: str, attachment_data: Mapping) -> None:
"""Validate attachment data for a specific message type.
Uses marshmallow schemas to validate if format specific attachment data
is valid for the specified message type. Only does structural and type
        checks, does not validate if e.g. the issuer value is valid.
Args:
message_type (str): The message type to validate the attachment data for.
Should be one of the message types as defined in message_types.py
            attachment_data (Mapping):
                The attachment data to validate
Raises:
Exception: When the data is not valid.
"""
mapping = {
CRED_20_PROPOSAL: LDProofVCDetailSchema,
CRED_20_OFFER: LDProofVCDetailSchema,
CRED_20_REQUEST: LDProofVCDetailSchema,
CRED_20_ISSUE: VerifiableCredentialSchema,
}
# Get schema class
Schema = mapping[message_type]
# Validate, throw if not valid
Schema(unknown=EXCLUDE).load(attachment_data)
async def get_detail_record(self, cred_ex_id: str) -> V20CredExRecordLDProof:
"""Retrieve credential exchange detail record by cred_ex_id."""
async with self.profile.session() as session:
records = await LDProofCredFormatHandler.format.detail.query_by_cred_ex_id(
session, cred_ex_id
)
if len(records) > 1:
LOGGER.warning(
"Cred ex id %s has %d %s detail records: should be 1",
cred_ex_id,
len(records),
LDProofCredFormatHandler.format.api,
)
return records[0] if records else None
def get_format_identifier(self, message_type: str) -> str:
"""Get attachment format identifier for format and message combination.
Args:
message_type (str): Message type for which to return the format identifier
Returns:
str: Issue credential attachment format identifier
"""
return ATTACHMENT_FORMAT[message_type][LDProofCredFormatHandler.format.api]
def get_format_data(self, message_type: str, data: dict) -> CredFormatAttachment:
"""Get credential format and attachment objects for use in cred ex messages.
Returns a tuple of both credential format and attachment decorator for use
in credential exchange messages. It looks up the correct format identifier and
encodes the data as a base64 attachment.
Args:
message_type (str): The message type for which to return the cred format.
Should be one of the message types defined in the message types file
data (dict): The data to include in the attach decorator
Returns:
CredFormatAttachment: Credential format and attachment data objects
"""
return (
V20CredFormat(
attach_id=LDProofCredFormatHandler.format.api,
format_=self.get_format_identifier(message_type),
),
AttachDecorator.data_base64(
data, ident=LDProofCredFormatHandler.format.api
),
)
async def _assert_can_issue_with_id_and_proof_type(
self, issuer_id: str, proof_type: str
):
"""Assert that it is possible to issue using the specified id and proof type.
Args:
issuer_id (str): The issuer id
proof_type (str): the signature suite proof type
Raises:
V20CredFormatError:
- If the proof type is not supported
- If the issuer id is not a did
                - If the did is not found in the wallet
- If the did does not support to create signatures for the proof type
"""
try:
# Check if it is a proof type we can issue with
if proof_type not in PROOF_TYPE_SIGNATURE_SUITE_MAPPING.keys():
raise V20CredFormatError(
f"Unable to sign credential with unsupported proof type {proof_type}."
f" Supported proof types: {PROOF_TYPE_SIGNATURE_SUITE_MAPPING.keys()}"
)
if not issuer_id.startswith("did:"):
raise V20CredFormatError(
f"Unable to issue credential with issuer id: {issuer_id}."
" Only issuance with DIDs is supported"
)
# Retrieve did from wallet. Will throw if not found
did = await self._did_info_for_did(issuer_id)
# Raise error if we cannot issue a credential with this proof type
            # using this DID from the wallet
did_proof_type = KEY_TYPE_SIGNATURE_SUITE_MAPPING[
did.key_type
].signature_type
if proof_type != did_proof_type:
raise V20CredFormatError(
f"Unable to issue credential with issuer id {issuer_id} and proof "
f"type {proof_type}. DID only supports proof type {did_proof_type}"
)
except WalletNotFoundError:
raise V20CredFormatError(
f"Issuer did {issuer_id} not found."
" Unable to issue credential with this DID."
)
async def _did_info_for_did(self, did: str) -> DIDInfo:
"""Get the did info for specified did.
        If the did starts with did:sov the prefix is removed for
        backwards compatibility with non-fully-qualified DIDs.
Args:
did (str): The did to retrieve from the wallet.
Raises:
WalletNotFoundError: If the did is not found in the wallet.
Returns:
DIDInfo: did information
"""
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
# If the did starts with did:sov we need to query without
if did.startswith("did:sov:"):
return await wallet.get_local_did(did.replace("did:sov:", ""))
# All other methods we can just query
return await wallet.get_local_did(did)
async def _get_suite_for_detail(self, detail: LDProofVCDetail) -> LinkedDataProof:
issuer_id = detail.credential.issuer_id
proof_type = detail.options.proof_type
# Assert we can issue the credential based on issuer + proof_type
await self._assert_can_issue_with_id_and_proof_type(issuer_id, proof_type)
# Create base proof object with options from detail
proof = LDProof(
created=detail.options.created,
domain=detail.options.domain,
challenge=detail.options.challenge,
)
did_info = await self._did_info_for_did(issuer_id)
verification_method = self._get_verification_method(issuer_id)
suite = await self._get_suite(
proof_type=proof_type,
verification_method=verification_method,
proof=proof.serialize(),
did_info=did_info,
)
return suite
async def _get_suite(
self,
*,
proof_type: str,
verification_method: str = None,
proof: dict = None,
did_info: DIDInfo = None,
):
"""Get signature suite for issuance of verification."""
session = await self.profile.session()
wallet = session.inject(BaseWallet)
# Get signature class based on proof type
SignatureClass = PROOF_TYPE_SIGNATURE_SUITE_MAPPING[proof_type]
# Generically create signature class
return SignatureClass(
verification_method=verification_method,
proof=proof,
key_pair=WalletKeyPair(
wallet=wallet,
key_type=SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass],
public_key_base58=did_info.verkey if did_info else None,
),
)
def _get_verification_method(self, did: str):
"""Get the verification method for a did."""
if did.startswith("did:key:"):
return DIDKey.from_did(did).key_id
elif did.startswith("did:sov:"):
# key-1 is what the resolver uses for key id
return did + "#key-1"
else:
raise V20CredFormatError(
f"Unable to get retrieve verification method for did {did}"
)
def _get_proof_purpose(
self, *, proof_purpose: str = None, challenge: str = None, domain: str = None
) -> ProofPurpose:
"""Get the proof purpose for a credential detail.
Args:
proof_purpose (str): The proof purpose string value
challenge (str, optional): Challenge
domain (str, optional): domain
Raises:
V20CredFormatError:
- If the proof purpose is not supported.
- [authentication] If challenge is missing.
Returns:
ProofPurpose: Proof purpose instance that can be used for issuance.
"""
# Default proof purpose is assertionMethod
proof_purpose = proof_purpose or CredentialIssuancePurpose.term
if proof_purpose == CredentialIssuancePurpose.term:
return CredentialIssuancePurpose()
elif proof_purpose == AuthenticationProofPurpose.term:
# assert challenge is present for authentication proof purpose
if not challenge:
raise V20CredFormatError(
f"Challenge is required for '{proof_purpose}' proof purpose."
)
return AuthenticationProofPurpose(challenge=challenge, domain=domain)
else:
raise V20CredFormatError(
f"Unsupported proof purpose: {proof_purpose}. "
f"Supported proof types are: {SUPPORTED_ISSUANCE_PROOF_PURPOSES}"
)
async def _prepare_detail(
self, detail: LDProofVCDetail, holder_did: str = None
) -> LDProofVCDetail:
# Add BBS context if not present yet
if (
detail.options.proof_type == BbsBlsSignature2020.signature_type
and SECURITY_CONTEXT_BBS_URL not in detail.credential.context_urls
):
detail.credential.add_context(SECURITY_CONTEXT_BBS_URL)
# add holder_did as credentialSubject.id (if provided)
if holder_did and holder_did.startswith("did:key"):
detail.credential.credential_subject["id"] = holder_did
return detail
async def create_proposal(
self, cred_ex_record: V20CredExRecord, proposal_data: Mapping
) -> CredFormatAttachment:
"""Create linked data proof credential proposal."""
detail = LDProofVCDetail.deserialize(proposal_data)
detail = await self._prepare_detail(detail)
return self.get_format_data(CRED_20_PROPOSAL, detail.serialize())
async def receive_proposal(
self, cred_ex_record: V20CredExRecord, cred_proposal_message: V20CredProposal
) -> None:
"""Receive linked data proof credential proposal."""
async def create_offer(
self, cred_proposal_message: V20CredProposal
) -> CredFormatAttachment:
"""Create linked data proof credential offer."""
if not cred_proposal_message:
raise V20CredFormatError(
"Cannot create linked data proof offer without proposal data"
)
# Parse offer data which is either a proposal or an offer.
# Data is stored in proposal if we received a proposal
# but also when we create an offer (manager does some weird stuff)
offer_data = cred_proposal_message.attachment(LDProofCredFormatHandler.format)
detail = LDProofVCDetail.deserialize(offer_data)
detail = await self._prepare_detail(detail)
document_loader = self.profile.inject(DocumentLoader)
missing_properties = get_properties_without_context(
detail.credential.serialize(), document_loader
)
if len(missing_properties) > 0:
raise LinkedDataProofException(
f"{len(missing_properties)} attributes dropped. "
f"Provide definitions in context to correct. {missing_properties}"
)
# Make sure we can issue with the did and proof type
await self._assert_can_issue_with_id_and_proof_type(
detail.credential.issuer_id, detail.options.proof_type
)
return self.get_format_data(CRED_20_OFFER, detail.serialize())
async def receive_offer(
self, cred_ex_record: V20CredExRecord, cred_offer_message: V20CredOffer
) -> None:
"""Receive linked data proof credential offer."""
async def create_request(
self, cred_ex_record: V20CredExRecord, request_data: Mapping = None
) -> CredFormatAttachment:
"""Create linked data proof credential request."""
holder_did = request_data.get("holder_did") if request_data else None
if cred_ex_record.cred_offer:
request_data = cred_ex_record.cred_offer.attachment(
LDProofCredFormatHandler.format
)
# API data is stored in proposal (when starting from request)
        # It is a bit of a strange flow IMO.
elif cred_ex_record.cred_proposal:
request_data = cred_ex_record.cred_proposal.attachment(
LDProofCredFormatHandler.format
)
else:
raise V20CredFormatError(
"Cannot create linked data proof request without offer or input data"
)
detail = LDProofVCDetail.deserialize(request_data)
detail = await self._prepare_detail(detail, holder_did=holder_did)
return self.get_format_data(CRED_20_REQUEST, detail.serialize())
async def receive_request(
self, cred_ex_record: V20CredExRecord, cred_request_message: V20CredRequest
) -> None:
"""Receive linked data proof request."""
async def issue_credential(
self, cred_ex_record: V20CredExRecord, retries: int = 5
) -> CredFormatAttachment:
"""Issue linked data proof credential."""
if not cred_ex_record.cred_request:
raise V20CredFormatError(
"Cannot issue credential without credential request"
)
detail_dict = cred_ex_record.cred_request.attachment(
LDProofCredFormatHandler.format
)
detail = LDProofVCDetail.deserialize(detail_dict)
detail = await self._prepare_detail(detail)
# Get signature suite, proof purpose and document loader
suite = await self._get_suite_for_detail(detail)
proof_purpose = self._get_proof_purpose(
proof_purpose=detail.options.proof_purpose,
challenge=detail.options.challenge,
domain=detail.options.domain,
)
document_loader = self.profile.inject(DocumentLoader)
# issue the credential
vc = await issue(
credential=detail.credential.serialize(),
suite=suite,
document_loader=document_loader,
purpose=proof_purpose,
)
return self.get_format_data(CRED_20_ISSUE, vc)
async def receive_credential(
self, cred_ex_record: V20CredExRecord, cred_issue_message: V20CredIssue
) -> None:
"""Receive linked data proof credential."""
cred_dict = cred_issue_message.attachment(LDProofCredFormatHandler.format)
detail_dict = cred_ex_record.cred_request.attachment(
LDProofCredFormatHandler.format
)
vc = VerifiableCredential.deserialize(cred_dict, unknown=INCLUDE)
detail = LDProofVCDetail.deserialize(detail_dict)
# Remove values from cred that are not part of detail
cred_dict.pop("proof")
credential_status = cred_dict.pop("credentialStatus", None)
detail_status = detail.options.credential_status
if cred_dict != detail_dict["credential"]:
raise V20CredFormatError(
f"Received credential for cred_ex_id {cred_ex_record.cred_ex_id} does not"
" match requested credential"
)
# both credential and detail contain status. Check for equalness
if credential_status and detail_status:
if credential_status.get("type") != detail_status.get("type"):
raise V20CredFormatError(
"Received credential status type does not match credential request"
)
# Either credential or detail contains status. Throw error
elif (credential_status and not detail_status) or (
not credential_status and detail_status
):
raise V20CredFormatError(
"Received credential status contains credential status"
" that does not match credential request"
)
# TODO: if created wasn't present in the detail options, should we verify
# it is ~now (e.g. some time in the past + future)?
# Check if created property matches
if detail.options.created and vc.proof.created != detail.options.created:
raise V20CredFormatError(
"Received credential proof.created does not"
" match options.created from credential request"
)
# Check challenge
if vc.proof.challenge != detail.options.challenge:
raise V20CredFormatError(
"Received credential proof.challenge does not"
" match options.challenge from credential request"
)
# Check domain
if vc.proof.domain != detail.options.domain:
raise V20CredFormatError(
"Received credential proof.domain does not"
" match options.domain from credential request"
)
# Check if proof type matches
if vc.proof.type != detail.options.proof_type:
raise V20CredFormatError(
"Received credential proof.type does not"
" match options.proofType from credential request"
)
async def store_credential(
self, cred_ex_record: V20CredExRecord, cred_id: str = None
) -> None:
"""Store linked data proof credential."""
# Get attachment data
cred_dict: dict = cred_ex_record.cred_issue.attachment(
LDProofCredFormatHandler.format
)
# Deserialize objects
credential = VerifiableCredential.deserialize(cred_dict, unknown=INCLUDE)
# Get signature suite, proof purpose and document loader
suite = await self._get_suite(proof_type=credential.proof.type)
purpose = self._get_proof_purpose(
proof_purpose=credential.proof.proof_purpose,
challenge=credential.proof.challenge,
domain=credential.proof.domain,
)
document_loader = self.profile.inject(DocumentLoader)
# Verify the credential
result = await verify_credential(
credential=cred_dict,
suites=[suite],
document_loader=document_loader,
purpose=purpose,
)
if not result.verified:
raise V20CredFormatError(f"Received invalid credential: {result}")
# Saving expanded type as a cred_tag
expanded = jsonld.expand(cred_dict)
types = JsonLdProcessor.get_values(
expanded[0],
"@type",
)
# create VC record for storage
vc_record = VCRecord(
contexts=credential.context_urls,
expanded_types=types,
issuer_id=credential.issuer_id,
subject_ids=credential.credential_subject_ids,
schema_ids=[], # Schemas not supported yet
proof_types=[credential.proof.type],
cred_value=credential.serialize(),
given_id=credential.id,
record_id=cred_id,
cred_tags=None, # Tags should be derived from credential values
)
# Create detail record with cred_id_stored
detail_record = V20CredExRecordLDProof(
cred_ex_id=cred_ex_record.cred_ex_id, cred_id_stored=vc_record.record_id
)
# save credential and detail record
async with self.profile.session() as session:
vc_holder = session.inject(VCHolder)
await vc_holder.store_credential(vc_record)
# Store detail record, emit event
await detail_record.save(
session, reason="store credential v2.0", event=True
)
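A minimal standalone sketch of the verification-method rule used by _get_verification_method above, reproducing only the did:sov branch (the did:key branch needs the DIDKey helper); the DID value is a hypothetical placeholder:

def verification_method_for_sov(did: str) -> str:
    # did:sov DIDs expose their signing key under the "#key-1" fragment.
    assert did.startswith("did:sov:")
    return did + "#key-1"

print(verification_method_for_sov("did:sov:WgWxqztrNooG92RXvxSTWv"))
# did:sov:WgWxqztrNooG92RXvxSTWv#key-1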
| 37.562102
| 90
| 0.64937
|
from ......vc.ld_proofs.error import LinkedDataProofException
from ......vc.ld_proofs.check import get_properties_without_context
import logging
from typing import Mapping
from marshmallow import EXCLUDE, INCLUDE
from pyld import jsonld
from pyld.jsonld import JsonLdProcessor
from ......did.did_key import DIDKey
from ......messaging.decorators.attach_decorator import AttachDecorator
from ......storage.vc_holder.base import VCHolder
from ......storage.vc_holder.vc_record import VCRecord
from ......vc.vc_ld import (
issue,
verify_credential,
VerifiableCredentialSchema,
LDProof,
VerifiableCredential,
)
from ......vc.ld_proofs import (
AuthenticationProofPurpose,
BbsBlsSignature2020,
CredentialIssuancePurpose,
DocumentLoader,
Ed25519Signature2018,
LinkedDataProof,
ProofPurpose,
WalletKeyPair,
)
from ......vc.ld_proofs.constants import SECURITY_CONTEXT_BBS_URL
from ......wallet.base import BaseWallet, DIDInfo
from ......wallet.error import WalletNotFoundError
from ......wallet.key_type import KeyType
from ...message_types import (
ATTACHMENT_FORMAT,
CRED_20_ISSUE,
CRED_20_OFFER,
CRED_20_PROPOSAL,
CRED_20_REQUEST,
)
from ...messages.cred_format import V20CredFormat
from ...messages.cred_issue import V20CredIssue
from ...messages.cred_offer import V20CredOffer
from ...messages.cred_proposal import V20CredProposal
from ...messages.cred_request import V20CredRequest
from ...models.cred_ex_record import V20CredExRecord
from ...models.detail.ld_proof import V20CredExRecordLDProof
from ..handler import CredFormatAttachment, V20CredFormatError, V20CredFormatHandler
from .models.cred_detail import LDProofVCDetailSchema
from .models.cred_detail import LDProofVCDetail
LOGGER = logging.getLogger(__name__)
SUPPORTED_ISSUANCE_PROOF_PURPOSES = {
CredentialIssuancePurpose.term,
AuthenticationProofPurpose.term,
}
SUPPORTED_ISSUANCE_SUITES = {Ed25519Signature2018}
SIGNATURE_SUITE_KEY_TYPE_MAPPING = {Ed25519Signature2018: KeyType.ED25519}
if BbsBlsSignature2020.BBS_SUPPORTED:
SUPPORTED_ISSUANCE_SUITES.add(BbsBlsSignature2020)
SIGNATURE_SUITE_KEY_TYPE_MAPPING[BbsBlsSignature2020] = KeyType.BLS12381G2
PROOF_TYPE_SIGNATURE_SUITE_MAPPING = {
suite.signature_type: suite
for suite, key_type in SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
KEY_TYPE_SIGNATURE_SUITE_MAPPING = {
key_type: suite for suite, key_type in SIGNATURE_SUITE_KEY_TYPE_MAPPING.items()
}
class LDProofCredFormatHandler(V20CredFormatHandler):
format = V20CredFormat.Format.LD_PROOF
@classmethod
def validate_fields(cls, message_type: str, attachment_data: Mapping) -> None:
mapping = {
CRED_20_PROPOSAL: LDProofVCDetailSchema,
CRED_20_OFFER: LDProofVCDetailSchema,
CRED_20_REQUEST: LDProofVCDetailSchema,
CRED_20_ISSUE: VerifiableCredentialSchema,
}
Schema = mapping[message_type]
Schema(unknown=EXCLUDE).load(attachment_data)
async def get_detail_record(self, cred_ex_id: str) -> V20CredExRecordLDProof:
async with self.profile.session() as session:
records = await LDProofCredFormatHandler.format.detail.query_by_cred_ex_id(
session, cred_ex_id
)
if len(records) > 1:
LOGGER.warning(
"Cred ex id %s has %d %s detail records: should be 1",
cred_ex_id,
len(records),
LDProofCredFormatHandler.format.api,
)
return records[0] if records else None
def get_format_identifier(self, message_type: str) -> str:
return ATTACHMENT_FORMAT[message_type][LDProofCredFormatHandler.format.api]
def get_format_data(self, message_type: str, data: dict) -> CredFormatAttachment:
return (
V20CredFormat(
attach_id=LDProofCredFormatHandler.format.api,
format_=self.get_format_identifier(message_type),
),
AttachDecorator.data_base64(
data, ident=LDProofCredFormatHandler.format.api
),
)
async def _assert_can_issue_with_id_and_proof_type(
self, issuer_id: str, proof_type: str
):
try:
if proof_type not in PROOF_TYPE_SIGNATURE_SUITE_MAPPING.keys():
raise V20CredFormatError(
f"Unable to sign credential with unsupported proof type {proof_type}."
f" Supported proof types: {PROOF_TYPE_SIGNATURE_SUITE_MAPPING.keys()}"
)
if not issuer_id.startswith("did:"):
raise V20CredFormatError(
f"Unable to issue credential with issuer id: {issuer_id}."
" Only issuance with DIDs is supported"
)
did = await self._did_info_for_did(issuer_id)
did_proof_type = KEY_TYPE_SIGNATURE_SUITE_MAPPING[
did.key_type
].signature_type
if proof_type != did_proof_type:
raise V20CredFormatError(
f"Unable to issue credential with issuer id {issuer_id} and proof "
f"type {proof_type}. DID only supports proof type {did_proof_type}"
)
except WalletNotFoundError:
raise V20CredFormatError(
f"Issuer did {issuer_id} not found."
" Unable to issue credential with this DID."
)
async def _did_info_for_did(self, did: str) -> DIDInfo:
async with self.profile.session() as session:
wallet = session.inject(BaseWallet)
if did.startswith("did:sov:"):
return await wallet.get_local_did(did.replace("did:sov:", ""))
return await wallet.get_local_did(did)
async def _get_suite_for_detail(self, detail: LDProofVCDetail) -> LinkedDataProof:
issuer_id = detail.credential.issuer_id
proof_type = detail.options.proof_type
await self._assert_can_issue_with_id_and_proof_type(issuer_id, proof_type)
proof = LDProof(
created=detail.options.created,
domain=detail.options.domain,
challenge=detail.options.challenge,
)
did_info = await self._did_info_for_did(issuer_id)
verification_method = self._get_verification_method(issuer_id)
suite = await self._get_suite(
proof_type=proof_type,
verification_method=verification_method,
proof=proof.serialize(),
did_info=did_info,
)
return suite
async def _get_suite(
self,
*,
proof_type: str,
verification_method: str = None,
proof: dict = None,
did_info: DIDInfo = None,
):
session = await self.profile.session()
wallet = session.inject(BaseWallet)
SignatureClass = PROOF_TYPE_SIGNATURE_SUITE_MAPPING[proof_type]
return SignatureClass(
verification_method=verification_method,
proof=proof,
key_pair=WalletKeyPair(
wallet=wallet,
key_type=SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass],
public_key_base58=did_info.verkey if did_info else None,
),
)
def _get_verification_method(self, did: str):
if did.startswith("did:key:"):
return DIDKey.from_did(did).key_id
elif did.startswith("did:sov:"):
return did + "#key-1"
else:
raise V20CredFormatError(
f"Unable to get retrieve verification method for did {did}"
)
def _get_proof_purpose(
self, *, proof_purpose: str = None, challenge: str = None, domain: str = None
) -> ProofPurpose:
proof_purpose = proof_purpose or CredentialIssuancePurpose.term
if proof_purpose == CredentialIssuancePurpose.term:
return CredentialIssuancePurpose()
elif proof_purpose == AuthenticationProofPurpose.term:
if not challenge:
raise V20CredFormatError(
f"Challenge is required for '{proof_purpose}' proof purpose."
)
return AuthenticationProofPurpose(challenge=challenge, domain=domain)
else:
raise V20CredFormatError(
f"Unsupported proof purpose: {proof_purpose}. "
f"Supported proof types are: {SUPPORTED_ISSUANCE_PROOF_PURPOSES}"
)
async def _prepare_detail(
self, detail: LDProofVCDetail, holder_did: str = None
) -> LDProofVCDetail:
if (
detail.options.proof_type == BbsBlsSignature2020.signature_type
and SECURITY_CONTEXT_BBS_URL not in detail.credential.context_urls
):
detail.credential.add_context(SECURITY_CONTEXT_BBS_URL)
if holder_did and holder_did.startswith("did:key"):
detail.credential.credential_subject["id"] = holder_did
return detail
async def create_proposal(
self, cred_ex_record: V20CredExRecord, proposal_data: Mapping
) -> CredFormatAttachment:
detail = LDProofVCDetail.deserialize(proposal_data)
detail = await self._prepare_detail(detail)
return self.get_format_data(CRED_20_PROPOSAL, detail.serialize())
async def receive_proposal(
self, cred_ex_record: V20CredExRecord, cred_proposal_message: V20CredProposal
    ) -> None:
        pass
async def create_offer(
self, cred_proposal_message: V20CredProposal
) -> CredFormatAttachment:
if not cred_proposal_message:
raise V20CredFormatError(
"Cannot create linked data proof offer without proposal data"
)
offer_data = cred_proposal_message.attachment(LDProofCredFormatHandler.format)
detail = LDProofVCDetail.deserialize(offer_data)
detail = await self._prepare_detail(detail)
document_loader = self.profile.inject(DocumentLoader)
missing_properties = get_properties_without_context(
detail.credential.serialize(), document_loader
)
if len(missing_properties) > 0:
raise LinkedDataProofException(
f"{len(missing_properties)} attributes dropped. "
f"Provide definitions in context to correct. {missing_properties}"
)
await self._assert_can_issue_with_id_and_proof_type(
detail.credential.issuer_id, detail.options.proof_type
)
return self.get_format_data(CRED_20_OFFER, detail.serialize())
async def receive_offer(
self, cred_ex_record: V20CredExRecord, cred_offer_message: V20CredOffer
    ) -> None:
        pass
async def create_request(
self, cred_ex_record: V20CredExRecord, request_data: Mapping = None
) -> CredFormatAttachment:
holder_did = request_data.get("holder_did") if request_data else None
if cred_ex_record.cred_offer:
request_data = cred_ex_record.cred_offer.attachment(
LDProofCredFormatHandler.format
)
elif cred_ex_record.cred_proposal:
request_data = cred_ex_record.cred_proposal.attachment(
LDProofCredFormatHandler.format
)
else:
raise V20CredFormatError(
"Cannot create linked data proof request without offer or input data"
)
detail = LDProofVCDetail.deserialize(request_data)
detail = await self._prepare_detail(detail, holder_did=holder_did)
return self.get_format_data(CRED_20_REQUEST, detail.serialize())
async def receive_request(
self, cred_ex_record: V20CredExRecord, cred_request_message: V20CredRequest
    ) -> None:
        pass
async def issue_credential(
self, cred_ex_record: V20CredExRecord, retries: int = 5
) -> CredFormatAttachment:
if not cred_ex_record.cred_request:
raise V20CredFormatError(
"Cannot issue credential without credential request"
)
detail_dict = cred_ex_record.cred_request.attachment(
LDProofCredFormatHandler.format
)
detail = LDProofVCDetail.deserialize(detail_dict)
detail = await self._prepare_detail(detail)
suite = await self._get_suite_for_detail(detail)
proof_purpose = self._get_proof_purpose(
proof_purpose=detail.options.proof_purpose,
challenge=detail.options.challenge,
domain=detail.options.domain,
)
document_loader = self.profile.inject(DocumentLoader)
vc = await issue(
credential=detail.credential.serialize(),
suite=suite,
document_loader=document_loader,
purpose=proof_purpose,
)
return self.get_format_data(CRED_20_ISSUE, vc)
async def receive_credential(
self, cred_ex_record: V20CredExRecord, cred_issue_message: V20CredIssue
) -> None:
cred_dict = cred_issue_message.attachment(LDProofCredFormatHandler.format)
detail_dict = cred_ex_record.cred_request.attachment(
LDProofCredFormatHandler.format
)
vc = VerifiableCredential.deserialize(cred_dict, unknown=INCLUDE)
detail = LDProofVCDetail.deserialize(detail_dict)
cred_dict.pop("proof")
credential_status = cred_dict.pop("credentialStatus", None)
detail_status = detail.options.credential_status
if cred_dict != detail_dict["credential"]:
raise V20CredFormatError(
f"Received credential for cred_ex_id {cred_ex_record.cred_ex_id} does not"
" match requested credential"
)
if credential_status and detail_status:
if credential_status.get("type") != detail_status.get("type"):
raise V20CredFormatError(
"Received credential status type does not match credential request"
)
elif (credential_status and not detail_status) or (
not credential_status and detail_status
):
raise V20CredFormatError(
"Received credential status contains credential status"
" that does not match credential request"
)
        # TODO: if created wasn't present in the detail options, should we verify it is ~now?
# Check if created property matches
if detail.options.created and vc.proof.created != detail.options.created:
raise V20CredFormatError(
"Received credential proof.created does not"
" match options.created from credential request"
)
# Check challenge
if vc.proof.challenge != detail.options.challenge:
raise V20CredFormatError(
"Received credential proof.challenge does not"
" match options.challenge from credential request"
)
# Check domain
if vc.proof.domain != detail.options.domain:
raise V20CredFormatError(
"Received credential proof.domain does not"
" match options.domain from credential request"
)
# Check if proof type matches
if vc.proof.type != detail.options.proof_type:
raise V20CredFormatError(
"Received credential proof.type does not"
" match options.proofType from credential request"
)
async def store_credential(
self, cred_ex_record: V20CredExRecord, cred_id: str = None
) -> None:
# Get attachment data
cred_dict: dict = cred_ex_record.cred_issue.attachment(
LDProofCredFormatHandler.format
)
# Deserialize objects
credential = VerifiableCredential.deserialize(cred_dict, unknown=INCLUDE)
# Get signature suite, proof purpose and document loader
suite = await self._get_suite(proof_type=credential.proof.type)
purpose = self._get_proof_purpose(
proof_purpose=credential.proof.proof_purpose,
challenge=credential.proof.challenge,
domain=credential.proof.domain,
)
document_loader = self.profile.inject(DocumentLoader)
# Verify the credential
result = await verify_credential(
credential=cred_dict,
suites=[suite],
document_loader=document_loader,
purpose=purpose,
)
if not result.verified:
raise V20CredFormatError(f"Received invalid credential: {result}")
# Saving expanded type as a cred_tag
expanded = jsonld.expand(cred_dict)
types = JsonLdProcessor.get_values(
expanded[0],
"@type",
)
# create VC record for storage
vc_record = VCRecord(
contexts=credential.context_urls,
expanded_types=types,
issuer_id=credential.issuer_id,
subject_ids=credential.credential_subject_ids,
schema_ids=[], # Schemas not supported yet
proof_types=[credential.proof.type],
cred_value=credential.serialize(),
given_id=credential.id,
record_id=cred_id,
cred_tags=None, # Tags should be derived from credential values
)
# Create detail record with cred_id_stored
detail_record = V20CredExRecordLDProof(
cred_ex_id=cred_ex_record.cred_ex_id, cred_id_stored=vc_record.record_id
)
# save credential and detail record
async with self.profile.session() as session:
vc_holder = session.inject(VCHolder)
await vc_holder.store_credential(vc_record)
# Store detail record, emit event
await detail_record.save(
session, reason="store credential v2.0", event=True
)
| true
| true
|
1c42e9958b507431d118de6f764798d653b61351
| 12,333
|
py
|
Python
|
app/originblog/models.py
|
ZhangPeng18/originblog
|
c52365e765dff060804043d709eccfb1a1c6f1ff
|
[
"MIT"
] | null | null | null |
app/originblog/models.py
|
ZhangPeng18/originblog
|
c52365e765dff060804043d709eccfb1a1c6f1ff
|
[
"MIT"
] | null | null | null |
app/originblog/models.py
|
ZhangPeng18/originblog
|
c52365e765dff060804043d709eccfb1a1c6f1ff
|
[
"MIT"
] | null | null | null |
import hashlib
import re
from datetime import datetime
from urllib.parse import urlencode
import bleach
import markdown2
from flask import current_app
from flask_login import UserMixin
from itsdangerous import BadTimeSignature, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from unidecode import unidecode
from werkzeug.security import generate_password_hash, check_password_hash
from .extensions import db
from .settings import BlogSettings
from .settings import Operations
# Load blog configuration settings
COMMENT_STATUS = BlogSettings.COMMENT_STATUS
GRAVATAR_CDN_BASE = BlogSettings.GRAVATAR_CDN_BASE
GRAVATAR_DEFAULT_IMAGE = BlogSettings.GRAVATAR_DEFAULT_IMAGE
SOCIAL_NETWORKS = BlogSettings.SOCIAL_NETWORKS
ROLE_PERMISSION_MAP = BlogSettings.ROLE_PERMISSION_MAP
# Compile the regex used to split titles when generating slugs
_punct_re = re.compile(r'[\t !"#$%&\-/<=>?@\[\\\]^_`{|},.]+')
def get_clean_html_content(html_content):
"""对转换成HTML的markdown文本进行消毒"""
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'br', 'blockquote', 'code',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'hr', 'img',
'table', 'thead', 'tbody', 'tr', 'th', 'td',
'sup', 'sub']
allowed_attrs = {
'*': ['class'],
'a': ['href', 'rel', 'name'],
'img': ['alt', 'src', 'title']
}
html_content = bleach.linkify(bleach.clean(html_content, tags=allowed_tags, attributes=allowed_attrs, strip=True))
return html_content
def slugify(text, delim=u'-'):
"""Generates a ASCII-only slug"""
result = []
for word in _punct_re.split(text.lower()):
result.extend(unidecode(word).lower().split())
return unidecode(delim.join(result))[:230]
class Role(db.Document):
"""定义角色与权限模型"""
role_name = db.StringField(default='reader')
permissions = db.ListField(db.StringField(unique=True))
@classmethod
def init(cls):
"""初始化角色与对应的权限,支持角色的增加和更新"""
for role_name in ROLE_PERMISSION_MAP:
role = cls.objects.filter(role_name=role_name).first()
if role is None:
role = cls(role_name=role_name)
            role.permissions = []  # Clear the role's permissions on every initialization
for permission in ROLE_PERMISSION_MAP[role_name]:
role.permissions.append(permission)
role.save()
class User(db.Document, UserMixin):
"""定义用户数据模型"""
username = db.StringField(max_length=20, required=True, unique=True)
password_hash = db.StringField(max_length=128, required=True)
name = db.StringField(max_length=30, default=username)
email = db.EmailField(max_length=255, required=True, unique=True)
create_time = db.DateTimeField(default=datetime.utcnow, required=True)
last_login = db.DateTimeField(default=datetime.utcnow, required=True)
email_confirmed = db.BooleanField(default=False)
    role = db.ReferenceField('Role')  # TODO: cascade behaviour after a role is deleted; roles cannot be deleted for now
bio = db.StringField(max_length=200)
homepage = db.StringField(max_length=255)
social_networks = db.DictField(default=SOCIAL_NETWORKS)
active = db.BooleanField(default=True)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def validate_password(self, password):
return check_password_hash(self.password_hash, password)
def get_id(self):
"""Flask Login所需要实现的取得有效ID的方法"""
try:
return self.username
except AttributeError:
raise NotImplementedError('No `username` attribute - override `get_id`')
def generate_token(self, operation, expire_in=None, **kwargs):
"""生成私密操作所需要的验证token。
使用itsdangerous提供的jws序列化器将用户信息、操作类型等序列化成token
:param self: 用户对象
:param operation: 操作类型 Operations 类属性
:param expire_in: 过期时间(秒),默认值None时为一个小时
:param kwargs: 其他需要序列化的关键字参数(如新的邮箱地址)
:return: 序列化生成的token
"""
        s = Serializer(current_app.config['SECRET_KEY'], expire_in)  # Instantiate a JWS serializer with the secret key and an expiry time in seconds
data = {
'username': self.username,
'operation': operation
}
data.update(**kwargs)
return s.dumps(data)
def validate_token(self, token, operation, new_password=None):
"""验证token,并根据token携带的数据执行相应操作。
:param self: 用户对象
:param token: token字符串
:param operation: 要验证的操作类型(确认邮箱、重置密码或修改邮箱)
:param new_password: 若操作类型为重置密码可将新密码作为参数传入
:return: 布尔值
"""
s = Serializer(current_app.config['SECRET_KEY'])
        # Try to load the data serialized in the token; the token may be invalid, so use try...except
try:
data = s.loads(token)
except (BadTimeSignature, SignatureExpired):
return False
        # Check that the username and operation type carried by the token match
if operation != data.get('operation') or self.username != data.get('username'):
return False
        # Perform the corresponding action for each operation type
if operation == Operations.CONFIRM:
self.email_confirmed = True
elif operation == Operations.RESET_PASSWORD:
self.set_password(new_password)
elif operation == Operations.CHANGE_EMAIL:
self.email = data.get('new_email')
else:
return False
self.save()
return True
@property
def is_admin(self):
"""检查用户是否拥有管理员权限"""
return self.role and self.role.role_name == 'admin'
@property
def is_active(self):
"""Flask-Login检查用户是否活跃"""
return self.active
@property
def posts_count(self):
"""发表的文章总数"""
return Post.objects.filter(author=self).count()
def can(self, permission):
"""检查用户是否拥有指定权限"""
return self.role and permission in self.role.permissions
def set_role(self):
"""除网站管理员外,为每个用户指定初始角色为reader"""
if self.email != current_app.config['ORIGINBLOG_ADMIN_EMAIL']:
self.role = Role.objects.filter(role_name='reader').first()
else:
self.role = Role.objects.filter(role_name='admin').first()
def clean(self):
"""在创建对象并写入到数据库之前为其设置角色"""
if not self.role:
self.set_role()
meta = {
'indexes': ['username'],
}
class Post(db.Document):
"""定义文章数据模型"""
title = db.StringField(max_length=255, required=True)
slug = db.StringField(max_length=255, required=True, unique=True)
abstract = db.StringField(max_length=255)
    author = db.ReferenceField('User', reverse_delete_rule=db.CASCADE)  # When the user is deleted, their posts are deleted as well
raw_content = db.StringField(required=True)
html_content = db.StringField(required=True)
pub_time = db.DateTimeField()
update_time = db.DateTimeField()
category = db.StringField(max_length=64, default='default')
tags = db.ListField(db.StringField(max_length=30))
weight = db.IntField(default=10)
can_comment = db.BooleanField(default=True)
from_admin = db.BooleanField(default=False)
    type = db.StringField(max_length=64, default='post')  # the 'page' type stores dedicated pages such as donations and the blog introduction
def set_slug(self, title):
"""根据标题自动生成标题别名"""
self.slug = slugify(title)
def get_abstract(self, count, suffix='...'):
"""使用正则表达式从html中提取摘要内容"""
plain_content = re.sub(r'<.*?>', '', self.html_content)
abstract = ''.join(plain_content.split())[0:count]
return abstract + suffix
def reviewed_comments(self):
"""返回已审核通过的评论列表"""
return [comment for comment in self.comments if comment.status == 'approved']
@property
def comments_count(self):
"""收到的评论总数"""
return Comment.objects.filter(post_slug=self.slug).count()
def clean(self):
"""保存到数据库前更新时间戳, 生成标题别名并将markdown文本转换为html"""
now = datetime.utcnow()
if not self.pub_time:
self.pub_time = now
self.update_time = now
if not self.slug:
self.set_slug(self.title)
self.html_content = markdown2.markdown(self.raw_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables'])
self.html_content = get_clean_html_content(self.html_content)
        # If no abstract was set, automatically truncate the body text to use as the abstract
if not self.abstract:
self.abstract = self.get_abstract(140)
def to_dict(self):
"""把类的对象转化为 dict 类型的数据,将对象序列化"""
post_dict = {
'title': self.title,
'slug': self.slug,
            'abstract': self.abstract,
'author': self.author,
'html_content': self.html_content,
'raw_content': self.raw_content,
'pub_time': self.pub_time,
'update_time': self.update_time,
'category': self.category,
'tags': self.tags,
'weight': self.weight,
'can_comment': self.can_comment,
'from_admin': self.from_admin,
'type': self.type,
}
return post_dict
meta = {
        'indexes': ['slug', 'type'],  # index on type speeds up queries for dedicated pages
'ordering': ['-pub_time']
}
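# Illustrative save flow (field values made up): a Post created with only
# title='Hello World' and raw_content='**hi**' gets slug='hello-world' from
# slugify, its markdown rendered and sanitized into html_content, and the first
# 140 characters of the plain text as its abstract when clean() runs on save.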
class Comment(db.Document):
"""定义评论的数据模型"""
author = db.StringField(max_length=30, required=True)
email = db.EmailField(max_length=255, required=True)
homepage = db.URLField(max_length=255)
post_slug = db.StringField(required=True)
post_title = db.StringField(default='default article')
md_content = db.StringField(required=True)
html_content = db.StringField(required=True)
pub_time = db.DateTimeField()
reply_to = db.ReferenceField('self')
status = db.StringField(choices=COMMENT_STATUS, default='pending')
from_post_author = db.BooleanField(default=False)
from_admin = db.BooleanField(default=False)
gravatar_id = db.StringField(default='00000000000')
def clean(self):
"""保存到数据库前更新时间戳,生成头像id,并将markdown文本转换为html"""
html_content = markdown2.markdown(self.md_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables', 'nofollow'])
self.html_content = get_clean_html_content(html_content)
if not self.pub_time:
self.pub_time = datetime.utcnow()
        # Derive the avatar from the email hash; fall back to the default avatar when there is no email
if self.email:
self.gravatar_id = hashlib.md5(self.email.lower().encode('utf-8')).hexdigest()
def get_avatar_url(self, base_url=GRAVATAR_CDN_BASE, img_size=44, default_img_url=GRAVATAR_DEFAULT_IMAGE):
"""通过 gavatar_id 从cdn 获取头像图片的链接。
获取时可传入大小和默认图片参数
:param base_url: cdn地址
:param img_size: 需要的图片大小,默认为44
:param default_img_url: 没有匹配头像时的默认图片
:return: 图片url
"""
gravatar_url = base_url + self.gravatar_id
params = {}
if img_size:
params['s'] = str(img_size)
if default_img_url:
params['d'] = default_img_url
if params:
gravatar_url = '{0}?{1}'.format(gravatar_url, urlencode(params))
return gravatar_url
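    # Illustrative result (hash and size made up): with img_size=44 and a default
    # image configured, the returned URL has the shape
    # '<GRAVATAR_CDN_BASE><gravatar_id>?s=44&d=<GRAVATAR_DEFAULT_IMAGE>'.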
meta = {
'ordering': ['-update_time']
}
class PostStatistic(db.Document):
"""统计每篇文章的阅读次数等统计信息"""
post = db.ReferenceField(Post, reverse_delete_rule=db.CASCADE) # 与文章级联删除
visit_count = db.IntField(default=0)
verbose_count_base = db.IntField(default=0)
post_type = db.StringField(max_length=64, default='post')
class Tracker(db.Document):
"""记录访客信息"""
post = db.ReferenceField(Post, reverse_delete_rule=db.CASCADE) # 与文章级联删除
ip = db.StringField()
user_agent = db.StringField()
create_time = db.DateTimeField(default=datetime.utcnow)
meta = {
'ordering': ['-create_time']
}
class Widget(db.Document):
"""在主页显示文本内容的widget"""
title = db.StringField(default='widget')
raw_content = db.StringField()
html_content = db.StringField()
priority = db.IntField(default=10000)
pub_time = db.DateTimeField()
def clean(self):
"""保存到数据库前更新时间戳,生成头像id,并将markdown文本转换为html"""
if self.raw_content:
self.html_content = markdown2.markdown(self.raw_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables'])
self.html_content = get_clean_html_content(self.html_content)
if not self.pub_time:
self.pub_time = datetime.utcnow()
meta = {
'ordering': ['priority']
}
| 34.163435
| 118
| 0.636585
|
import hashlib
import re
from datetime import datetime
from urllib.parse import urlencode
import bleach
import markdown2
from flask import current_app
from flask_login import UserMixin
from itsdangerous import BadTimeSignature, SignatureExpired
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from unidecode import unidecode
from werkzeug.security import generate_password_hash, check_password_hash
from .extensions import db
from .settings import BlogSettings
from .settings import Operations
COMMENT_STATUS = BlogSettings.COMMENT_STATUS
GRAVATAR_CDN_BASE = BlogSettings.GRAVATAR_CDN_BASE
GRAVATAR_DEFAULT_IMAGE = BlogSettings.GRAVATAR_DEFAULT_IMAGE
SOCIAL_NETWORKS = BlogSettings.SOCIAL_NETWORKS
ROLE_PERMISSION_MAP = BlogSettings.ROLE_PERMISSION_MAP
_punct_re = re.compile(r'[\t !"#$%&\-/<=>?@\[\\\]^_`{|},.]+')
def get_clean_html_content(html_content):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'br', 'blockquote', 'code',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'hr', 'img',
'table', 'thead', 'tbody', 'tr', 'th', 'td',
'sup', 'sub']
allowed_attrs = {
'*': ['class'],
'a': ['href', 'rel', 'name'],
'img': ['alt', 'src', 'title']
}
html_content = bleach.linkify(bleach.clean(html_content, tags=allowed_tags, attributes=allowed_attrs, strip=True))
return html_content
def slugify(text, delim=u'-'):
result = []
for word in _punct_re.split(text.lower()):
result.extend(unidecode(word).lower().split())
return unidecode(delim.join(result))[:230]
class Role(db.Document):
role_name = db.StringField(default='reader')
permissions = db.ListField(db.StringField(unique=True))
@classmethod
def init(cls):
for role_name in ROLE_PERMISSION_MAP:
role = cls.objects.filter(role_name=role_name).first()
if role is None:
role = cls(role_name=role_name)
            role.permissions = []  # clear the role's permissions on every initialization
for permission in ROLE_PERMISSION_MAP[role_name]:
role.permissions.append(permission)
role.save()
class User(db.Document, UserMixin):
username = db.StringField(max_length=20, required=True, unique=True)
password_hash = db.StringField(max_length=128, required=True)
name = db.StringField(max_length=30, default=username)
email = db.EmailField(max_length=255, required=True, unique=True)
create_time = db.DateTimeField(default=datetime.utcnow, required=True)
last_login = db.DateTimeField(default=datetime.utcnow, required=True)
email_confirmed = db.BooleanField(default=False)
    role = db.ReferenceField('Role')  # TODO: cascade behaviour when a role is deleted; roles cannot be deleted for now
bio = db.StringField(max_length=200)
homepage = db.StringField(max_length=255)
social_networks = db.DictField(default=SOCIAL_NETWORKS)
active = db.BooleanField(default=True)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def validate_password(self, password):
return check_password_hash(self.password_hash, password)
def get_id(self):
try:
return self.username
except AttributeError:
raise NotImplementedError('No `username` attribute - override `get_id`')
def generate_token(self, operation, expire_in=None, **kwargs):
        s = Serializer(current_app.config['SECRET_KEY'], expire_in)  # instantiate a JWS serializer with the secret key and expiry time in seconds
data = {
'username': self.username,
'operation': operation
}
data.update(**kwargs)
return s.dumps(data)
def validate_token(self, token, operation, new_password=None):
s = Serializer(current_app.config['SECRET_KEY'])
        # Try to load the data serialized in the token; it may be invalid, so guard with try...except
try:
data = s.loads(token)
except (BadTimeSignature, SignatureExpired):
return False
        # Check that the username and operation type carried in the token match
if operation != data.get('operation') or self.username != data.get('username'):
return False
        # Perform the action corresponding to the operation type
if operation == Operations.CONFIRM:
self.email_confirmed = True
elif operation == Operations.RESET_PASSWORD:
self.set_password(new_password)
elif operation == Operations.CHANGE_EMAIL:
self.email = data.get('new_email')
else:
return False
self.save()
return True
@property
def is_admin(self):
return self.role and self.role.role_name == 'admin'
@property
def is_active(self):
return self.active
@property
def posts_count(self):
return Post.objects.filter(author=self).count()
def can(self, permission):
return self.role and permission in self.role.permissions
def set_role(self):
if self.email != current_app.config['ORIGINBLOG_ADMIN_EMAIL']:
self.role = Role.objects.filter(role_name='reader').first()
else:
self.role = Role.objects.filter(role_name='admin').first()
def clean(self):
if not self.role:
self.set_role()
meta = {
'indexes': ['username'],
}
class Post(db.Document):
title = db.StringField(max_length=255, required=True)
slug = db.StringField(max_length=255, required=True, unique=True)
abstract = db.StringField(max_length=255)
    author = db.ReferenceField('User', reverse_delete_rule=db.CASCADE)  # posts are deleted when their author is deleted
raw_content = db.StringField(required=True)
html_content = db.StringField(required=True)
pub_time = db.DateTimeField()
update_time = db.DateTimeField()
category = db.StringField(max_length=64, default='default')
tags = db.ListField(db.StringField(max_length=30))
weight = db.IntField(default=10)
can_comment = db.BooleanField(default=True)
from_admin = db.BooleanField(default=False)
    type = db.StringField(max_length=64, default='post')  # the 'page' type stores dedicated pages such as donations and the blog introduction
def set_slug(self, title):
self.slug = slugify(title)
def get_abstract(self, count, suffix='...'):
plain_content = re.sub(r'<.*?>', '', self.html_content)
abstract = ''.join(plain_content.split())[0:count]
return abstract + suffix
def reviewed_comments(self):
return [comment for comment in self.comments if comment.status == 'approved']
@property
def comments_count(self):
return Comment.objects.filter(post_slug=self.slug).count()
def clean(self):
now = datetime.utcnow()
if not self.pub_time:
self.pub_time = now
self.update_time = now
if not self.slug:
self.set_slug(self.title)
self.html_content = markdown2.markdown(self.raw_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables'])
self.html_content = get_clean_html_content(self.html_content)
        # If no abstract was set, automatically truncate the body text to use as the abstract
if not self.abstract:
self.abstract = self.get_abstract(140)
def to_dict(self):
post_dict = {
'title': self.title,
'slug': self.slug,
            'abstract': self.abstract,
'author': self.author,
'html_content': self.html_content,
'raw_content': self.raw_content,
'pub_time': self.pub_time,
'update_time': self.update_time,
'category': self.category,
'tags': self.tags,
'weight': self.weight,
'can_comment': self.can_comment,
'from_admin': self.from_admin,
'type': self.type,
}
return post_dict
meta = {
        'indexes': ['slug', 'type'],  # index on type speeds up queries for dedicated pages
'ordering': ['-pub_time']
}
class Comment(db.Document):
author = db.StringField(max_length=30, required=True)
email = db.EmailField(max_length=255, required=True)
homepage = db.URLField(max_length=255)
post_slug = db.StringField(required=True)
post_title = db.StringField(default='default article')
md_content = db.StringField(required=True)
html_content = db.StringField(required=True)
pub_time = db.DateTimeField()
reply_to = db.ReferenceField('self')
status = db.StringField(choices=COMMENT_STATUS, default='pending')
from_post_author = db.BooleanField(default=False)
from_admin = db.BooleanField(default=False)
gravatar_id = db.StringField(default='00000000000')
def clean(self):
html_content = markdown2.markdown(self.md_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables', 'nofollow'])
self.html_content = get_clean_html_content(html_content)
if not self.pub_time:
self.pub_time = datetime.utcnow()
        # Derive the avatar from the email hash; fall back to the default avatar when there is no email
if self.email:
self.gravatar_id = hashlib.md5(self.email.lower().encode('utf-8')).hexdigest()
def get_avatar_url(self, base_url=GRAVATAR_CDN_BASE, img_size=44, default_img_url=GRAVATAR_DEFAULT_IMAGE):
gravatar_url = base_url + self.gravatar_id
params = {}
if img_size:
params['s'] = str(img_size)
if default_img_url:
params['d'] = default_img_url
if params:
gravatar_url = '{0}?{1}'.format(gravatar_url, urlencode(params))
return gravatar_url
meta = {
'ordering': ['-update_time']
}
class PostStatistic(db.Document):
    post = db.ReferenceField(Post, reverse_delete_rule=db.CASCADE)  # deleted when the referenced post is deleted (cascade)
visit_count = db.IntField(default=0)
verbose_count_base = db.IntField(default=0)
post_type = db.StringField(max_length=64, default='post')
class Tracker(db.Document):
    post = db.ReferenceField(Post, reverse_delete_rule=db.CASCADE)  # deleted when the referenced post is deleted (cascade)
ip = db.StringField()
user_agent = db.StringField()
create_time = db.DateTimeField(default=datetime.utcnow)
meta = {
'ordering': ['-create_time']
}
class Widget(db.Document):
title = db.StringField(default='widget')
raw_content = db.StringField()
html_content = db.StringField()
priority = db.IntField(default=10000)
pub_time = db.DateTimeField()
def clean(self):
if self.raw_content:
self.html_content = markdown2.markdown(self.raw_content,
extras=['code-friendly', 'fenced-code-blocks', 'tables'])
self.html_content = get_clean_html_content(self.html_content)
if not self.pub_time:
self.pub_time = datetime.utcnow()
meta = {
'ordering': ['priority']
}
| true
| true
|
1c42ea5a050ff90cd7136b1dd350c0440194626d
| 8,391
|
py
|
Python
|
homeassistant/components/energy/sensor.py
|
tsroka/home-assistant-core
|
2d83ad321115645a2103d577c3920df0c6afec4d
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/energy/sensor.py
|
tsroka/home-assistant-core
|
2d83ad321115645a2103d577c3920df0c6afec4d
|
[
"Apache-2.0"
] | 28
|
2021-09-14T06:14:07.000Z
|
2022-03-31T06:16:54.000Z
|
homeassistant/components/energy/sensor.py
|
tsroka/home-assistant-core
|
2d83ad321115645a2103d577c3920df0c6afec4d
|
[
"Apache-2.0"
] | null | null | null |
"""Helper sensor for calculating utility costs."""
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from typing import Any, Final, Literal, TypeVar, cast
from homeassistant.components.sensor import (
ATTR_LAST_RESET,
DEVICE_CLASS_MONETARY,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.core import HomeAssistant, State, callback, split_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .data import EnergyManager, async_get_manager
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the energy sensors."""
manager = await async_get_manager(hass)
process_now = partial(_process_manager_data, hass, manager, async_add_entities, {})
manager.async_listen_updates(process_now)
if manager.data:
await process_now()
T = TypeVar("T")
@dataclass
class FlowAdapter:
"""Adapter to allow flows to be used as sensors."""
flow_type: Literal["flow_from", "flow_to"]
stat_energy_key: Literal["stat_energy_from", "stat_energy_to"]
entity_energy_key: Literal["entity_energy_from", "entity_energy_to"]
total_money_key: Literal["stat_cost", "stat_compensation"]
name_suffix: str
entity_id_suffix: str
FLOW_ADAPTERS: Final = (
FlowAdapter(
"flow_from",
"stat_energy_from",
"entity_energy_from",
"stat_cost",
"Cost",
"cost",
),
FlowAdapter(
"flow_to",
"stat_energy_to",
"entity_energy_to",
"stat_compensation",
"Compensation",
"compensation",
),
)
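# Illustrative shape of a single "flow_from" entry as consumed below (the keys are
# the ones referenced via the adapters above; the values are made up):
# {
#     "stat_energy_from": "sensor.grid_energy",
#     "entity_energy_from": "sensor.grid_energy",
#     "entity_energy_price": None,
#     "number_energy_price": 0.30,
#     "stat_cost": None,
# }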
async def _process_manager_data(
hass: HomeAssistant,
manager: EnergyManager,
async_add_entities: AddEntitiesCallback,
current_entities: dict[tuple[str, str], EnergyCostSensor],
) -> None:
"""Process updated data."""
to_add: list[SensorEntity] = []
to_remove = dict(current_entities)
async def finish() -> None:
if to_add:
async_add_entities(to_add)
for key, entity in to_remove.items():
current_entities.pop(key)
await entity.async_remove()
if not manager.data:
await finish()
return
for energy_source in manager.data["energy_sources"]:
if energy_source["type"] != "grid":
continue
for adapter in FLOW_ADAPTERS:
for flow in energy_source[adapter.flow_type]:
# Opting out of the type complexity because can't get it to work
untyped_flow = cast(dict, flow)
# No need to create an entity if we already have a cost stat
if untyped_flow.get(adapter.total_money_key) is not None:
continue
# This is unique among all flow_from's
key = (adapter.flow_type, untyped_flow[adapter.stat_energy_key])
# Make sure the right data is there
# If the entity existed, we don't pop it from to_remove so it's removed
if untyped_flow.get(adapter.entity_energy_key) is None or (
untyped_flow.get("entity_energy_price") is None
and untyped_flow.get("number_energy_price") is None
):
continue
current_entity = to_remove.pop(key, None)
if current_entity:
current_entity.update_config(untyped_flow)
continue
current_entities[key] = EnergyCostSensor(
adapter,
untyped_flow,
)
to_add.append(current_entities[key])
await finish()
class EnergyCostSensor(SensorEntity):
"""Calculate costs incurred by consuming energy.
This is intended as a fallback for when no specific cost sensor is available for the
utility.
"""
def __init__(
self,
adapter: FlowAdapter,
flow: dict,
) -> None:
"""Initialize the sensor."""
super().__init__()
self._adapter = adapter
self.entity_id = f"{flow[adapter.entity_energy_key]}_{adapter.entity_id_suffix}"
self._attr_device_class = DEVICE_CLASS_MONETARY
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._flow = flow
self._last_energy_sensor_state: State | None = None
self._cur_value = 0.0
def _reset(self, energy_state: State) -> None:
"""Reset the cost sensor."""
self._attr_state = 0.0
self._cur_value = 0.0
self._attr_last_reset = dt_util.utcnow()
self._last_energy_sensor_state = energy_state
self.async_write_ha_state()
@callback
def _update_cost(self) -> None:
"""Update incurred costs."""
energy_state = self.hass.states.get(
cast(str, self._flow[self._adapter.entity_energy_key])
)
if energy_state is None or ATTR_LAST_RESET not in energy_state.attributes:
return
try:
energy = float(energy_state.state)
except ValueError:
return
# Determine energy price
if self._flow["entity_energy_price"] is not None:
energy_price_state = self.hass.states.get(self._flow["entity_energy_price"])
if energy_price_state is None:
return
try:
energy_price = float(energy_price_state.state)
except ValueError:
return
else:
energy_price_state = None
energy_price = cast(float, self._flow["number_energy_price"])
if self._last_energy_sensor_state is None:
# Initialize as it's the first time all required entities are in place.
self._reset(energy_state)
return
if (
energy_state.attributes[ATTR_LAST_RESET]
!= self._last_energy_sensor_state.attributes[ATTR_LAST_RESET]
):
# Energy meter was reset, reset cost sensor too
self._reset(energy_state)
else:
# Update with newly incurred cost
old_energy_value = float(self._last_energy_sensor_state.state)
self._cur_value += (energy - old_energy_value) * energy_price
self._attr_state = round(self._cur_value, 2)
self._last_energy_sensor_state = energy_state
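    # Worked example (illustrative numbers, not from the source): if the meter
    # previously read 10.0 kWh, now reads 12.5 kWh and the price is 0.30/kWh,
    # the sensor adds (12.5 - 10.0) * 0.30 = 0.75 to the accumulated cost.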
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
energy_state = self.hass.states.get(self._flow[self._adapter.entity_energy_key])
if energy_state:
name = energy_state.name
else:
name = split_entity_id(self._flow[self._adapter.entity_energy_key])[
0
].replace("_", " ")
self._attr_name = f"{name} {self._adapter.name_suffix}"
self._update_cost()
# Store stat ID in hass.data so frontend can look it up
self.hass.data[DOMAIN]["cost_sensors"][
self._flow[self._adapter.entity_energy_key]
] = self.entity_id
@callback
def async_state_changed_listener(*_: Any) -> None:
"""Handle child updates."""
self._update_cost()
self.async_write_ha_state()
self.async_on_remove(
async_track_state_change_event(
self.hass,
cast(str, self._flow[self._adapter.entity_energy_key]),
async_state_changed_listener,
)
)
async def async_will_remove_from_hass(self) -> None:
"""Handle removing from hass."""
self.hass.data[DOMAIN]["cost_sensors"].pop(
self._flow[self._adapter.entity_energy_key]
)
await super().async_will_remove_from_hass()
@callback
def update_config(self, flow: dict) -> None:
"""Update the config."""
self._flow = flow
@property
def unit_of_measurement(self) -> str | None:
"""Return the units of measurement."""
return self.hass.config.currency
| 31.904943
| 88
| 0.628769
|
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from typing import Any, Final, Literal, TypeVar, cast
from homeassistant.components.sensor import (
ATTR_LAST_RESET,
DEVICE_CLASS_MONETARY,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.core import HomeAssistant, State, callback, split_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .data import EnergyManager, async_get_manager
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
manager = await async_get_manager(hass)
process_now = partial(_process_manager_data, hass, manager, async_add_entities, {})
manager.async_listen_updates(process_now)
if manager.data:
await process_now()
T = TypeVar("T")
@dataclass
class FlowAdapter:
flow_type: Literal["flow_from", "flow_to"]
stat_energy_key: Literal["stat_energy_from", "stat_energy_to"]
entity_energy_key: Literal["entity_energy_from", "entity_energy_to"]
total_money_key: Literal["stat_cost", "stat_compensation"]
name_suffix: str
entity_id_suffix: str
FLOW_ADAPTERS: Final = (
FlowAdapter(
"flow_from",
"stat_energy_from",
"entity_energy_from",
"stat_cost",
"Cost",
"cost",
),
FlowAdapter(
"flow_to",
"stat_energy_to",
"entity_energy_to",
"stat_compensation",
"Compensation",
"compensation",
),
)
async def _process_manager_data(
hass: HomeAssistant,
manager: EnergyManager,
async_add_entities: AddEntitiesCallback,
current_entities: dict[tuple[str, str], EnergyCostSensor],
) -> None:
to_add: list[SensorEntity] = []
to_remove = dict(current_entities)
async def finish() -> None:
if to_add:
async_add_entities(to_add)
for key, entity in to_remove.items():
current_entities.pop(key)
await entity.async_remove()
if not manager.data:
await finish()
return
for energy_source in manager.data["energy_sources"]:
if energy_source["type"] != "grid":
continue
for adapter in FLOW_ADAPTERS:
for flow in energy_source[adapter.flow_type]:
untyped_flow = cast(dict, flow)
# No need to create an entity if we already have a cost stat
if untyped_flow.get(adapter.total_money_key) is not None:
continue
# This is unique among all flow_from's
key = (adapter.flow_type, untyped_flow[adapter.stat_energy_key])
if untyped_flow.get(adapter.entity_energy_key) is None or (
untyped_flow.get("entity_energy_price") is None
and untyped_flow.get("number_energy_price") is None
):
continue
current_entity = to_remove.pop(key, None)
if current_entity:
current_entity.update_config(untyped_flow)
continue
current_entities[key] = EnergyCostSensor(
adapter,
untyped_flow,
)
to_add.append(current_entities[key])
await finish()
class EnergyCostSensor(SensorEntity):
def __init__(
self,
adapter: FlowAdapter,
flow: dict,
) -> None:
super().__init__()
self._adapter = adapter
self.entity_id = f"{flow[adapter.entity_energy_key]}_{adapter.entity_id_suffix}"
self._attr_device_class = DEVICE_CLASS_MONETARY
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._flow = flow
self._last_energy_sensor_state: State | None = None
self._cur_value = 0.0
def _reset(self, energy_state: State) -> None:
self._attr_state = 0.0
self._cur_value = 0.0
self._attr_last_reset = dt_util.utcnow()
self._last_energy_sensor_state = energy_state
self.async_write_ha_state()
@callback
def _update_cost(self) -> None:
energy_state = self.hass.states.get(
cast(str, self._flow[self._adapter.entity_energy_key])
)
if energy_state is None or ATTR_LAST_RESET not in energy_state.attributes:
return
try:
energy = float(energy_state.state)
except ValueError:
return
if self._flow["entity_energy_price"] is not None:
energy_price_state = self.hass.states.get(self._flow["entity_energy_price"])
if energy_price_state is None:
return
try:
energy_price = float(energy_price_state.state)
except ValueError:
return
else:
energy_price_state = None
energy_price = cast(float, self._flow["number_energy_price"])
if self._last_energy_sensor_state is None:
self._reset(energy_state)
return
if (
energy_state.attributes[ATTR_LAST_RESET]
!= self._last_energy_sensor_state.attributes[ATTR_LAST_RESET]
):
# Energy meter was reset, reset cost sensor too
self._reset(energy_state)
else:
# Update with newly incurred cost
old_energy_value = float(self._last_energy_sensor_state.state)
self._cur_value += (energy - old_energy_value) * energy_price
self._attr_state = round(self._cur_value, 2)
self._last_energy_sensor_state = energy_state
async def async_added_to_hass(self) -> None:
energy_state = self.hass.states.get(self._flow[self._adapter.entity_energy_key])
if energy_state:
name = energy_state.name
else:
name = split_entity_id(self._flow[self._adapter.entity_energy_key])[
0
].replace("_", " ")
self._attr_name = f"{name} {self._adapter.name_suffix}"
self._update_cost()
# Store stat ID in hass.data so frontend can look it up
self.hass.data[DOMAIN]["cost_sensors"][
self._flow[self._adapter.entity_energy_key]
] = self.entity_id
@callback
def async_state_changed_listener(*_: Any) -> None:
self._update_cost()
self.async_write_ha_state()
self.async_on_remove(
async_track_state_change_event(
self.hass,
cast(str, self._flow[self._adapter.entity_energy_key]),
async_state_changed_listener,
)
)
async def async_will_remove_from_hass(self) -> None:
self.hass.data[DOMAIN]["cost_sensors"].pop(
self._flow[self._adapter.entity_energy_key]
)
await super().async_will_remove_from_hass()
@callback
def update_config(self, flow: dict) -> None:
self._flow = flow
@property
def unit_of_measurement(self) -> str | None:
return self.hass.config.currency
| true
| true
|
1c42ec42c6f4857d394d3710fd38afd08eee5376
| 452
|
py
|
Python
|
transitland/errors.py
|
transit-land/onestop-id-python-client
|
d03d8759d0758803519c51c6970213946a4078d4
|
[
"MIT"
] | null | null | null |
transitland/errors.py
|
transit-land/onestop-id-python-client
|
d03d8759d0758803519c51c6970213946a4078d4
|
[
"MIT"
] | null | null | null |
transitland/errors.py
|
transit-land/onestop-id-python-client
|
d03d8759d0758803519c51c6970213946a4078d4
|
[
"MIT"
] | null | null | null |
##### Exceptions #####
class ExistingIdentifierError(KeyError):
pass
class NoPointsError(ValueError):
pass
class InvalidFeedRegistryError(ValueError):
pass
class InvalidChecksumError(ValueError):
pass
class DatastoreError(Exception):
def __init__(self, message, response_code=None, response_body=None):
super(DatastoreError, self).__init__(message)
self.response_code = response_code
self.response_body = response_body
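# Minimal usage sketch (status code and body are made-up values):
#   raise DatastoreError('update failed', response_code=500, response_body='{"error": "boom"}')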
| 22.6
| 70
| 0.765487
|
class InvalidFeedRegistryError(ValueError):
pass
class InvalidChecksumError(ValueError):
pass
class DatastoreError(Exception):
def __init__(self, message, response_code=None, response_body=None):
super(DatastoreError, self).__init__(message)
self.response_code = response_code
self.response_body = response_body
| true
| true
|
1c42ec6d739f7de77d8c8d27b1b8559886a3439c
| 3,487
|
py
|
Python
|
builder/implicit_ratings_calculator.py
|
lncohn/practical_recommender_systems
|
118f791b224b3f10a8dcddf93d10eff1dea5cbde
|
[
"MIT"
] | null | null | null |
builder/implicit_ratings_calculator.py
|
lncohn/practical_recommender_systems
|
118f791b224b3f10a8dcddf93d10eff1dea5cbde
|
[
"MIT"
] | null | null | null |
builder/implicit_ratings_calculator.py
|
lncohn/practical_recommender_systems
|
118f791b224b3f10a8dcddf93d10eff1dea5cbde
|
[
"MIT"
] | null | null | null |
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prs_project.settings")
import django
django.setup()
from django.db.models import Count
import datetime
from datetime import date, timedelta
from collections import defaultdict
from collector.models import Log
from analytics.models import Rating
w1 = 100
w2 = 50
w3 = 15
def calculate_decay(age_in_days):
return 1/age_in_days
def query_log_for_users():
"""
Equivalent to following sql:
select distinct(user_id)
from collector_log log
"""
return Log.objects.values('user_id').distinct()
def query_log_data_for_user(userid):
"""
Equivalent to following sql:
SELECT *
FROM collector_log log
WHERE user_id = {}
"""
return Log.objects.filter(user_id=userid)
def query_aggregated_log_data_for_user(userid):
user_data = Log.objects.filter(user_id = userid).values('user_id',
'content_id',
'event').annotate(count=Count('created'))
return user_data
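# Roughly equivalent SQL for the aggregation above (sketch):
#   SELECT user_id, content_id, event, COUNT(created) AS count
#   FROM collector_log
#   WHERE user_id = {}
#   GROUP BY user_id, content_id, event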
def calculate_implicit_ratings_w_timedecay(user_id):
data = query_log_data_for_user(user_id)
weights = {'buy': w1, 'moredetails': w2, 'details': w3 }
ratings = dict()
for entry in data:
movie_id = entry.movie_id
event_type = entry.event
        # Seed unseen ids so the accumulation below actually runs; as written
        # originally the dict stayed empty and the function always returned {}.
        if movie_id not in ratings:
            ratings[movie_id] = 0
        # Age in whole years; clamp to at least 1 so calculate_decay never divides by zero.
        age = max((date.today() - entry.created) // timedelta(days=365.2425), 1)
        decay = calculate_decay(age)
        ratings[movie_id] += weights[event_type]*decay
return ratings
def calculate_implicit_ratings_for_user(user_id):
data = query_aggregated_log_data_for_user(user_id)
agg_data = dict()
max_rating = 0
for row in data:
content_id = str(row['content_id'])
        if content_id not in agg_data.keys():
agg_data[content_id] = defaultdict(int)
agg_data[content_id][row['event']] = row['count']
ratings = dict()
    for k, v in agg_data.items():
rating = w1 * v['buy'] + w2 * v['details'] + w3 * v['moredetails']
max_rating = max(max_rating, rating)
ratings[k] = rating
for content_id in ratings.keys():
ratings[content_id] = 10 * ratings[content_id] / max_rating
return ratings
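# Worked example (made-up counts): buy=1, details=2, moredetails=4 gives
# 100*1 + 50*2 + 15*4 = 260; if 260 is also that user's maximum raw rating,
# the normalized value becomes 10 * 260 / 260 = 10.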
def save_ratings(ratings, user_id, type):
print("saving ratings for {}".format(user_id))
i = 0
for content_id, rating in ratings.items():
if rating > 0:
Rating(
user_id=user_id,
movie_id=str(content_id),
rating=rating,
rating_timestamp=datetime.datetime.now(),
type=type
).save()
print ('{} {}'.format(user_id, str(content_id)))
i += 1
if i == 100:
print('.', end="")
i = 0
def calculate_ratings_with_timedecay():
for user in query_log_for_users():
userid = user['user_id']
ratings = calculate_implicit_ratings_w_timedecay(userid)
save_ratings(ratings, userid, 'implicit_w')
def calculate_ratings():
rows = query_log_for_users()
for user in rows:
userid = user['user_id']
ratings = calculate_implicit_ratings_for_user(userid)
save_ratings(ratings, userid, 'implicit')
if __name__ == '__main__':
print("Calculating implicit ratings...")
Rating.objects.filter(type='implicit').delete()
calculate_ratings()
| 23.092715
| 101
| 0.619444
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prs_project.settings")
import django
django.setup()
from django.db.models import Count
import datetime
from datetime import date, timedelta
from collections import defaultdict
from collector.models import Log
from analytics.models import Rating
w1 = 100
w2 = 50
w3 = 15
def calculate_decay(age_in_days):
return 1/age_in_days
def query_log_for_users():
return Log.objects.values('user_id').distinct()
def query_log_data_for_user(userid):
return Log.objects.filter(user_id=userid)
def query_aggregated_log_data_for_user(userid):
user_data = Log.objects.filter(user_id = userid).values('user_id',
'content_id',
'event').annotate(count=Count('created'))
return user_data
def calculate_implicit_ratings_w_timedecay(user_id):
data = query_log_data_for_user(user_id)
weights = {'buy': w1, 'moredetails': w2, 'details': w3 }
ratings = dict()
for entry in data:
movie_id = entry.movie_id
event_type = entry.event
        if movie_id not in ratings:
            ratings[movie_id] = 0
        age = max((date.today() - entry.created) // timedelta(days=365.2425), 1)
        decay = calculate_decay(age)
        ratings[movie_id] += weights[event_type]*decay
return ratings
def calculate_implicit_ratings_for_user(user_id):
data = query_aggregated_log_data_for_user(user_id)
agg_data = dict()
max_rating = 0
for row in data:
content_id = str(row['content_id'])
        if content_id not in agg_data.keys():
agg_data[content_id] = defaultdict(int)
agg_data[content_id][row['event']] = row['count']
ratings = dict()
    for k, v in agg_data.items():
rating = w1 * v['buy'] + w2 * v['details'] + w3 * v['moredetails']
max_rating = max(max_rating, rating)
ratings[k] = rating
for content_id in ratings.keys():
ratings[content_id] = 10 * ratings[content_id] / max_rating
return ratings
def save_ratings(ratings, user_id, type):
print("saving ratings for {}".format(user_id))
i = 0
for content_id, rating in ratings.items():
if rating > 0:
Rating(
user_id=user_id,
movie_id=str(content_id),
rating=rating,
rating_timestamp=datetime.datetime.now(),
type=type
).save()
print ('{} {}'.format(user_id, str(content_id)))
i += 1
if i == 100:
print('.', end="")
i = 0
def calculate_ratings_with_timedecay():
for user in query_log_for_users():
userid = user['user_id']
ratings = calculate_implicit_ratings_w_timedecay(userid)
save_ratings(ratings, userid, 'implicit_w')
def calculate_ratings():
rows = query_log_for_users()
for user in rows:
userid = user['user_id']
ratings = calculate_implicit_ratings_for_user(userid)
save_ratings(ratings, userid, 'implicit')
if __name__ == '__main__':
print("Calculating implicit ratings...")
Rating.objects.filter(type='implicit').delete()
calculate_ratings()
| true
| true
|
1c42ec954cada986b5ee4960aedaeb76e474d17a
| 5,219
|
py
|
Python
|
nemo/collections/tts/parts/talknet.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 1
|
2020-11-05T09:39:59.000Z
|
2020-11-05T09:39:59.000Z
|
nemo/collections/tts/parts/talknet.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 1
|
2020-06-11T00:54:42.000Z
|
2020-06-11T00:54:42.000Z
|
nemo/collections/tts/parts/talknet.py
|
ParikhKadam/NeMo
|
ee11f7c4666d410d91f9da33c61f4819ea625013
|
[
"Apache-2.0"
] | 3
|
2020-03-10T05:10:07.000Z
|
2020-12-08T01:33:35.000Z
|
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Inspired by: https://github.com/r9y9/wavenet_vocoder
# Copyright (c) 2017: Ryuichi Yamamoto.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
import numpy as np
import torch
from torch.nn import functional as F
def dmld_loss(y_pred, y_true, num_classes):
"""Discretized mixture of logistic distributions loss
https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py
https://arxiv.org/pdf/1701.05517.pdf
Args:
y_pred (Tensor): Predicted output (B x T x C)
y_true (Tensor): Target (B x T).
num_classes (int): Number of classes
Returns
Tensor: loss
"""
def log_sum_exp(x):
""" numerically stable log_sum_exp implementation that prevents overflow """
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
z_shape = y_pred.size(-1)
assert z_shape % 3 == 0
nr_mix = z_shape // 3
# unpack parameters. (B, T, num_mixtures) x 3
logit_probs = y_pred[:, :, :nr_mix]
means = y_pred[:, :, nr_mix : 2 * nr_mix]
log_scales = torch.clamp(y_pred[:, :, 2 * nr_mix : 3 * nr_mix], min=-7.0)
# B x T -> B x T x num_mixtures
y_true = y_true.unsqueeze(-1).expand_as(means)
centered_y = y_true - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1.0 / (num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1.0 / (num_classes - 1))
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
# equivalent: torch.log(torch.sigmoid(plus_in))
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
# equivalent: (1 - torch.sigmoid(min_in)).log()
log_one_minus_cdf_min = -F.softplus(min_in)
# probability for all other cases
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
# noinspection PyTypeChecker
inner_inner_out = inner_inner_cond * torch.log(torch.clamp(cdf_delta, min=1e-12)) + (1.0 - inner_inner_cond) * (
log_pdf_mid - np.log((num_classes - 1) / 2)
)
inner_cond = (y_true > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
cond = (y_true < -0.999).float()
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
return -log_sum_exp(log_probs)
def dmld_sample(y):
"""Sample from discretized mixture of logistic distributions.
Args:
y (Tensor): B x T x C
Returns:
Tensor: sample in range of [-1.0, 1.0].
"""
z_shape = y.size(-1)
assert z_shape % 3 == 0
nr_mix = z_shape // 3
# B x T x C
logit_probs = y[:, :, :nr_mix]
# sample mixture indicator from softmax
temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
temp = logit_probs.data - torch.log(-torch.log(temp))
_, argmax = temp.max(dim=-1)
# (B, T) -> (B, T, nr_mix)
one_hot = torch.zeros(argmax.size() + (nr_mix,), dtype=torch.float, device=argmax.device)
one_hot.scatter_(len(argmax.size()), argmax.unsqueeze(-1), 1.0)
# select logistic parameters
means = torch.sum(y[:, :, nr_mix : 2 * nr_mix] * one_hot, dim=-1)
log_scales = torch.sum(y[:, :, 2 * nr_mix : 3 * nr_mix] * one_hot, dim=-1)
log_scales = torch.clamp(log_scales, min=-7.0)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))
x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0)
return x
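# Illustrative usage sketch; the shapes below are assumptions, not from the source.
def _dmld_usage_sketch():
    """Show the expected tensor shapes: the network emits 3 * nr_mix channels per
    timestep, targets live in [-1, 1], and sampling returns values in [-1, 1]."""
    y_pred = torch.randn(2, 100, 30)          # B=2, T=100, 10 logistic mixtures
    y_true = torch.rand(2, 100) * 2.0 - 1.0   # targets scaled to [-1, 1]
    loss = dmld_loss(y_pred, y_true, num_classes=256).mean()
    sample = dmld_sample(y_pred)              # per-timestep samples, clipped to [-1, 1]
    return loss, sample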
| 36.496503
| 117
| 0.669477
|
import numpy as np
import torch
from torch.nn import functional as F
def dmld_loss(y_pred, y_true, num_classes):
def log_sum_exp(x):
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
z_shape = y_pred.size(-1)
assert z_shape % 3 == 0
nr_mix = z_shape // 3
logit_probs = y_pred[:, :, :nr_mix]
means = y_pred[:, :, nr_mix : 2 * nr_mix]
log_scales = torch.clamp(y_pred[:, :, 2 * nr_mix : 3 * nr_mix], min=-7.0)
y_true = y_true.unsqueeze(-1).expand_as(means)
centered_y = y_true - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1.0 / (num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1.0 / (num_classes - 1))
cdf_min = torch.sigmoid(min_in)
log_cdf_plus = plus_in - F.softplus(plus_in)
log_one_minus_cdf_min = -F.softplus(min_in)
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(torch.clamp(cdf_delta, min=1e-12)) + (1.0 - inner_inner_cond) * (
log_pdf_mid - np.log((num_classes - 1) / 2)
)
inner_cond = (y_true > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
cond = (y_true < -0.999).float()
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
return -log_sum_exp(log_probs)
def dmld_sample(y):
z_shape = y.size(-1)
assert z_shape % 3 == 0
nr_mix = z_shape // 3
logit_probs = y[:, :, :nr_mix]
temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
temp = logit_probs.data - torch.log(-torch.log(temp))
_, argmax = temp.max(dim=-1)
one_hot = torch.zeros(argmax.size() + (nr_mix,), dtype=torch.float, device=argmax.device)
one_hot.scatter_(len(argmax.size()), argmax.unsqueeze(-1), 1.0)
means = torch.sum(y[:, :, nr_mix : 2 * nr_mix] * one_hot, dim=-1)
log_scales = torch.sum(y[:, :, 2 * nr_mix : 3 * nr_mix] * one_hot, dim=-1)
log_scales = torch.clamp(log_scales, min=-7.0)
u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))
x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0)
return x
| true
| true
|
1c42eda9d5fcf36fe53ac3ad8ac5d39f16da82fb
| 14,976
|
py
|
Python
|
layers/box_utils.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | 1
|
2022-02-06T05:11:24.000Z
|
2022-02-06T05:11:24.000Z
|
layers/box_utils.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | null | null | null |
layers/box_utils.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | 1
|
2022-02-06T05:11:26.000Z
|
2022-02-06T05:11:26.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
from utils import timer
from data import cfg
@torch.jit.script
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
@torch.jit.script
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2, # cx, cy
boxes[:, 2:] - boxes[:, :2] ), 1) # w, h
@torch.jit.script
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [n,A,4].
box_b: (tensor) bounding boxes, Shape: [n,B,4].
Return:
(tensor) intersection area, Shape: [n,A,B].
"""
n = box_a.size(0)
A = box_a.size(1)
B = box_b.size(1)
max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))
min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))
return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter
def jaccard(box_a, box_b, iscrowd:bool=False):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
use_batch = True
if box_a.dim() == 2:
use_batch = False
box_a = box_a[None, ...]
box_b = box_b[None, ...]
inter = intersect(box_a, box_b)
area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *
(box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B]
area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *
(box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B]
union = area_a + area_b - inter
out = inter / area_a if iscrowd else inter / union
return out if use_batch else out.squeeze(0)
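# Worked example (boxes made up): A = [0, 0, 1, 1] and B = [0.5, 0, 1.5, 1]
# intersect over an area of 0.5; the union is 1 + 1 - 0.5 = 1.5, so IoU = 1/3.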
def elemwise_box_iou(box_a, box_b):
""" Does the same as above but instead of pairwise, elementwise along the inner dimension. """
max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])
min_xy = torch.max(box_a[:, :2], box_b[:, :2])
inter = torch.clamp((max_xy - min_xy), min=0)
inter = inter[:, 0] * inter[:, 1]
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
union = area_a + area_b - inter
union = torch.clamp(union, min=0.1)
# Return value is [n] for inputs [n, 4]
return torch.clamp(inter / union, max=1)
def mask_iou(masks_a, masks_b, iscrowd=False):
"""
    Computes the pairwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].
The output is of size [a, b].
Wait I thought this was "box_utils", why am I putting this in here?
"""
masks_a = masks_a.view(masks_a.size(0), -1)
masks_b = masks_b.view(masks_b.size(0), -1)
intersection = masks_a @ masks_b.t()
area_a = masks_a.sum(dim=1).unsqueeze(1)
area_b = masks_b.sum(dim=1).unsqueeze(0)
return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a
def elemwise_mask_iou(masks_a, masks_b):
""" Does the same as above but instead of pairwise, elementwise along the outer dimension. """
masks_a = masks_a.view(-1, masks_a.size(-1))
masks_b = masks_b.view(-1, masks_b.size(-1))
intersection = (masks_a * masks_b).sum(dim=0)
area_a = masks_a.sum(dim=0)
area_b = masks_b.sum(dim=0)
# Return value is [n] for inputs [h, w, n]
return torch.clamp(intersection / torch.clamp(area_a + area_b - intersection, min=0.1), max=1)
def change(gt, priors):
"""
Compute the d_change metric proposed in Box2Pix:
https://lmb.informatik.uni-freiburg.de/Publications/2018/UB18/paper-box2pix.pdf
Input should be in point form (xmin, ymin, xmax, ymax).
Output is of shape [num_gt, num_priors]
    Note this returns -change so it can be a drop-in replacement for jaccard (higher values mean a better match).
"""
num_priors = priors.size(0)
num_gt = gt.size(0)
gt_w = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)
gt_h = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)
gt_mat = gt[:, None, :].expand(num_gt, num_priors, 4)
pr_mat = priors[None, :, :].expand(num_gt, num_priors, 4)
diff = gt_mat - pr_mat
diff[:, :, 0] /= gt_w
diff[:, :, 2] /= gt_w
diff[:, :, 1] /= gt_h
diff[:, :, 3] /= gt_h
return -torch.sqrt( (diff ** 2).sum(dim=2) )
def match(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx, loc_data):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
pos_thresh: (float) IoU > pos_thresh ==> positive.
neg_thresh: (float) IoU < neg_thresh ==> negative.
truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
crowd_boxes: (tensor) All the crowd box annotations or None if there are none.
loc_t: (tensor) Tensor to be filled w/ endcoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. Note: -1 means neutral.
idx_t: (tensor) Tensor to be filled w/ the index of the matched gt box for each prior.
idx: (int) current batch index.
loc_data: (tensor) The predicted bbox regression coordinates for this batch.
Return:
The matched indices corresponding to 1)location and 2)confidence preds.
"""
decoded_priors = decode(loc_data, priors, cfg.use_yolo_regressors) if cfg.use_prediction_matching else point_form(priors)
# Size [num_objects, num_priors]
overlaps = jaccard(truths, decoded_priors) if not cfg.use_change_matching else change(truths, decoded_priors)
# Size [num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0)
# We want to ensure that each gt gets used at least once so that we don't
# waste any training data. In order to do that, find the max overlap anchor
# with each gt, and force that anchor to use that gt.
for _ in range(overlaps.size(0)):
# Find j, the gt with the highest overlap with a prior
# In effect, this will loop through overlaps.size(0) in a "smart" order,
# always choosing the highest overlap first.
best_prior_overlap, best_prior_idx = overlaps.max(1)
j = best_prior_overlap.max(0)[1]
# Find i, the highest overlap anchor with this gt
i = best_prior_idx[j]
# Set all other overlaps with i to be -1 so that no other gt uses it
overlaps[:, i] = -1
# Set all other overlaps with j to be -1 so that this loop never uses j again
overlaps[j, :] = -1
# Overwrite i's score to be 2 so it doesn't get thresholded ever
best_truth_overlap[i] = 2
# Set the gt to be used for i to be j, overwriting whatever was there
best_truth_idx[i] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < pos_thresh] = -1 # label as neutral
conf[best_truth_overlap < neg_thresh] = 0 # label as background
# Deal with crowd annotations for COCO
if crowd_boxes is not None and cfg.crowd_iou_threshold < 1:
# Size [num_priors, num_crowds]
crowd_overlaps = jaccard(decoded_priors, crowd_boxes, iscrowd=True)
# Size [num_priors]
best_crowd_overlap, best_crowd_idx = crowd_overlaps.max(1)
# Set non-positives with crowd iou of over the threshold to be neutral.
conf[(conf <= 0) & (best_crowd_overlap > cfg.crowd_iou_threshold)] = -1
loc = encode(matches, priors, cfg.use_yolo_regressors)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
idx_t[idx] = best_truth_idx # [num_priors] indices for lookup
@torch.jit.script
def encode(matched, priors, use_yolo_regressors:bool=False):
"""
Encode bboxes matched with each prior into the format
produced by the network. See decode for more details on
this format. Note that encode(decode(x, p), p) = x.
Args:
- matched: A tensor of bboxes in point form with shape [num_priors, 4]
- priors: The tensor of all priors with shape [num_priors, 4]
Return: A tensor with encoded relative coordinates in the format
outputted by the network (see decode). Size: [num_priors, 4]
"""
if use_yolo_regressors:
# Exactly the reverse of what we did in decode
# In fact encode(decode(x, p), p) should be x
boxes = center_size(matched)
loc = torch.cat((
boxes[:, :2] - priors[:, :2],
torch.log(boxes[:, 2:] / priors[:, 2:])
), 1)
else:
variances = [0.1, 0.2]
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
loc = torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
return loc
@torch.jit.script
def decode(loc, priors, use_yolo_regressors:bool=False):
"""
Decode predicted bbox coordinates using the same scheme
employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf
b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x
b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y
b_w = prior_w * exp(loc_w)
b_h = prior_h * exp(loc_h)
    Note that loc is input as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]
    while priors are input as [x, y, w, h] where each coordinate
is relative to size of the image (even sigmoid(x)). We do this
in the network by dividing by the 'cell size', which is just
the size of the convouts.
Also note that prior_x and prior_y are center coordinates which
is why we have to subtract .5 from sigmoid(pred_x and pred_y).
Args:
- loc: The predicted bounding boxes of size [num_priors, 4]
- priors: The priorbox coords with size [num_priors, 4]
Returns: A tensor of decoded relative coordinates in point form
form with size [num_priors, 4]
"""
if use_yolo_regressors:
# Decoded boxes in center-size notation
boxes = torch.cat((
loc[:, :2] + priors[:, :2],
priors[:, 2:] * torch.exp(loc[:, 2:])
), 1)
boxes = point_form(boxes)
else:
variances = [0.1, 0.2]
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
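# Illustrative sketch (random tensors, not part of the original module): checks the
# documented identity encode(decode(x, p), p) = x for both regressor variants.
def _encode_decode_roundtrip_sketch():
    priors = torch.cat([torch.rand(8, 2), torch.rand(8, 2) * 0.5 + 0.1], dim=1)  # cx, cy, w, h
    loc = torch.randn(8, 4) * 0.1
    errors = []
    for use_yolo in (False, True):
        recovered = encode(decode(loc, priors, use_yolo), priors, use_yolo)
        errors.append((recovered - loc).abs().max())  # both should be ~0
    return errors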
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max
@torch.jit.script
def sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):
"""
Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size.
Also converts from relative to absolute coordinates and casts the results to long tensors.
If cast is false, the result won't be cast to longs.
Warning: this does things in-place behind the scenes so copy if necessary.
"""
_x1 = _x1 * img_size
_x2 = _x2 * img_size
if cast:
_x1 = _x1.long()
_x2 = _x2.long()
x1 = torch.min(_x1, _x2)
x2 = torch.max(_x1, _x2)
x1 = torch.clamp(x1-padding, min=0)
x2 = torch.clamp(x2+padding, max=img_size)
return x1, x2
@torch.jit.script
def crop(masks, boxes, padding:int=1):
"""
"Crop" predicted masks by zeroing out everything not in the predicted bbox.
Vectorized by Chong (thanks Chong).
Args:
- masks should be a size [h, w, n] tensor of masks
- boxes should be a size [n, 4] tensor of bbox coords in relative point form
"""
h, w, n = masks.size()
x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)
y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)
rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n)
cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n)
masks_left = rows >= x1.view(1, 1, -1)
masks_right = rows < x2.view(1, 1, -1)
masks_up = cols >= y1.view(1, 1, -1)
masks_down = cols < y2.view(1, 1, -1)
crop_mask = masks_left * masks_right * masks_up * masks_down
return masks * crop_mask.float()
def index2d(src, idx):
"""
Indexes a tensor by a 2d index.
In effect, this does
out[i, j] = src[i, idx[i, j]]
Both src and idx should have the same size.
"""
offs = torch.arange(idx.size(0), device=idx.device)[:, None].expand_as(idx)
idx = idx + offs * idx.size(1)
return src.view(-1)[idx.view(-1)].view(idx.size())
| 38.204082
| 125
| 0.615585
|
import torch
from utils import timer
from data import cfg
@torch.jit.script
def point_form(boxes):
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,
boxes[:, :2] + boxes[:, 2:]/2), 1)
@torch.jit.script
def center_size(boxes):
return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2,
boxes[:, 2:] - boxes[:, :2] ), 1)
@torch.jit.script
def intersect(box_a, box_b):
n = box_a.size(0)
A = box_a.size(1)
B = box_b.size(1)
max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))
min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))
return torch.clamp(max_xy - min_xy, min=0).prod(3)
def jaccard(box_a, box_b, iscrowd:bool=False):
use_batch = True
if box_a.dim() == 2:
use_batch = False
box_a = box_a[None, ...]
box_b = box_b[None, ...]
inter = intersect(box_a, box_b)
area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *
(box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter)
area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *
(box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter)
union = area_a + area_b - inter
out = inter / area_a if iscrowd else inter / union
return out if use_batch else out.squeeze(0)
def elemwise_box_iou(box_a, box_b):
max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])
min_xy = torch.max(box_a[:, :2], box_b[:, :2])
inter = torch.clamp((max_xy - min_xy), min=0)
inter = inter[:, 0] * inter[:, 1]
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
union = area_a + area_b - inter
union = torch.clamp(union, min=0.1)
return torch.clamp(inter / union, max=1)
def mask_iou(masks_a, masks_b, iscrowd=False):
masks_a = masks_a.view(masks_a.size(0), -1)
masks_b = masks_b.view(masks_b.size(0), -1)
intersection = masks_a @ masks_b.t()
area_a = masks_a.sum(dim=1).unsqueeze(1)
area_b = masks_b.sum(dim=1).unsqueeze(0)
return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a
def elemwise_mask_iou(masks_a, masks_b):
masks_a = masks_a.view(-1, masks_a.size(-1))
masks_b = masks_b.view(-1, masks_b.size(-1))
intersection = (masks_a * masks_b).sum(dim=0)
area_a = masks_a.sum(dim=0)
area_b = masks_b.sum(dim=0)
return torch.clamp(intersection / torch.clamp(area_a + area_b - intersection, min=0.1), max=1)
def change(gt, priors):
num_priors = priors.size(0)
num_gt = gt.size(0)
gt_w = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)
gt_h = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)
gt_mat = gt[:, None, :].expand(num_gt, num_priors, 4)
pr_mat = priors[None, :, :].expand(num_gt, num_priors, 4)
diff = gt_mat - pr_mat
diff[:, :, 0] /= gt_w
diff[:, :, 2] /= gt_w
diff[:, :, 1] /= gt_h
diff[:, :, 3] /= gt_h
return -torch.sqrt( (diff ** 2).sum(dim=2) )
def match(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx, loc_data):
decoded_priors = decode(loc_data, priors, cfg.use_yolo_regressors) if cfg.use_prediction_matching else point_form(priors)
overlaps = jaccard(truths, decoded_priors) if not cfg.use_change_matching else change(truths, decoded_priors)
best_truth_overlap, best_truth_idx = overlaps.max(0)
    # Ensure each gt is matched at least once so we don't waste any training data.
    # In order to do that, find the max overlap anchor
# with each gt, and force that anchor to use that gt.
for _ in range(overlaps.size(0)):
# Find j, the gt with the highest overlap with a prior
# In effect, this will loop through overlaps.size(0) in a "smart" order,
# always choosing the highest overlap first.
best_prior_overlap, best_prior_idx = overlaps.max(1)
j = best_prior_overlap.max(0)[1]
# Find i, the highest overlap anchor with this gt
i = best_prior_idx[j]
# Set all other overlaps with i to be -1 so that no other gt uses it
overlaps[:, i] = -1
# Set all other overlaps with j to be -1 so that this loop never uses j again
overlaps[j, :] = -1
# Overwrite i's score to be 2 so it doesn't get thresholded ever
best_truth_overlap[i] = 2
# Set the gt to be used for i to be j, overwriting whatever was there
best_truth_idx[i] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < pos_thresh] = -1 # label as neutral
conf[best_truth_overlap < neg_thresh] = 0 # label as background
# Deal with crowd annotations for COCO
if crowd_boxes is not None and cfg.crowd_iou_threshold < 1:
# Size [num_priors, num_crowds]
crowd_overlaps = jaccard(decoded_priors, crowd_boxes, iscrowd=True)
# Size [num_priors]
best_crowd_overlap, best_crowd_idx = crowd_overlaps.max(1)
# Set non-positives with crowd iou of over the threshold to be neutral.
conf[(conf <= 0) & (best_crowd_overlap > cfg.crowd_iou_threshold)] = -1
loc = encode(matches, priors, cfg.use_yolo_regressors)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
idx_t[idx] = best_truth_idx # [num_priors] indices for lookup
@torch.jit.script
def encode(matched, priors, use_yolo_regressors:bool=False):
if use_yolo_regressors:
# Exactly the reverse of what we did in decode
# In fact encode(decode(x, p), p) should be x
boxes = center_size(matched)
loc = torch.cat((
boxes[:, :2] - priors[:, :2],
torch.log(boxes[:, 2:] / priors[:, 2:])
), 1)
else:
variances = [0.1, 0.2]
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
g_cxcy /= (variances[0] * priors[:, 2:])
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
loc = torch.cat([g_cxcy, g_wh], 1)
return loc
@torch.jit.script
def decode(loc, priors, use_yolo_regressors:bool=False):
if use_yolo_regressors:
boxes = torch.cat((
loc[:, :2] + priors[:, :2],
priors[:, 2:] * torch.exp(loc[:, 2:])
), 1)
boxes = point_form(boxes)
else:
variances = [0.1, 0.2]
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def log_sum_exp(x):
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max
@torch.jit.script
def sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):
_x1 = _x1 * img_size
_x2 = _x2 * img_size
if cast:
_x1 = _x1.long()
_x2 = _x2.long()
x1 = torch.min(_x1, _x2)
x2 = torch.max(_x1, _x2)
x1 = torch.clamp(x1-padding, min=0)
x2 = torch.clamp(x2+padding, max=img_size)
return x1, x2
@torch.jit.script
def crop(masks, boxes, padding:int=1):
h, w, n = masks.size()
x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)
y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)
rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n)
cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n)
masks_left = rows >= x1.view(1, 1, -1)
masks_right = rows < x2.view(1, 1, -1)
masks_up = cols >= y1.view(1, 1, -1)
masks_down = cols < y2.view(1, 1, -1)
crop_mask = masks_left * masks_right * masks_up * masks_down
return masks * crop_mask.float()
def index2d(src, idx):
offs = torch.arange(idx.size(0), device=idx.device)[:, None].expand_as(idx)
idx = idx + offs * idx.size(1)
return src.view(-1)[idx.view(-1)].view(idx.size())
| true
| true
|
1c42edffb7139808c7e7bed7ee187abc5c29299d
| 3,558
|
py
|
Python
|
alipay/aop/api/domain/AlipayOpenPublicMessageGroupSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayOpenPublicMessageGroupSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayOpenPublicMessageGroupSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Article import Article
from alipay.aop.api.domain.Image import Image
from alipay.aop.api.domain.Text import Text
class AlipayOpenPublicMessageGroupSendModel(object):
def __init__(self):
self._articles = None
self._group_id = None
self._image = None
self._msg_type = None
self._text = None
@property
def articles(self):
return self._articles
@articles.setter
def articles(self, value):
if isinstance(value, list):
self._articles = list()
for i in value:
if isinstance(i, Article):
self._articles.append(i)
else:
self._articles.append(Article.from_alipay_dict(i))
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
self._group_id = value
@property
def image(self):
return self._image
@image.setter
def image(self, value):
if isinstance(value, Image):
self._image = value
else:
self._image = Image.from_alipay_dict(value)
@property
def msg_type(self):
return self._msg_type
@msg_type.setter
def msg_type(self, value):
self._msg_type = value
@property
def text(self):
return self._text
@text.setter
def text(self, value):
if isinstance(value, Text):
self._text = value
else:
self._text = Text.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.articles:
if isinstance(self.articles, list):
for i in range(0, len(self.articles)):
element = self.articles[i]
if hasattr(element, 'to_alipay_dict'):
self.articles[i] = element.to_alipay_dict()
if hasattr(self.articles, 'to_alipay_dict'):
params['articles'] = self.articles.to_alipay_dict()
else:
params['articles'] = self.articles
if self.group_id:
if hasattr(self.group_id, 'to_alipay_dict'):
params['group_id'] = self.group_id.to_alipay_dict()
else:
params['group_id'] = self.group_id
if self.image:
if hasattr(self.image, 'to_alipay_dict'):
params['image'] = self.image.to_alipay_dict()
else:
params['image'] = self.image
if self.msg_type:
if hasattr(self.msg_type, 'to_alipay_dict'):
params['msg_type'] = self.msg_type.to_alipay_dict()
else:
params['msg_type'] = self.msg_type
if self.text:
if hasattr(self.text, 'to_alipay_dict'):
params['text'] = self.text.to_alipay_dict()
else:
params['text'] = self.text
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenPublicMessageGroupSendModel()
if 'articles' in d:
o.articles = d['articles']
if 'group_id' in d:
o.group_id = d['group_id']
if 'image' in d:
o.image = d['image']
if 'msg_type' in d:
o.msg_type = d['msg_type']
if 'text' in d:
o.text = d['text']
return o
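# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original SDK file): build a group-send
# payload; group_id and msg_type are plain strings per the setters above, and
# to_alipay_dict() flattens the populated fields into the dict the API expects.
# The literal values below are placeholders, not real identifiers.
def _group_send_example():
    model = AlipayOpenPublicMessageGroupSendModel()
    model.group_id = "GROUP_001"      # placeholder group id
    model.msg_type = "text"           # assumed message type for a text payload
    return model.to_alipay_dict()     # {'group_id': 'GROUP_001', 'msg_type': 'text'}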
| 29.404959
| 70
| 0.554244
|
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Article import Article
from alipay.aop.api.domain.Image import Image
from alipay.aop.api.domain.Text import Text
class AlipayOpenPublicMessageGroupSendModel(object):
def __init__(self):
self._articles = None
self._group_id = None
self._image = None
self._msg_type = None
self._text = None
@property
def articles(self):
return self._articles
@articles.setter
def articles(self, value):
if isinstance(value, list):
self._articles = list()
for i in value:
if isinstance(i, Article):
self._articles.append(i)
else:
self._articles.append(Article.from_alipay_dict(i))
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
self._group_id = value
@property
def image(self):
return self._image
@image.setter
def image(self, value):
if isinstance(value, Image):
self._image = value
else:
self._image = Image.from_alipay_dict(value)
@property
def msg_type(self):
return self._msg_type
@msg_type.setter
def msg_type(self, value):
self._msg_type = value
@property
def text(self):
return self._text
@text.setter
def text(self, value):
if isinstance(value, Text):
self._text = value
else:
self._text = Text.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.articles:
if isinstance(self.articles, list):
for i in range(0, len(self.articles)):
element = self.articles[i]
if hasattr(element, 'to_alipay_dict'):
self.articles[i] = element.to_alipay_dict()
if hasattr(self.articles, 'to_alipay_dict'):
params['articles'] = self.articles.to_alipay_dict()
else:
params['articles'] = self.articles
if self.group_id:
if hasattr(self.group_id, 'to_alipay_dict'):
params['group_id'] = self.group_id.to_alipay_dict()
else:
params['group_id'] = self.group_id
if self.image:
if hasattr(self.image, 'to_alipay_dict'):
params['image'] = self.image.to_alipay_dict()
else:
params['image'] = self.image
if self.msg_type:
if hasattr(self.msg_type, 'to_alipay_dict'):
params['msg_type'] = self.msg_type.to_alipay_dict()
else:
params['msg_type'] = self.msg_type
if self.text:
if hasattr(self.text, 'to_alipay_dict'):
params['text'] = self.text.to_alipay_dict()
else:
params['text'] = self.text
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenPublicMessageGroupSendModel()
if 'articles' in d:
o.articles = d['articles']
if 'group_id' in d:
o.group_id = d['group_id']
if 'image' in d:
o.image = d['image']
if 'msg_type' in d:
o.msg_type = d['msg_type']
if 'text' in d:
o.text = d['text']
return o
| true
| true
|
1c42eef5e88d12088d2e2f4ab09315ae19042ecf
| 929
|
py
|
Python
|
index.py
|
EDAII/Lista1_Guilherme_Isaque
|
b7dc5b2cde48ed192b3b0c6592975324006c6a01
|
[
"MIT"
] | 1
|
2019-09-02T13:17:28.000Z
|
2019-09-02T13:17:28.000Z
|
index.py
|
EDAII/Lista1_Guilherme_Isaque
|
b7dc5b2cde48ed192b3b0c6592975324006c6a01
|
[
"MIT"
] | null | null | null |
index.py
|
EDAII/Lista1_Guilherme_Isaque
|
b7dc5b2cde48ed192b3b0c6592975324006c6a01
|
[
"MIT"
] | null | null | null |
import load_and_prepare_data as ld
import search_methods as sm
data = ld.load_and_prepare_data()
# print(data)
for i in data:
print(i)
print("digite o co_cnes do Centro médico")
value = int(input())
result_binary_search_iterative = sm.binary_search(data, value)
result_recursive_binary_search = sm.recursive_binary_search(
data, 0, len(data), value)
result_interpolation_search = sm.interpolation_search(data, value)
result_sequential_search = sm.sequential_search(data, value)
index_list = sm.create_index_list(data, 1000)
result_indexed_sequential_search = sm.indexed_sequential_search(
data, index_list, value)
print("recursive_binary_search", result_recursive_binary_search)
print("binary_search", result_recursive_binary_search)
print("interpolation_search", result_interpolation_search)
print("sequential_search", result_sequential_search)
print("indexed_sequential_search", result_recursive_binary_search)
| 35.730769
| 66
| 0.826695
|
import load_and_prepare_data as ld
import search_methods as sm
data = ld.load_and_prepare_data()
for i in data:
print(i)
print("digite o co_cnes do Centro médico")
value = int(input())
result_binary_search_iterative = sm.binary_search(data, value)
result_recursive_binary_search = sm.recursive_binary_search(
data, 0, len(data), value)
result_interpolation_search = sm.interpolation_search(data, value)
result_sequential_search = sm.sequential_search(data, value)
index_list = sm.create_index_list(data, 1000)
result_indexed_sequential_search = sm.indexed_sequential_search(
data, index_list, value)
print("recursive_binary_search", result_recursive_binary_search)
print("binary_search", result_recursive_binary_search)
print("interpolation_search", result_interpolation_search)
print("sequential_search", result_sequential_search)
print("indexed_sequential_search", result_recursive_binary_search)
| true
| true
|